| filename (string, 4–198 chars) | content (string, 25–939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2–3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0–129, ⌀ = null) | variableargcount (float64, always 0, ⌀ = null) | sentence (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|
database.py
|
import os
import pymongo
from pymongo import MongoClient
from dotenv import load_dotenv
from pprint import pprint
load_dotenv()
CONNECT_URL = os.getenv("CONNECT_URL")
cluster = MongoClient(CONNECT_URL)
'''
db = cluster.ctox
serverStatusResult = db.command("serverStatus")
pprint(serverStatusResult)
'''
db = cluster["valdb"]
collection = db["userbytag"]
def set_record(id, name, tag, details):
    # Create the document if this _id is unseen, otherwise update it in place.
    if collection.count_documents({"_id": id}, limit=1) == 0:
        collection.insert_one({"_id": id, "display_name": name, "tag": tag, "details": details})
    else:
        collection.update_one({"_id": id}, {"$set": {"display_name": name, "tag": tag, "details": details}})
def get_record(id):
    query = {"_id": id}
    try:
        return collection.find_one(query)
    except pymongo.errors.PyMongoError:
        # Treat any database error as a missing record.
        return None
| [] | [] | ["CONNECT_URL"] | [] | ["CONNECT_URL"] | python | 1 | 0 | |
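The row above shows the per-file columns in order: environment (the raw quoted arguments as they appear in the source; populated here only for the Go rows), variablearg, constarg (the environment-variable names passed as constant strings — os.getenv("CONNECT_URL") above yields ["CONNECT_URL"]), variableargjson, constargjson, lang, constargcount, variableargcount, and the empty sentence cell. Below is a minimal sketch of how a column like constarg could be derived from content; the regex patterns and the helper name extract_const_env_args are illustrative assumptions, not the dataset's documented extraction pipeline.

import re

# Hypothetical extractor: collect env-var names passed as constant string
# arguments to common lookup functions. The patterns cover only the accessors
# seen in this preview (os.getenv/os.environ for Python, os.Getenv/os.LookupEnv
# for Go) and are assumptions about how the dataset was built.
ENV_PATTERNS = {
    "python": [
        r"os\.getenv\(\s*['\"]([A-Za-z_][A-Za-z0-9_]*)['\"]",
        r"os\.environ(?:\.get)?\(\s*['\"]([A-Za-z_][A-Za-z0-9_]*)['\"]",
        r"os\.environ\[\s*['\"]([A-Za-z_][A-Za-z0-9_]*)['\"]\s*\]",
    ],
    "go": [
        r"os\.Getenv\(\s*\"([A-Za-z_][A-Za-z0-9_]*)\"",
        r"os\.LookupEnv\(\s*\"([A-Za-z_][A-Za-z0-9_]*)\"",
    ],
}

def extract_const_env_args(content, lang):
    """Return the sorted, de-duplicated env-var names found in content."""
    names = set()
    for pattern in ENV_PATTERNS.get(lang, []):
        names.update(re.findall(pattern, content))
    return sorted(names)

# Checked against the first row: a constant os.getenv() argument is found.
print(extract_const_env_args('CONNECT_URL = os.getenv("CONNECT_URL")', "python"))
# -> ['CONNECT_URL']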
var/spack/repos/builtin/packages/c/package.py
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
class C(Package):
"""Virtual package for C compilers."""
homepage = 'http://open-std.org/JTC1/SC22/WG14/www/standards'
virtual = True
def test(self):
test_source = self.test_suite.current_test_data_dir
for test in os.listdir(test_source):
filepath = test_source.join(test)
exe_name = '%s.exe' % test
cc_exe = os.environ['CC']
cc_opts = ['-o', exe_name, filepath]
compiled = self.run_test(cc_exe, options=cc_opts, installed=True)
if compiled:
expected = ['Hello world', 'YES!']
self.run_test(exe_name, expected=expected)
| [] | [] | ["CC"] | [] | ["CC"] | python | 1 | 0 | |
kubernetes/client.go
|
package kubernetes
import (
"errors"
"fmt"
"net"
"os"
"time"
"k8s.io/api/apps/v1beta1"
"k8s.io/api/apps/v1beta2"
auth_v1 "k8s.io/api/authorization/v1"
batch_v1 "k8s.io/api/batch/v1"
batch_v1beta1 "k8s.io/api/batch/v1beta1"
v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
kube "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
kialiConfig "github.com/kiali/kiali/config"
"github.com/kiali/kiali/log"
osappsv1 "github.com/openshift/api/apps/v1"
osv1 "github.com/openshift/api/project/v1"
)
var (
emptyListOptions = meta_v1.ListOptions{}
emptyGetOptions = meta_v1.GetOptions{}
)
// IstioClientInterface for mocks (only mocked functions are necessary here)
type IstioClientInterface interface {
CreateIstioObject(api, namespace, resourceType, json string) (IstioObject, error)
DeleteIstioObject(api, namespace, resourceType, name string) error
GetAdapter(namespace, adapterType, adapterName string) (IstioObject, error)
GetAdapters(namespace string) ([]IstioObject, error)
GetAuthorizationDetails(namespace string) (*RBACDetails, error)
GetCronJobs(namespace string) ([]batch_v1beta1.CronJob, error)
GetDeployment(namespace string, deploymentName string) (*v1beta1.Deployment, error)
GetDeployments(namespace string) ([]v1beta1.Deployment, error)
GetDeploymentConfig(namespace string, deploymentconfigName string) (*osappsv1.DeploymentConfig, error)
GetDeploymentConfigs(namespace string) ([]osappsv1.DeploymentConfig, error)
GetDestinationRule(namespace string, destinationrule string) (IstioObject, error)
GetDestinationRules(namespace string, serviceName string) ([]IstioObject, error)
GetEndpoints(namespace string, serviceName string) (*v1.Endpoints, error)
GetGateway(namespace string, gateway string) (IstioObject, error)
GetGateways(namespace string) ([]IstioObject, error)
GetIstioDetails(namespace string, serviceName string) (*IstioDetails, error)
GetIstioRule(namespace string, istiorule string) (IstioObject, error)
GetIstioRules(namespace string) ([]IstioObject, error)
GetJobs(namespace string) ([]batch_v1.Job, error)
GetNamespace(namespace string) (*v1.Namespace, error)
GetNamespaces() ([]v1.Namespace, error)
GetPods(namespace, labelSelector string) ([]v1.Pod, error)
GetProject(project string) (*osv1.Project, error)
GetProjects() ([]osv1.Project, error)
GetQuotaSpec(namespace string, quotaSpecName string) (IstioObject, error)
GetQuotaSpecs(namespace string) ([]IstioObject, error)
GetQuotaSpecBinding(namespace string, quotaSpecBindingName string) (IstioObject, error)
GetQuotaSpecBindings(namespace string) ([]IstioObject, error)
GetReplicationControllers(namespace string) ([]v1.ReplicationController, error)
GetReplicaSets(namespace string) ([]v1beta2.ReplicaSet, error)
GetSelfSubjectAccessReview(namespace, api, resourceType string, verbs []string) ([]*auth_v1.SelfSubjectAccessReview, error)
GetService(namespace string, serviceName string) (*v1.Service, error)
GetServices(namespace string, selectorLabels map[string]string) ([]v1.Service, error)
GetServiceEntries(namespace string) ([]IstioObject, error)
GetServiceEntry(namespace string, serviceEntryName string) (IstioObject, error)
GetStatefulSet(namespace string, statefulsetName string) (*v1beta2.StatefulSet, error)
GetStatefulSets(namespace string) ([]v1beta2.StatefulSet, error)
GetTemplate(namespace, templateType, templateName string) (IstioObject, error)
GetTemplates(namespace string) ([]IstioObject, error)
GetPolicy(namespace string, policyName string) (IstioObject, error)
GetPolicies(namespace string) ([]IstioObject, error)
GetMeshPolicy(namespace string, policyName string) (IstioObject, error)
GetMeshPolicies(namespace string) ([]IstioObject, error)
GetClusterRbacConfig(namespace string, name string) (IstioObject, error)
GetClusterRbacConfigs(namespace string) ([]IstioObject, error)
GetRbacConfig(namespace string, name string) (IstioObject, error)
GetRbacConfigs(namespace string) ([]IstioObject, error)
GetServiceRole(namespace string, name string) (IstioObject, error)
GetServiceRoles(namespace string) ([]IstioObject, error)
GetServiceRoleBinding(namespace string, name string) (IstioObject, error)
GetServiceRoleBindings(namespace string) ([]IstioObject, error)
GetVirtualService(namespace string, virtualservice string) (IstioObject, error)
GetVirtualServices(namespace string, serviceName string) ([]IstioObject, error)
IsOpenShift() bool
Stop()
UpdateIstioObject(api, namespace, resourceType, name, jsonPatch string) (IstioObject, error)
}
// IstioClient is the client struct for Kubernetes and Istio APIs
// It hides the way it queries each API
type IstioClient struct {
IstioClientInterface
k8s *kube.Clientset
istioConfigApi *rest.RESTClient
istioNetworkingApi *rest.RESTClient
istioAuthenticationApi *rest.RESTClient
istioRbacApi *rest.RESTClient
// isOpenShift indicates whether Kiali is deployed in an OpenShift cluster.
// It is a pointer so the uninitialized state can be represented.
// See kubernetes_service.go#IsOpenShift() for more details.
isOpenShift *bool
// rbacResources records which resources Kiali can access in the rbac.istio.io group.
// It is a pointer so the uninitialized state can be represented.
// See istio_details_service.go#HasRbacResource() for more details.
rbacResources *map[string]bool
// Cache controller is a global cache for all k8s objects fetched by kiali in multiple namespaces.
// It doesn't support reduced-permissions scenarios yet; don't forget to disable it in those use cases.
k8sCache cacheController
stopCache chan struct{}
}
// ClientFactory interface for the clientFactory object
type ClientFactory interface {
NewClient(token string) (IstioClientInterface, error)
}
// clientFactory is used to generate per-user clients
type clientFactory struct {
ClientFactory
baseIstioConfig *rest.Config
}
// GetK8sApi returns the clientset referencing all K8s rest clients
func (client *IstioClient) GetK8sApi() *kube.Clientset {
return client.k8s
}
// GetIstioConfigApi returns the istio config rest client
func (client *IstioClient) GetIstioConfigApi() *rest.RESTClient {
return client.istioConfigApi
}
// GetIstioNetworkingApi returns the istio networking rest client
func (client *IstioClient) GetIstioNetworkingApi() *rest.RESTClient {
return client.istioNetworkingApi
}
// GetIstioRbacApi returns the istio rbac rest client
func (client *IstioClient) GetIstioRbacApi() *rest.RESTClient {
return client.istioRbacApi
}
// ConfigClient returns a client with the correct configuration:
// the in-cluster configuration when InCluster is true,
// and an out-of-cluster configuration when InCluster is false.
// It returns an error on any problem.
func ConfigClient() (*rest.Config, error) {
if kialiConfig.Get().InCluster {
incluster, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
incluster.QPS = kialiConfig.Get().KubernetesConfig.QPS
incluster.Burst = kialiConfig.Get().KubernetesConfig.Burst
return incluster, nil
}
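// Fall back to building a config from the KUBERNETES_SERVICE_HOST/PORT
// environment variables that Kubernetes injects into every pod.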
host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
if len(host) == 0 || len(port) == 0 {
return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined")
}
return &rest.Config{
// TODO: switch to using cluster DNS.
Host: "http://" + net.JoinHostPort(host, port),
QPS: kialiConfig.Get().KubernetesConfig.QPS,
Burst: kialiConfig.Get().KubernetesConfig.Burst,
}, nil
}
// NewClientFactory creates a new ClientFactory that can be used to generate per-user clients
func NewClientFactory() (ClientFactory, error) {
// Get the normal configuration
config, err := ConfigClient()
if err != nil {
return nil, err
}
// Create a new config based on what was gathered above but don't specify the bearer token to use
istioConfig := rest.Config{
Host: config.Host,
TLSClientConfig: config.TLSClientConfig,
QPS: config.QPS,
Burst: config.Burst,
}
return &clientFactory{
baseIstioConfig: &istioConfig,
}, nil
}
// NewClient creates a new IstioClientInterface based on a users k8s token
func (uc *clientFactory) NewClient(token string) (IstioClientInterface, error) {
config := uc.baseIstioConfig
config.BearerToken = token
return NewClientFromConfig(config)
}
// NewClientFromConfig creates a new client to the Kubernetes and Istio APIs.
// It assumes that Istio is deployed in the cluster.
// It hides access to the Kubernetes/OpenShift credentials.
// It hides the low-level use of the Kubernetes and Istio APIs; it should be considered an implementation detail.
// It returns an error on any problem.
func NewClientFromConfig(config *rest.Config) (*IstioClient, error) {
client := IstioClient{}
log.Debugf("Rest perf config QPS: %f Burst: %d", config.QPS, config.Burst)
k8s, err := kube.NewForConfig(config)
if err != nil {
return nil, err
}
client.k8s = k8s
// Init client cache
// Note that cache will work only in full permissions scenarios (similar permissions as mixer/istio-telemetry component)
kialiK8sCfg := kialiConfig.Get().KubernetesConfig
if client.k8sCache == nil && kialiK8sCfg.CacheEnabled {
log.Infof("Kiali K8S Cache enabled")
client.stopCache = make(chan struct{})
client.k8sCache = newCacheController(client.k8s, time.Duration(kialiConfig.Get().KubernetesConfig.CacheDuration))
client.k8sCache.Start()
if !client.k8sCache.WaitForSync() {
return nil, errors.New("Cache cannot connect with the k8s API on host: " + config.Host)
}
}
// Istio is a CRD extension of Kubernetes API, so any custom type should be registered here.
// KnownTypes registers the Istio objects we use, as soon as we get more info we will increase the number of types.
types := runtime.NewScheme()
schemeBuilder := runtime.NewSchemeBuilder(
func(scheme *runtime.Scheme) error {
// Register networking types
for _, nt := range networkingTypes {
scheme.AddKnownTypeWithName(NetworkingGroupVersion.WithKind(nt.objectKind), &GenericIstioObject{})
scheme.AddKnownTypeWithName(NetworkingGroupVersion.WithKind(nt.collectionKind), &GenericIstioObjectList{})
}
// Register config types
for _, cf := range configTypes {
scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(cf.objectKind), &GenericIstioObject{})
scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(cf.collectionKind), &GenericIstioObjectList{})
}
// Register adapter types
for _, ad := range adapterTypes {
scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(ad.objectKind), &GenericIstioObject{})
scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(ad.collectionKind), &GenericIstioObjectList{})
}
// Register template types
for _, tp := range templateTypes {
scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(tp.objectKind), &GenericIstioObject{})
scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(tp.collectionKind), &GenericIstioObjectList{})
}
// Register authentication types
for _, at := range authenticationTypes {
scheme.AddKnownTypeWithName(AuthenticationGroupVersion.WithKind(at.objectKind), &GenericIstioObject{})
scheme.AddKnownTypeWithName(AuthenticationGroupVersion.WithKind(at.collectionKind), &GenericIstioObjectList{})
}
// Register rbac types
for _, rt := range rbacTypes {
scheme.AddKnownTypeWithName(RbacGroupVersion.WithKind(rt.objectKind), &GenericIstioObject{})
scheme.AddKnownTypeWithName(RbacGroupVersion.WithKind(rt.collectionKind), &GenericIstioObjectList{})
}
meta_v1.AddToGroupVersion(scheme, ConfigGroupVersion)
meta_v1.AddToGroupVersion(scheme, NetworkingGroupVersion)
meta_v1.AddToGroupVersion(scheme, AuthenticationGroupVersion)
meta_v1.AddToGroupVersion(scheme, RbacGroupVersion)
return nil
})
err = schemeBuilder.AddToScheme(types)
if err != nil {
return nil, err
}
// Istio needs another type as it queries a different K8S API.
istioConfigAPI, err := newClientForAPI(config, ConfigGroupVersion, types)
if err != nil {
return nil, err
}
istioNetworkingAPI, err := newClientForAPI(config, NetworkingGroupVersion, types)
if err != nil {
return nil, err
}
istioAuthenticationAPI, err := newClientForAPI(config, AuthenticationGroupVersion, types)
if err != nil {
return nil, err
}
istioRbacApi, err := newClientForAPI(config, RbacGroupVersion, types)
if err != nil {
return nil, err
}
client.istioConfigApi = istioConfigAPI
client.istioNetworkingApi = istioNetworkingAPI
client.istioAuthenticationApi = istioAuthenticationAPI
client.istioRbacApi = istioRbacApi
return &client, nil
}
func newClientForAPI(fromCfg *rest.Config, groupVersion schema.GroupVersion, scheme *runtime.Scheme) (*rest.RESTClient, error) {
cfg := rest.Config{
Host: fromCfg.Host,
APIPath: "/apis",
ContentConfig: rest.ContentConfig{
GroupVersion: &groupVersion,
NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: serializer.NewCodecFactory(scheme)},
ContentType: runtime.ContentTypeJSON,
},
BearerToken: fromCfg.BearerToken,
TLSClientConfig: fromCfg.TLSClientConfig,
QPS: fromCfg.QPS,
Burst: fromCfg.Burst,
}
return rest.RESTClientFor(&cfg)
}
func (in *IstioClient) Stop() {
if in.k8sCache != nil {
in.k8sCache.Stop()
}
}
| ["\"KUBERNETES_SERVICE_HOST\"", "\"KUBERNETES_SERVICE_PORT\""] | [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"] | [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"] | go | 2 | 0 | |
internal/app/status/status.go
|
package status
import (
"os"
"sync"
"github.com/gotasma/internal/pkg/status"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
type (
//Status format from status pkg
Status = status.Status
GenStatus struct {
Success Status
NotFound Status `yaml:"not_found"`
BadRequest Status `yaml:"bad_request"`
Internal Status
}
ProjectStatus struct {
NotFoundProject Status `yaml:"not_found_project"`
DuplicateProject Status `yaml:"duplicated_project"`
AlreadyInProject Status `yaml:"already_in_project"`
NotInProject Status `yaml:"not_in_project"`
NotFoundDev Status `yaml:"not_found_dev"`
ProjectCreater Status `yaml:"creater_project"`
}
TaskStatus struct {
InvalidTask Status `yaml:"invalid_task"`
DuplicatedTask Status `yaml:"duplicated_task"`
NotFoundTask Status `yaml:"not_found_task"`
NotInProject Status `yaml:"not_in_project"`
}
HolidayStatus struct {
InvalidHoliday Status `yaml:"invalid_holiday"`
DuplicatedHoliday Status `yaml:"duplicated_holiday"`
NotFoundHoliday Status `yaml:"not_found_holiday"`
NotFoundProject Status `yaml:"not_found_project"`
AlreadyInProject Status `yaml:"already_in_project"`
}
PolicyStatus struct {
Unauthorized Status
}
UserStatus struct {
DuplicatedEmail Status `yaml:"duplicated_email"`
NotFoundUser Status `yaml:"not_found_user"`
NotFoundProject Status `yaml:"not_found_project"`
NotFoundTask Status `yaml:"not_found_task"`
AlreadyInProject Status `yaml:"already_in_project"`
AlreadyInTask Status `yaml:"already_in_task"`
}
AuthStatus struct {
InvalidUserPassword Status `yaml:"invalid_user_password"`
}
SercurityStatus struct {
InvalidAction Status `yaml:"invalid_action"`
}
statuses struct {
Gen GenStatus
User UserStatus
Auth AuthStatus
Policy PolicyStatus
Sercurity SercurityStatus
Holiday HolidayStatus
Project ProjectStatus
Task TaskStatus
}
)
var (
all *statuses
once sync.Once
)
// Init loads statuses from the given config file.
// Init panics if it cannot access the config file or if parsing fails.
func Init(conf string) {
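// sync.Once guarantees the config file is read and parsed at most once,
// even if Init is called from multiple goroutines.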
once.Do(func() {
f, err := os.Open(conf)
if err != nil {
logrus.Errorf("Fail to open status file, %v", err)
panic(err)
}
all = &statuses{}
if err := yaml.NewDecoder(f).Decode(all); err != nil {
logrus.Errorf("Fail to parse status file data to statuses struct, %v", err)
panic(err)
}
})
}
// load returns all registered statuses.
// load reads statuses from configs/status.yml (or $STATUS_PATH) if they have not been initialized yet.
func load() *statuses {
conf := os.Getenv("STATUS_PATH")
if conf == "" {
conf = "configs/status.yml"
}
Init(conf)
return all
}
func Gen() GenStatus {
return load().Gen
}
func User() UserStatus {
return load().User
}
func Success() Status {
return Gen().Success
}
func Auth() AuthStatus {
return load().Auth
}
func Policy() PolicyStatus {
return load().Policy
}
func Sercurity() SercurityStatus {
return load().Sercurity
}
func Holiday() HolidayStatus {
return load().Holiday
}
func Project() ProjectStatus {
return load().Project
}
func Task() TaskStatus {
return load().Task
}
| ["\"STATUS_PATH\""] | [] | ["STATUS_PATH"] | [] | ["STATUS_PATH"] | go | 1 | 0 | |
scripts/deploy_docs.py
|
#!/usr/bin/env python3
"""Deploys documentation to GitHub pages.
If the environment variable :attr:`TRAVIS_BRANCH` is set, it overrides
the current git branch.
If the environment variable :attr:`GH_TOKEN` is set, it is used as the API
token.
"""
import os
import shutil
import subprocess as sub
import sys
def get_current_git_branch():
"""Returns the current git branch."""
return str(sub.check_output("git rev-parse --abbrev-ref HEAD",
shell=True).splitlines()[0], 'utf-8')
GIT_CONFIG = ['user.email [email protected]',
'user.name "Travis CI"']
GIT_BRANCH = os.environ.get('TRAVIS_BRANCH', get_current_git_branch())
WORKING_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__),
'../docs'))
DEPLOY_REPO_DIR = os.path.join(WORKING_DIR, 'deploy-repo')
DEPLOY_REPO_REMOTE = "https://{token}github.com/garstka/garstka.github.io.git"
DEPLOY_DOCS_PARENT_DIR = os.path.join(DEPLOY_REPO_DIR, 'idact')
DEPLOY_DOCS_DIR = os.path.join(DEPLOY_DOCS_PARENT_DIR,
'{git_branch}/html'.format(
git_branch=GIT_BRANCH))
SOURCE_DOCS_DIR = os.path.join(WORKING_DIR, '_build/html')
BUILD_NUMBER = os.environ.get('TRAVIS_BUILD_NUMBER', 'manual')
COMMIT_MESSAGE = ("Deploy docs for branch {git_branch},"
" build: {build_number}").format(git_branch=GIT_BRANCH,
build_number=BUILD_NUMBER)
def main():
"""Main script function."""
def call(command):
"""Alias for shell check_call."""
sub.check_call(command, shell=True)
try:
os.chdir(WORKING_DIR)
print("Deploying docs...")
if os.path.isdir(DEPLOY_REPO_DIR):
shutil.rmtree(DEPLOY_REPO_DIR)
os.mkdir(DEPLOY_REPO_DIR)
os.chdir(DEPLOY_REPO_DIR)
call("git init")
for config in GIT_CONFIG:
call("git config {}".format(config))
token = os.environ.get('GH_TOKEN', '')
if token:
token += '@'
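        # With GH_TOKEN set, the remote becomes https://TOKEN@github.com/...,
        # letting the push authenticate non-interactively on CI.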
remote = DEPLOY_REPO_REMOTE.format(token=token)
call("git remote add origin {}".format(remote))
call("git fetch origin")
call("git checkout master")
if os.path.isdir(DEPLOY_DOCS_DIR):
shutil.rmtree(DEPLOY_DOCS_DIR)
shutil.copytree(SOURCE_DOCS_DIR, DEPLOY_DOCS_DIR)
call("git add {}".format(DEPLOY_DOCS_DIR))
call('git commit -m "{}"'.format(COMMIT_MESSAGE))
call("git push")
return 0
except Exception as e: # pylint: disable=broad-except
print(e)
return 1
if __name__ == '__main__':
sys.exit(main())
| [] | [] | ["TRAVIS_BUILD_NUMBER", "TRAVIS_BRANCH", "GH_TOKEN"] | [] | ["TRAVIS_BUILD_NUMBER", "TRAVIS_BRANCH", "GH_TOKEN"] | python | 3 | 0 | |
pkg/multus/multus.go
|
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package multus
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"strings"
"time"
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/invoke"
"github.com/containernetworking/cni/pkg/skel"
cnitypes "github.com/containernetworking/cni/pkg/types"
cnicurrent "github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
nadutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils"
"github.com/vishvananda/netlink"
k8s "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/k8sclient"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/netutils"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
k8snet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
)
const (
shortPollDuration = 250 * time.Millisecond
shortPollTimeout = 2500 * time.Millisecond
)
var (
version = "master@git"
commit = "unknown commit"
date = "unknown date"
)
var (
pollDuration = 1000 * time.Millisecond
pollTimeout = 45 * time.Second
)
//PrintVersionString ...
func PrintVersionString() string {
return fmt.Sprintf("multus-cni version:%s, commit:%s, date:%s",
version, commit, date)
}
func saveScratchNetConf(containerID, dataDir string, netconf []byte) error {
logging.Debugf("saveScratchNetConf: %s, %s, %s", containerID, dataDir, string(netconf))
if err := os.MkdirAll(dataDir, 0700); err != nil {
return logging.Errorf("saveScratchNetConf: failed to create the multus data directory(%q): %v", dataDir, err)
}
path := filepath.Join(dataDir, containerID)
err := ioutil.WriteFile(path, netconf, 0600)
if err != nil {
return logging.Errorf("saveScratchNetConf: failed to write container data in the path(%q): %v", path, err)
}
return err
}
func consumeScratchNetConf(containerID, dataDir string) ([]byte, string, error) {
logging.Debugf("consumeScratchNetConf: %s, %s", containerID, dataDir)
path := filepath.Join(dataDir, containerID)
b, err := ioutil.ReadFile(path)
return b, path, err
}
func getIfname(delegate *types.DelegateNetConf, argif string, idx int) string {
logging.Debugf("getIfname: %v, %s, %d", delegate, argif, idx)
if delegate.IfnameRequest != "" {
return delegate.IfnameRequest
}
if delegate.MasterPlugin {
// master plugin always uses the CNI-provided interface name
return argif
}
// Otherwise construct a unique interface name from the delegate's
// position in the delegate list
return fmt.Sprintf("net%d", idx)
}
func getDelegateDeviceInfo(delegate *types.DelegateNetConf, runtimeConf *libcni.RuntimeConf) (*nettypes.DeviceInfo, error) {
// If the DPDeviceInfoFile was created, it was copied to the CNIDeviceInfoFile.
// If the DPDeviceInfoFile was not created, CNI might have created it. So
// either way, load CNIDeviceInfoFile.
if info, ok := runtimeConf.CapabilityArgs["CNIDeviceInfoFile"]; ok {
if infostr, ok := info.(string); ok {
return nadutils.LoadDeviceInfoFromCNI(infostr)
}
} else {
logging.Debugf("getDelegateDeviceInfo(): No CapArgs - info=%v ok=%v", info, ok)
}
return nil, nil
}
func saveDelegates(containerID, dataDir string, delegates []*types.DelegateNetConf) error {
logging.Debugf("saveDelegates: %s, %s, %v", containerID, dataDir, delegates)
delegatesBytes, err := json.Marshal(delegates)
if err != nil {
return logging.Errorf("saveDelegates: error serializing delegate netconf: %v", err)
}
if err = saveScratchNetConf(containerID, dataDir, delegatesBytes); err != nil {
return logging.Errorf("saveDelegates: error in saving the delegates : %v", err)
}
return err
}
func deleteDelegates(containerID, dataDir string) error {
logging.Debugf("deleteDelegates: %s, %s", containerID, dataDir)
path := filepath.Join(dataDir, containerID)
if err := os.Remove(path); err != nil {
return logging.Errorf("deleteDelegates: error in deleting the delegates : %v", err)
}
return nil
}
func validateIfName(nsname string, ifname string) error {
logging.Debugf("validateIfName: %s, %s", nsname, ifname)
podNs, err := ns.GetNS(nsname)
if err != nil {
return logging.Errorf("validateIfName: no net namespace %s found: %v", nsname, err)
}
err = podNs.Do(func(_ ns.NetNS) error {
_, err := netlink.LinkByName(ifname)
if err != nil {
if err.Error() == "Link not found" {
return nil
}
return err
}
return logging.Errorf("validateIfName: interface name %s already exists", ifname)
})
return err
}
func confAdd(rt *libcni.RuntimeConf, rawNetconf []byte, multusNetconf *types.NetConf, exec invoke.Exec) (cnitypes.Result, error) {
logging.Debugf("confAdd: %v, %s", rt, string(rawNetconf))
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
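// CNI_PATH is the standard search-path variable for CNI plugin binaries;
// multus's own BinDir is prepended so it is searched first.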
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
conf, err := libcni.ConfFromBytes(rawNetconf)
if err != nil {
return nil, logging.Errorf("error in converting the raw bytes to conf: %v", err)
}
result, err := cniNet.AddNetwork(context.Background(), conf, rt)
if err != nil {
return nil, err
}
return result, nil
}
func confCheck(rt *libcni.RuntimeConf, rawNetconf []byte, multusNetconf *types.NetConf, exec invoke.Exec) error {
logging.Debugf("confCheck: %v, %s", rt, string(rawNetconf))
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
conf, err := libcni.ConfFromBytes(rawNetconf)
if err != nil {
return logging.Errorf("error in converting the raw bytes to conf: %v", err)
}
err = cniNet.CheckNetwork(context.Background(), conf, rt)
if err != nil {
return logging.Errorf("error in getting result from DelNetwork: %v", err)
}
return err
}
func confDel(rt *libcni.RuntimeConf, rawNetconf []byte, multusNetconf *types.NetConf, exec invoke.Exec) error {
logging.Debugf("conflistDel: %v, %s", rt, string(rawNetconf))
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
conf, err := libcni.ConfFromBytes(rawNetconf)
if err != nil {
return logging.Errorf("error in converting the raw bytes to conf: %v", err)
}
err = cniNet.DelNetwork(context.Background(), conf, rt)
if err != nil {
return logging.Errorf("error in getting result from DelNetwork: %v", err)
}
return err
}
func conflistAdd(rt *libcni.RuntimeConf, rawnetconflist []byte, multusNetconf *types.NetConf, exec invoke.Exec) (cnitypes.Result, error) {
logging.Debugf("conflistAdd: %v, %s", rt, string(rawnetconflist))
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
return nil, logging.Errorf("conflistAdd: error converting the raw bytes into a conflist: %v", err)
}
result, err := cniNet.AddNetworkList(context.Background(), confList, rt)
if err != nil {
return nil, err
}
return result, nil
}
func conflistCheck(rt *libcni.RuntimeConf, rawnetconflist []byte, multusNetconf *types.NetConf, exec invoke.Exec) error {
logging.Debugf("conflistCheck: %v, %s", rt, string(rawnetconflist))
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
return logging.Errorf("conflistCheck: error converting the raw bytes into a conflist: %v", err)
}
err = cniNet.CheckNetworkList(context.Background(), confList, rt)
if err != nil {
return logging.Errorf("conflistCheck: error in getting result from CheckNetworkList: %v", err)
}
return err
}
func conflistDel(rt *libcni.RuntimeConf, rawnetconflist []byte, multusNetconf *types.NetConf, exec invoke.Exec) error {
logging.Debugf("conflistDel: %v, %s", rt, string(rawnetconflist))
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
return logging.Errorf("conflistDel: error converting the raw bytes into a conflist: %v", err)
}
err = cniNet.DelNetworkList(context.Background(), confList, rt)
if err != nil {
return logging.Errorf("conflistDel: error in getting result from DelNetworkList: %v", err)
}
return err
}
func delegateAdd(exec invoke.Exec, kubeClient *k8s.ClientInfo, pod *v1.Pod, ifName string, delegate *types.DelegateNetConf, rt *libcni.RuntimeConf, multusNetconf *types.NetConf, cniArgs string) (cnitypes.Result, error) {
logging.Debugf("delegateAdd: %v, %s, %v, %v", exec, ifName, delegate, rt)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return nil, logging.Errorf("delegateAdd: error setting environment variable CNI_IFNAME")
}
if err := validateIfName(os.Getenv("CNI_NETNS"), ifName); err != nil {
return nil, logging.Errorf("delegateAdd: cannot set %q interface name to %q: %v", delegate.Conf.Type, ifName, err)
}
// Deprecated in ver 3.5.
if delegate.MacRequest != "" || delegate.IPRequest != nil {
if cniArgs != "" {
cniArgs = fmt.Sprintf("%s;IgnoreUnknown=true", cniArgs)
} else {
cniArgs = "IgnoreUnknown=true"
}
if delegate.MacRequest != "" {
// validate Mac address
_, err := net.ParseMAC(delegate.MacRequest)
if err != nil {
return nil, logging.Errorf("delegateAdd: failed to parse mac address %q", delegate.MacRequest)
}
cniArgs = fmt.Sprintf("%s;MAC=%s", cniArgs, delegate.MacRequest)
logging.Debugf("delegateAdd: set MAC address %q to %q", delegate.MacRequest, ifName)
rt.Args = append(rt.Args, [2]string{"MAC", delegate.MacRequest})
}
if delegate.IPRequest != nil {
// validate IP address
for _, ip := range delegate.IPRequest {
if strings.Contains(ip, "/") {
_, _, err := net.ParseCIDR(ip)
if err != nil {
return nil, logging.Errorf("delegateAdd: failed to parse IP address %q", ip)
}
} else if net.ParseIP(ip) == nil {
return nil, logging.Errorf("delegateAdd: failed to parse IP address %q", ip)
}
}
ips := strings.Join(delegate.IPRequest, ",")
cniArgs = fmt.Sprintf("%s;IP=%s", cniArgs, ips)
logging.Debugf("delegateAdd: set IP address %q to %q", ips, ifName)
rt.Args = append(rt.Args, [2]string{"IP", ips})
}
}
var result cnitypes.Result
var err error
if delegate.ConfListPlugin {
result, err = conflistAdd(rt, delegate.Bytes, multusNetconf, exec)
if err != nil {
return nil, err
}
} else {
result, err = confAdd(rt, delegate.Bytes, multusNetconf, exec)
if err != nil {
return nil, err
}
}
if logging.GetLoggingLevel() >= logging.VerboseLevel {
data, _ := json.Marshal(result)
var cniConfName string
if delegate.ConfListPlugin {
cniConfName = delegate.ConfList.Name
} else {
cniConfName = delegate.Conf.Name
}
podUID := "unknownUID"
if pod != nil {
podUID = string(pod.ObjectMeta.UID)
}
logging.Verbosef("Add: %s:%s:%s:%s(%s):%s %s", rt.Args[1][1], rt.Args[2][1], podUID, delegate.Name, cniConfName, rt.IfName, string(data))
}
// get IP addresses from result
ips := []string{}
res, err := cnicurrent.NewResultFromResult(result)
if err != nil {
logging.Errorf("delegateAdd: error converting result: %v", err)
return result, nil
}
for _, ip := range res.IPs {
ips = append(ips, ip.Address.String())
}
if pod != nil {
// send kubernetes events
if delegate.Name != "" {
kubeClient.Eventf(pod, v1.EventTypeNormal, "AddedInterface", "Add %s %v from %s", rt.IfName, ips, delegate.Name)
} else {
kubeClient.Eventf(pod, v1.EventTypeNormal, "AddedInterface", "Add %s %v", rt.IfName, ips)
}
} else {
// for further debug https://github.com/k8snetworkplumbingwg/multus-cni/issues/481
logging.Errorf("delegateAdd: pod nil pointer: namespace: %s, name: %s, container id: %s, pod: %v", rt.Args[1][1], rt.Args[2][1], rt.Args[3][1], pod)
}
return result, nil
}
func delegateCheck(exec invoke.Exec, ifName string, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, multusNetconf *types.NetConf) error {
logging.Debugf("delegateCheck: %v, %s, %v, %v", exec, ifName, delegateConf, rt)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return logging.Errorf("delegateCheck: error setting environment variable CNI_IFNAME")
}
if logging.GetLoggingLevel() >= logging.VerboseLevel {
var cniConfName string
if delegateConf.ConfListPlugin {
cniConfName = delegateConf.ConfList.Name
} else {
cniConfName = delegateConf.Conf.Name
}
logging.Verbosef("Check: %s:%s:%s(%s):%s %s", rt.Args[1][1], rt.Args[2][1], delegateConf.Name, cniConfName, rt.IfName, string(delegateConf.Bytes))
}
var err error
if delegateConf.ConfListPlugin {
err = conflistCheck(rt, delegateConf.Bytes, multusNetconf, exec)
if err != nil {
return logging.Errorf("delegateCheck: error invoking ConflistCheck - %q: %v", delegateConf.ConfList.Name, err)
}
} else {
err = confCheck(rt, delegateConf.Bytes, multusNetconf, exec)
if err != nil {
return logging.Errorf("delegateCheck: error invoking DelegateCheck - %q: %v", delegateConf.Conf.Type, err)
}
}
return err
}
func delegateDel(exec invoke.Exec, pod *v1.Pod, ifName string, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, multusNetconf *types.NetConf) error {
logging.Debugf("delegateDel: %v, %v, %s, %v, %v", exec, pod, ifName, delegateConf, rt)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return logging.Errorf("delegateDel: error setting environment variable CNI_IFNAME")
}
if logging.GetLoggingLevel() >= logging.VerboseLevel {
var confName string
if delegateConf.ConfListPlugin {
confName = delegateConf.ConfList.Name
} else {
confName = delegateConf.Conf.Name
}
podUID := "unknownUID"
if pod != nil {
podUID = string(pod.ObjectMeta.UID)
}
logging.Verbosef("Del: %s:%s:%s:%s:%s %s", rt.Args[1][1], rt.Args[2][1], podUID, confName, rt.IfName, string(delegateConf.Bytes))
}
var err error
if delegateConf.ConfListPlugin {
err = conflistDel(rt, delegateConf.Bytes, multusNetconf, exec)
if err != nil {
return logging.Errorf("delegateDel: error invoking ConflistDel - %q: %v", delegateConf.ConfList.Name, err)
}
} else {
err = confDel(rt, delegateConf.Bytes, multusNetconf, exec)
if err != nil {
return logging.Errorf("delegateDel: error invoking DelegateDel - %q: %v", delegateConf.Conf.Type, err)
}
}
return err
}
// delPlugins deletes plugins in reverse order from lastdIdx
// Uses netRt as base RuntimeConf (coming from NetConf) but merges it
// with each of the delegates' configuration
func delPlugins(exec invoke.Exec, pod *v1.Pod, args *skel.CmdArgs, k8sArgs *types.K8sArgs, delegates []*types.DelegateNetConf, lastIdx int, netRt *types.RuntimeConfig, multusNetconf *types.NetConf) error {
logging.Debugf("delPlugins: %v, %v, %v, %v, %v, %d, %v", exec, pod, args, k8sArgs, delegates, lastIdx, netRt)
if os.Setenv("CNI_COMMAND", "DEL") != nil {
return logging.Errorf("delPlugins: error setting environment variable CNI_COMMAND to a value of DEL")
}
var errorstrings []string
for idx := lastIdx; idx >= 0; idx-- {
ifName := getIfname(delegates[idx], args.IfName, idx)
rt, cniDeviceInfoPath := types.CreateCNIRuntimeConf(args, k8sArgs, ifName, netRt, delegates[idx])
// Attempt to delete all but do not error out, instead, collect all errors.
if err := delegateDel(exec, pod, ifName, delegates[idx], rt, multusNetconf); err != nil {
errorstrings = append(errorstrings, err.Error())
}
if cniDeviceInfoPath != "" {
err := nadutils.CleanDeviceInfoForCNI(cniDeviceInfoPath)
// Even if the filename is set, file may not be present. Ignore error,
// but log and in the future may need to filter on specific errors.
if err != nil {
logging.Debugf("delPlugins: CleanDeviceInfoForCNI returned an error - err=%v", err)
}
}
}
// Check if we had any errors, and send them all back.
if len(errorstrings) > 0 {
return fmt.Errorf(strings.Join(errorstrings, " / "))
}
return nil
}
func cmdErr(k8sArgs *types.K8sArgs, format string, args ...interface{}) error {
prefix := "Multus: "
if k8sArgs != nil {
prefix += fmt.Sprintf("[%s/%s/%s]: ", k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_NAME, k8sArgs.K8S_POD_UID)
}
return logging.Errorf(prefix+format, args...)
}
func cmdPluginErr(k8sArgs *types.K8sArgs, confName string, format string, args ...interface{}) error {
msg := ""
if k8sArgs != nil {
msg += fmt.Sprintf("[%s/%s/%s:%s]: ", k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_NAME, k8sArgs.K8S_POD_UID, confName)
}
return logging.Errorf(msg+format, args...)
}
func isCriticalRequestRetriable(err error) bool {
logging.Debugf("isCriticalRequestRetriable: %v", err)
errorTypesAllowingRetry := []func(error) bool{
errors.IsServiceUnavailable, errors.IsInternalError, k8snet.IsConnectionReset, k8snet.IsConnectionRefused}
for _, f := range errorTypesAllowingRetry {
if f(err) {
return true
}
}
return false
}
func getPod(kubeClient *k8s.ClientInfo, k8sArgs *types.K8sArgs, ignoreNotFound bool) (*v1.Pod, error) {
if kubeClient == nil {
return nil, nil
}
podNamespace := string(k8sArgs.K8S_POD_NAMESPACE)
podName := string(k8sArgs.K8S_POD_NAME)
podUID := string(k8sArgs.K8S_POD_UID)
pod, err := kubeClient.GetPod(podNamespace, podName)
if err != nil {
// in case of a retriable error, retry 10 times with 0.25 sec interval
if isCriticalRequestRetriable(err) {
waitErr := wait.PollImmediate(shortPollDuration, shortPollTimeout, func() (bool, error) {
pod, err = kubeClient.GetPod(podNamespace, podName)
return pod != nil, err
})
// retry failed, then return error with retry out
if waitErr != nil {
return nil, cmdErr(k8sArgs, "error waiting for pod: %v", err)
}
} else if ignoreNotFound && errors.IsNotFound(err) {
// If not found, proceed to remove interface with cache
return nil, nil
} else {
// Other case, return error
return nil, cmdErr(k8sArgs, "error getting pod: %v", err)
}
}
if podUID != "" && string(pod.UID) != podUID {
return nil, cmdErr(k8sArgs, "expected pod UID %q but got %q from Kube API", podUID, pod.UID)
}
return pod, nil
}
//CmdAdd ...
func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (cnitypes.Result, error) {
n, err := types.LoadNetConf(args.StdinData)
logging.Debugf("CmdAdd: %v, %v, %v", args, exec, kubeClient)
if err != nil {
return nil, cmdErr(nil, "error loading netconf: %v", err)
}
kubeClient, err = k8s.GetK8sClient(n.Kubeconfig, kubeClient)
if err != nil {
return nil, cmdErr(nil, "error getting k8s client: %v", err)
}
k8sArgs, err := k8s.GetK8sArgs(args)
if err != nil {
return nil, cmdErr(nil, "error getting k8s args: %v", err)
}
if n.ReadinessIndicatorFile != "" {
err := wait.PollImmediate(pollDuration, pollTimeout, func() (bool, error) {
_, err := os.Stat(n.ReadinessIndicatorFile)
return err == nil, nil
})
if err != nil {
return nil, cmdErr(k8sArgs, "have you checked that your default network is ready? still waiting for readinessindicatorfile @ %v. pollimmediate error: %v", n.ReadinessIndicatorFile, err)
}
}
pod, err := getPod(kubeClient, k8sArgs, false)
if err != nil {
return nil, err
}
// resourceMap holds Pod device allocation information; only initialized if CRD contains 'resourceName' annotation.
// This will only be initialized once and all delegate objects can reference this to look up device info.
var resourceMap map[string]*types.ResourceInfo
if n.ClusterNetwork != "" {
resourceMap, err = k8s.GetDefaultNetworks(pod, n, kubeClient, resourceMap)
if err != nil {
return nil, cmdErr(k8sArgs, "failed to get clusterNetwork/defaultNetworks: %v", err)
}
// First delegate is always the master plugin
n.Delegates[0].MasterPlugin = true
}
_, kc, err := k8s.TryLoadPodDelegates(pod, n, kubeClient, resourceMap)
if err != nil {
return nil, cmdErr(k8sArgs, "error loading k8s delegates k8s args: %v", err)
}
// cache the multus config
if err := saveDelegates(args.ContainerID, n.CNIDir, n.Delegates); err != nil {
return nil, cmdErr(k8sArgs, "error saving the delegates: %v", err)
}
var result, tmpResult cnitypes.Result
var netStatus []nettypes.NetworkStatus
cniArgs := os.Getenv("CNI_ARGS")
for idx, delegate := range n.Delegates {
ifName := getIfname(delegate, args.IfName, idx)
rt, cniDeviceInfoPath := types.CreateCNIRuntimeConf(args, k8sArgs, ifName, n.RuntimeConfig, delegate)
if cniDeviceInfoPath != "" && delegate.ResourceName != "" && delegate.DeviceID != "" {
err = nadutils.CopyDeviceInfoForCNIFromDP(cniDeviceInfoPath, delegate.ResourceName, delegate.DeviceID)
// Even if the filename is set, file may not be present. Ignore error,
// but log and in the future may need to filter on specific errors.
if err != nil {
logging.Debugf("cmdAdd: CopyDeviceInfoForCNIFromDP returned an error - err=%v", err)
}
}
tmpResult, err = delegateAdd(exec, kubeClient, pod, ifName, delegate, rt, n, cniArgs)
if err != nil {
// If the add failed, tear down all networks we already added
netName := delegate.Conf.Name
if netName == "" {
netName = delegate.ConfList.Name
}
// Ignore errors; DEL must be idempotent anyway
_ = delPlugins(exec, nil, args, k8sArgs, n.Delegates, idx, n.RuntimeConfig, n)
return nil, cmdPluginErr(k8sArgs, netName, "error adding container to network %q: %v", netName, err)
}
// Remove gateway from routing table if the gateway is not used
deletegateway := false
adddefaultgateway := false
if delegate.IsFilterGateway {
deletegateway = true
logging.Debugf("Marked interface %v for gateway deletion", ifName)
} else {
// Otherwise, determine if this interface now gets our default route.
// According to
// https://docs.google.com/document/d/1Ny03h6IDVy_e_vmElOqR7UdTPAG_RNydhVE1Kx54kFQ (4.1.2.1.9)
// the list can be empty; if it is, we'll assume the CNI's config for the default gateway holds,
// else we'll update the defaultgateway to the one specified.
if delegate.GatewayRequest != nil && delegate.GatewayRequest[0] != nil {
deletegateway = true
adddefaultgateway = true
logging.Debugf("Detected gateway override on interface %v to %v", ifName, delegate.GatewayRequest)
}
}
if deletegateway {
tmpResult, err = netutils.DeleteDefaultGW(args, ifName, &tmpResult)
if err != nil {
return nil, cmdErr(k8sArgs, "error deleting default gateway: %v", err)
}
}
// Here we'll set the default gateway
if adddefaultgateway {
tmpResult, err = netutils.SetDefaultGW(args, ifName, delegate.GatewayRequest, &tmpResult)
if err != nil {
return nil, cmdErr(k8sArgs, "error setting default gateway: %v", err)
}
}
// Master plugin result is always used if present
if delegate.MasterPlugin || result == nil {
result = tmpResult
}
// Read devInfo from CNIDeviceInfoFile if it exists so
// it can be copied to the NetworkStatus.
devinfo, err := getDelegateDeviceInfo(delegate, rt)
if err != nil {
// Even if the filename is set, file may not be present. Ignore error,
// but log and in the future may need to filter on specific errors.
logging.Debugf("cmdAdd: getDelegateDeviceInfo returned an error - err=%v", err)
}
// create the network status, only when Multus has a kubeconfig
if n.Kubeconfig != "" && kc != nil {
if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAME), n.SystemNamespaces) {
delegateNetStatus, err := nadutils.CreateNetworkStatus(tmpResult, delegate.Name, delegate.MasterPlugin, devinfo)
if err != nil {
return nil, cmdErr(k8sArgs, "error setting network status: %v", err)
}
netStatus = append(netStatus, *delegateNetStatus)
}
} else if devinfo != nil {
// Warn that devinfo exists but could not add it to downwards API
logging.Errorf("devinfo available, but no kubeConfig so NetworkStatus not modified.")
}
}
// set the network status annotation in the apiserver, only when Multus has a kubeconfig
if n.Kubeconfig != "" && kc != nil {
if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAME), n.SystemNamespaces) {
err = k8s.SetNetworkStatus(kubeClient, k8sArgs, netStatus, n)
if err != nil {
if strings.Contains(err.Error(), "failed to query the pod") {
return nil, cmdErr(k8sArgs, "error setting the networks status, pod was already deleted: %v", err)
}
return nil, cmdErr(k8sArgs, "error setting the networks status: %v", err)
}
}
}
return result, nil
}
//CmdCheck ...
func CmdCheck(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) error {
in, err := types.LoadNetConf(args.StdinData)
logging.Debugf("CmdCheck: %v, %v, %v", args, exec, kubeClient)
if err != nil {
return err
}
k8sArgs, err := k8s.GetK8sArgs(args)
if err != nil {
return cmdErr(nil, "error getting k8s args: %v", err)
}
for idx, delegate := range in.Delegates {
ifName := getIfname(delegate, args.IfName, idx)
rt, _ := types.CreateCNIRuntimeConf(args, k8sArgs, ifName, in.RuntimeConfig, delegate)
err = delegateCheck(exec, ifName, delegate, rt, in)
if err != nil {
return err
}
}
return nil
}
//CmdDel ...
func CmdDel(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) error {
in, err := types.LoadNetConf(args.StdinData)
logging.Debugf("CmdDel: %v, %v, %v", args, exec, kubeClient)
if err != nil {
return err
}
netnsfound := true
netns, err := ns.GetNS(args.Netns)
if err != nil {
// The netns may already be gone: the Cloud Orchestration Engine can remove it,
// and DEL may be called multiple times, so don't return an error if the device is already removed.
// https://github.com/kubernetes/kubernetes/issues/43014#issuecomment-287164444
_, ok := err.(ns.NSPathNotExistErr)
if ok {
netnsfound = false
logging.Debugf("CmdDel: WARNING netns may not exist, netns: %s, err: %s", args.Netns, err)
} else {
return cmdErr(nil, "failed to open netns %q: %v", netns, err)
}
}
if netns != nil {
defer netns.Close()
}
k8sArgs, err := k8s.GetK8sArgs(args)
if err != nil {
return cmdErr(nil, "error getting k8s args: %v", err)
}
if in.ReadinessIndicatorFile != "" {
err := wait.PollImmediate(pollDuration, pollTimeout, func() (bool, error) {
_, err := os.Stat(in.ReadinessIndicatorFile)
return err == nil, nil
})
if err != nil {
return cmdErr(k8sArgs, "PollImmediate error waiting for ReadinessIndicatorFile (on del): %v", err)
}
}
kubeClient, err = k8s.GetK8sClient(in.Kubeconfig, kubeClient)
if err != nil {
return cmdErr(nil, "error getting k8s client: %v", err)
}
pod, err := getPod(kubeClient, k8sArgs, true)
if err != nil {
return err
}
// Read the cache to get delegates json for the pod
netconfBytes, path, err := consumeScratchNetConf(args.ContainerID, in.CNIDir)
if err != nil {
// Fetch delegates again if the cache does not exist and pod info can be read
if os.IsNotExist(err) && pod != nil {
if in.ClusterNetwork != "" {
_, err = k8s.GetDefaultNetworks(pod, in, kubeClient, nil)
if err != nil {
return cmdErr(k8sArgs, "failed to get clusterNetwork/defaultNetworks: %v", err)
}
// First delegate is always the master plugin
in.Delegates[0].MasterPlugin = true
}
// Get pod annotation and so on
_, _, err := k8s.TryLoadPodDelegates(pod, in, kubeClient, nil)
if err != nil {
if len(in.Delegates) == 0 {
// No delegate available so send error
return cmdErr(k8sArgs, "failed to get delegates: %v", err)
}
// The clusterNetwork delegate was already loaded above, so continue the delete with it
logging.Errorf("Multus: failed to get delegates: %v, but continue to delete clusterNetwork", err)
}
} else {
// The options to continue with a delete have been exhausted (cachefile + API query didn't work)
// We cannot exit with an error as this may cause a sandbox to never get deleted.
logging.Errorf("Multus: failed to get the cached delegates file: %v, cannot properly delete", err)
return nil
}
} else {
defer os.Remove(path)
if err := json.Unmarshal(netconfBytes, &in.Delegates); err != nil {
return cmdErr(k8sArgs, "failed to load netconf: %v", err)
}
// check plugins field and enable ConfListPlugin if there is
for _, v := range in.Delegates {
if len(v.ConfList.Plugins) != 0 {
v.ConfListPlugin = true
}
}
// First delegate is always the master plugin
in.Delegates[0].MasterPlugin = true
}
// set CNIVersion in the delegate CNI config if it has none and the multus conf has a CNIVersion.
for _, v := range in.Delegates {
if v.ConfListPlugin && v.ConfList.CNIVersion == "" && in.CNIVersion != "" {
v.ConfList.CNIVersion = in.CNIVersion
v.Bytes, err = json.Marshal(v.ConfList)
if err != nil {
// error happen but continue to delete
logging.Errorf("Multus: failed to marshal delegate %q config: %v", v.Name, err)
}
}
}
// unset the network status annotation in the apiserver, only when Multus has a kubeconfig
if in.Kubeconfig != "" {
if netnsfound {
if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAMESPACE), in.SystemNamespaces) {
err := k8s.SetNetworkStatus(kubeClient, k8sArgs, nil, in)
if err != nil {
// error happen but continue to delete
logging.Errorf("Multus: error unsetting the networks status: %v", err)
}
}
} else {
logging.Debugf("WARNING: Unset SetNetworkStatus skipped due to netns not found.")
}
}
return delPlugins(exec, pod, args, k8sArgs, in.Delegates, len(in.Delegates)-1, in.RuntimeConfig, in)
}
|
[
"\"CNI_PATH\"",
"\"CNI_PATH\"",
"\"CNI_PATH\"",
"\"CNI_PATH\"",
"\"CNI_PATH\"",
"\"CNI_PATH\"",
"\"CNI_NETNS\"",
"\"CNI_ARGS\""
] |
[] |
[
"CNI_ARGS",
"CNI_NETNS",
"CNI_PATH"
] |
[]
|
["CNI_ARGS", "CNI_NETNS", "CNI_PATH"]
|
go
| 3 | 0 | |
contrib/spendfrom/spendfrom.py
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend DLRs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a delord or delor-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the delor data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Delor/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Delor")
return os.path.expanduser("~/.delor")
def read_bitcoin_config(dbdir):
"""Read the delor.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "delor.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a delor JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
        config['rpcport'] = 9734  # same default port whether mainnet or testnet
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the delord we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
    except Exception:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(delord):
info = delord.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
delord.walletpassphrase(passphrase, 5)
        except Exception:
sys.stderr.write("Wrong passphrase\n")
info = delord.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(delord):
address_summary = dict()
address_to_account = dict()
for info in delord.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = delord.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = delord.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-delor-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(delord, fromaddresses, toaddress, amount, fee):
all_coins = list_available(delord)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to delord.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = delord.createrawtransaction(inputs, outputs)
signed_rawtx = delord.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(delord, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = delord.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(delord, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = delord.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(delord, txinfo)
        total_out = compute_amount_out(txinfo)
        fee = total_in - total_out  # the implicit fee is whatever is not paid back out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)//2  # two hex characters per byte
        kb = tx_size//1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < Decimal("0.01") and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get DLRs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send DLRs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of delor.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
delord = connect_JSON(config)
if options.amount is None:
address_summary = list_available(delord)
        for address, info in address_summary.items():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
        while not unlock_wallet(delord):
pass # Keep asking for passphrase until they get it right
txdata = create_tx(delord, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(delord, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = delord.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
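# Illustrative invocation (script name assumed; amounts are examples only):
#   python spendfrom.py --from=ADDR1,ADDR2 --to=ADDR3 --amount=0.5 --fee=0.0005 --dry_run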
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
pkg/ovs/ovn-nbctl.go
|
package ovs
import (
"encoding/json"
"errors"
"fmt"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
netv1 "k8s.io/api/networking/v1"
"k8s.io/klog"
kubeovnv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1"
"github.com/kubeovn/kube-ovn/pkg/util"
)
type AclDirection string
const (
SgAclIngressDirection AclDirection = "to-lport"
SgAclEgressDirection AclDirection = "from-lport"
)
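// ovnNbCommand shells out to ovn-nbctl with the configured timeout and
// --wait=sb, records per-method latency metrics, and warns when a call
// takes longer than 500ms.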
func (c Client) ovnNbCommand(cmdArgs ...string) (string, error) {
start := time.Now()
cmdArgs = append([]string{fmt.Sprintf("--timeout=%d", c.OvnTimeout), "--wait=sb"}, cmdArgs...)
raw, err := exec.Command(OvnNbCtl, cmdArgs...).CombinedOutput()
elapsed := float64((time.Since(start)) / time.Millisecond)
klog.V(4).Infof("command %s %s in %vms, output %q", OvnNbCtl, strings.Join(cmdArgs, " "), elapsed, raw)
method := ""
for _, arg := range cmdArgs {
if !strings.HasPrefix(arg, "--") {
method = arg
break
}
}
code := "0"
defer func() {
ovsClientRequestLatency.WithLabelValues("ovn-nb", method, code).Observe(elapsed)
}()
if err != nil {
code = "1"
klog.Warningf("ovn-nbctl command error: %s %s in %vms", OvnNbCtl, strings.Join(cmdArgs, " "), elapsed)
return "", fmt.Errorf("%s, %q", raw, err)
} else if elapsed > 500 {
klog.Warningf("ovn-nbctl command took too long: %s %s in %vms", OvnNbCtl, strings.Join(cmdArgs, " "), elapsed)
}
return trimCommandOutput(raw), nil
}
func (c Client) SetAzName(azName string) error {
if _, err := c.ovnNbCommand("set", "NB_Global", ".", fmt.Sprintf("name=%s", azName)); err != nil {
return fmt.Errorf("failed to set az name, %v", err)
}
return nil
}
func (c Client) SetICAutoRoute(enable bool, blackList []string) error {
if enable {
if _, err := c.ovnNbCommand("set", "NB_Global", ".", "options:ic-route-adv=true", "options:ic-route-learn=true", fmt.Sprintf("options:ic-route-blacklist=%s", strings.Join(blackList, ","))); err != nil {
return fmt.Errorf("failed to enable ovn-ic auto route, %v", err)
}
return nil
} else {
if _, err := c.ovnNbCommand("set", "NB_Global", ".", "options:ic-route-adv=false", "options:ic-route-learn=false"); err != nil {
return fmt.Errorf("failed to disable ovn-ic auto route, %v", err)
}
return nil
}
}
// DeleteLogicalSwitchPort delete logical switch port in ovn
func (c Client) DeleteLogicalSwitchPort(port string) error {
if _, err := c.ovnNbCommand(IfExists, "lsp-del", port); err != nil {
return fmt.Errorf("failed to delete logical switch port %s, %v", port, err)
}
return nil
}
// DeleteLogicalRouterPort delete logical router port in ovn
func (c Client) DeleteLogicalRouterPort(port string) error {
if _, err := c.ovnNbCommand(IfExists, "lrp-del", port); err != nil {
return fmt.Errorf("failed to delete logical router port %s, %v", port, err)
}
return nil
}
func (c Client) CreateICLogicalRouterPort(az, mac, subnet string, chassises []string) error {
if _, err := c.ovnNbCommand(MayExist, "lrp-add", c.ClusterRouter, fmt.Sprintf("%s-ts", az), mac, subnet); err != nil {
return fmt.Errorf("failed to crate ovn-ic lrp, %v", err)
}
if _, err := c.ovnNbCommand(MayExist, "lsp-add", util.InterconnectionSwitch, fmt.Sprintf("ts-%s", az), "--",
"lsp-set-addresses", fmt.Sprintf("ts-%s", az), "router", "--",
"lsp-set-type", fmt.Sprintf("ts-%s", az), "router", "--",
"lsp-set-options", fmt.Sprintf("ts-%s", az), fmt.Sprintf("router-port=%s-ts", az), "--",
"set", "logical_switch_port", fmt.Sprintf("ts-%s", az), fmt.Sprintf("external_ids:vendor=%s", util.CniTypeName)); err != nil {
return fmt.Errorf("failed to create ovn-ic lsp, %v", err)
}
for index, chassis := range chassises {
if _, err := c.ovnNbCommand("lrp-set-gateway-chassis", fmt.Sprintf("%s-ts", az), chassis, fmt.Sprintf("%d", 100-index)); err != nil {
return fmt.Errorf("failed to set gateway chassis, %v", err)
}
}
return nil
}
func (c Client) DeleteICLogicalRouterPort(az string) error {
if err := c.DeleteLogicalRouterPort(fmt.Sprintf("%s-ts", az)); err != nil {
return fmt.Errorf("failed to delete ovn-ic logical router port. %v", err)
}
if err := c.DeleteLogicalSwitchPort(fmt.Sprintf("ts-%s", az)); err != nil {
return fmt.Errorf("failed to delete ovn-ic logical switch port. %v", err)
}
return nil
}
func (c Client) SetPortExternalIds(port, key, value string) error {
rets, err := c.ListLogicalEntity("logical_switch_port", fmt.Sprintf("name=%s", port))
if err != nil {
return fmt.Errorf("failed to find port %s. %v", port, err)
}
if len(rets) == 0 {
return nil
}
if _, err := c.ovnNbCommand("set", "logical_switch_port", port, fmt.Sprintf("external_ids:%s=\"%s\"", key, value)); err != nil {
klog.Errorf("set port %s external_ids failed %v", port, err)
return err
}
return nil
}
// CreatePort create logical switch port in ovn
func (c Client) CreatePort(ls, port, ip, cidr, mac, tag, pod, namespace string, portSecurity bool, securityGroups string) error {
var ovnCommand []string
if util.CheckProtocol(cidr) == kubeovnv1.ProtocolDual {
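		// Dual stack: ip and cidr are comma-separated "<v4>,<v6>" pairs,
		// and both addresses are set on the same logical switch port.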
ips := strings.Split(ip, ",")
ovnCommand = []string{MayExist, "lsp-add", ls, port, "--",
"lsp-set-addresses", port, fmt.Sprintf("%s %s %s", mac, ips[0], ips[1])}
ipAddr := util.GetIpAddrWithMask(ip, cidr)
ipAddrs := strings.Split(ipAddr, ",")
if portSecurity {
ovnCommand = append(ovnCommand,
"--", "lsp-set-port-security", port, fmt.Sprintf("%s %s %s", mac, ipAddrs[0], ipAddrs[1]))
}
} else {
ovnCommand = []string{MayExist, "lsp-add", ls, port, "--",
"lsp-set-addresses", port, fmt.Sprintf("%s %s", mac, ip)}
if portSecurity {
ovnCommand = append(ovnCommand,
"--", "lsp-set-port-security", port, fmt.Sprintf("%s %s", mac, ip))
if securityGroups != "" {
sgList := strings.Split(securityGroups, ",")
ovnCommand = append(ovnCommand,
"--", "set", "logical_switch_port", port, fmt.Sprintf("external_ids:security_groups=%s", securityGroups))
for _, sg := range sgList {
ovnCommand = append(ovnCommand,
"--", "set", "logical_switch_port", port, fmt.Sprintf("external_ids:associated_sg_%s=true", sg))
}
}
}
}
if tag != "" && tag != "0" {
ovnCommand = append(ovnCommand,
"--", "set", "logical_switch_port", port, fmt.Sprintf("tag=%s", tag))
}
if pod != "" && namespace != "" {
ovnCommand = append(ovnCommand,
"--", "set", "logical_switch_port", port, fmt.Sprintf("external_ids:pod=%s/%s", namespace, pod), fmt.Sprintf("external_ids:vendor=%s", util.CniTypeName))
} else {
ovnCommand = append(ovnCommand,
"--", "set", "logical_switch_port", port, fmt.Sprintf("external_ids:vendor=%s", util.CniTypeName))
}
if _, err := c.ovnNbCommand(ovnCommand...); err != nil {
klog.Errorf("create port %s failed %v", port, err)
return err
}
return nil
}
func (c Client) ListPodLogicalSwitchPorts(pod, namespace string) ([]string, error) {
output, err := c.ovnNbCommand("--format=csv", "--data=bare", "--no-heading", "--columns=name", "find", "logical_switch_port", fmt.Sprintf("external_ids:pod=%s/%s", namespace, pod))
if err != nil {
klog.Errorf("failed to list logical switch port, %v", err)
return nil, err
}
lines := strings.Split(output, "\n")
result := make([]string, 0, len(lines))
for _, l := range lines {
if len(strings.TrimSpace(l)) == 0 {
continue
}
result = append(result, strings.TrimSpace(l))
}
return result, nil
}
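// SetLogicalSwitchConfig updates the subnet, gateway and exclude_ips of an
// existing logical switch, and refreshes the attached router port networks
// for overlay (non-underlay) subnets.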
func (c Client) SetLogicalSwitchConfig(ls string, underlay bool, lr, protocol, subnet, gateway string, excludeIps []string) error {
var err error
cidrBlocks := strings.Split(subnet, ",")
mask := strings.Split(cidrBlocks[0], "/")[1]
var cmd []string
var networks string
switch protocol {
case kubeovnv1.ProtocolIPv4:
networks = fmt.Sprintf("%s/%s", gateway, mask)
cmd = []string{MayExist, "ls-add", ls, "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:subnet=%s", subnet), "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:gateway=%s", gateway), "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:exclude_ips=%s", strings.Join(excludeIps, " "))}
case kubeovnv1.ProtocolIPv6:
gateway := strings.ReplaceAll(gateway, ":", "\\:")
networks = fmt.Sprintf("%s/%s", gateway, mask)
cmd = []string{MayExist, "ls-add", ls, "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:ipv6_prefix=%s", strings.Split(subnet, "/")[0]), "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:gateway=%s", gateway), "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:exclude_ips=%s", strings.Join(excludeIps, " "))}
case kubeovnv1.ProtocolDual:
gws := strings.Split(gateway, ",")
v6Mask := strings.Split(cidrBlocks[1], "/")[1]
gwStr := gws[0] + "/" + mask + "," + gws[1] + "/" + v6Mask
networks = strings.ReplaceAll(strings.Join(strings.Split(gwStr, ","), " "), ":", "\\:")
cmd = []string{MayExist, "ls-add", ls, "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:subnet=%s", cidrBlocks[0]), "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:gateway=%s", gateway), "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:ipv6_prefix=%s", strings.Split(cidrBlocks[1], "/")[0]), "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:exclude_ips=%s", strings.Join(excludeIps, " "))}
}
if !underlay {
cmd = append(cmd, []string{"--",
"set", "logical_router_port", fmt.Sprintf("%s-%s", lr, ls), fmt.Sprintf("networks=%s", networks)}...)
}
cmd = append(cmd, []string{"--",
"set", "logical_switch", ls, fmt.Sprintf("external_ids:vendor=%s", util.CniTypeName)}...)
_, err = c.ovnNbCommand(cmd...)
if err != nil {
klog.Errorf("set switch config for %s failed %v", ls, err)
return err
}
return nil
}
// CreateLogicalSwitch create logical switch in ovn, connect it to router and apply tcp/udp lb rules
func (c Client) CreateLogicalSwitch(ls, lr, protocol, subnet, gateway string, excludeIps []string, underlay, defaultVpc bool) error {
var err error
switch protocol {
case kubeovnv1.ProtocolIPv4:
_, err = c.ovnNbCommand(MayExist, "ls-add", ls, "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:subnet=%s", subnet), "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:gateway=%s", gateway), "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:exclude_ips=%s", strings.Join(excludeIps, " ")), "--",
"set", "logical_switch", ls, fmt.Sprintf("external_ids:vendor=%s", util.CniTypeName))
case kubeovnv1.ProtocolIPv6:
_, err = c.ovnNbCommand(MayExist, "ls-add", ls, "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:ipv6_prefix=%s", strings.Split(subnet, "/")[0]), "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:gateway=%s", gateway), "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:exclude_ips=%s", strings.Join(excludeIps, " ")), "--",
"set", "logical_switch", ls, fmt.Sprintf("external_ids:vendor=%s", util.CniTypeName))
case kubeovnv1.ProtocolDual:
		// gateway is not an official logical_switch column; it is kept in
		// other_config for kube-ovn's private use
cidrBlocks := strings.Split(subnet, ",")
_, err = c.ovnNbCommand(MayExist, "ls-add", ls, "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:subnet=%s", cidrBlocks[0]), "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:gateway=%s", gateway), "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:ipv6_prefix=%s", strings.Split(cidrBlocks[1], "/")[0]), "--",
"set", "logical_switch", ls, fmt.Sprintf("other_config:exclude_ips=%s", strings.Join(excludeIps, " ")), "--",
"set", "logical_switch", ls, fmt.Sprintf("external_ids:vendor=%s", util.CniTypeName))
}
if err != nil {
klog.Errorf("create switch %s failed %v", ls, err)
return err
}
ip := util.GetIpAddrWithMask(gateway, subnet)
mac := util.GenerateMac()
if !underlay {
if err := c.createRouterPort(ls, lr, ip, mac); err != nil {
klog.Errorf("failed to connect switch %s to router, %v", ls, err)
return err
}
}
return nil
}
func (c Client) AddLbToLogicalSwitch(tcpLb, tcpSessLb, udpLb, udpSessLb, ls string) error {
if err := c.addLoadBalancerToLogicalSwitch(tcpLb, ls); err != nil {
klog.Errorf("failed to add tcp lb to %s, %v", ls, err)
return err
}
if err := c.addLoadBalancerToLogicalSwitch(udpLb, ls); err != nil {
klog.Errorf("failed to add udp lb to %s, %v", ls, err)
return err
}
if err := c.addLoadBalancerToLogicalSwitch(tcpSessLb, ls); err != nil {
klog.Errorf("failed to add tcp session lb to %s, %v", ls, err)
return err
}
if err := c.addLoadBalancerToLogicalSwitch(udpSessLb, ls); err != nil {
klog.Errorf("failed to add udp session lb to %s, %v", ls, err)
return err
}
return nil
}
func (c Client) RemoveLbFromLogicalSwitch(tcpLb, tcpSessLb, udpLb, udpSessLb, ls string) error {
if err := c.removeLoadBalancerFromLogicalSwitch(tcpLb, ls); err != nil {
klog.Errorf("failed to add tcp lb to %s, %v", ls, err)
return err
}
if err := c.removeLoadBalancerFromLogicalSwitch(udpLb, ls); err != nil {
klog.Errorf("failed to add udp lb to %s, %v", ls, err)
return err
}
if err := c.removeLoadBalancerFromLogicalSwitch(tcpSessLb, ls); err != nil {
klog.Errorf("failed to add tcp session lb to %s, %v", ls, err)
return err
}
if err := c.removeLoadBalancerFromLogicalSwitch(udpSessLb, ls); err != nil {
klog.Errorf("failed to add udp session lb to %s, %v", ls, err)
return err
}
return nil
}
// DeleteLoadBalancer delete loadbalancer in ovn
func (c Client) DeleteLoadBalancer(lbs ...string) error {
for _, lb := range lbs {
lbid, err := c.FindLoadbalancer(lb)
if err != nil {
klog.Warningf("failed to find load_balancer '%s', %v", lb, err)
continue
}
if _, err := c.ovnNbCommand(IfExists, "destroy", "load_balancer", lbid); err != nil {
return err
}
}
return nil
}
// ListLoadBalancer list loadbalancer names
func (c Client) ListLoadBalancer() ([]string, error) {
output, err := c.ovnNbCommand("--format=csv", "--data=bare", "--no-heading", "--columns=name", "find", "load_balancer")
if err != nil {
klog.Errorf("failed to list load balancer %v", err)
return nil, err
}
lines := strings.Split(output, "\n")
result := make([]string, 0, len(lines))
for _, l := range lines {
l = strings.TrimSpace(l)
if len(l) > 0 {
result = append(result, l)
}
}
return result, nil
}
func (c Client) CreateGatewaySwitch(name, ip, mac string, chassises []string) error {
lsTolr := fmt.Sprintf("%s-%s", name, c.ClusterRouter)
lrTols := fmt.Sprintf("%s-%s", c.ClusterRouter, name)
localnetPort := fmt.Sprintf("ln-%s", name)
_, err := c.ovnNbCommand(
MayExist, "ls-add", name, "--",
"set", "logical_switch", name, fmt.Sprintf("external_ids:vendor=%s", util.CniTypeName), "--",
MayExist, "lsp-add", name, localnetPort, "--",
"lsp-set-type", localnetPort, "localnet", "--",
"lsp-set-addresses", localnetPort, "unknown", "--",
"lsp-set-options", localnetPort, "network_name=external", "--",
MayExist, "lrp-add", c.ClusterRouter, lrTols, mac, ip, "--",
MayExist, "lsp-add", name, lsTolr, "--",
"lsp-set-type", lsTolr, "router", "--",
"lsp-set-addresses", lsTolr, "router", "--",
"lsp-set-options", lsTolr, fmt.Sprintf("router-port=%s", lrTols),
)
if err != nil {
return fmt.Errorf("failed to create external gateway switch, %v", err)
}
for index, chassis := range chassises {
if _, err := c.ovnNbCommand("lrp-set-gateway-chassis", lrTols, chassis, fmt.Sprintf("%d", 100-index)); err != nil {
return fmt.Errorf("failed to set gateway chassis, %v", err)
}
}
return nil
}
func (c Client) DeleteGatewaySwitch(name string) error {
lrTols := fmt.Sprintf("%s-%s", c.ClusterRouter, name)
_, err := c.ovnNbCommand(
IfExists, "ls-del", name, "--",
IfExists, "lrp-del", lrTols,
)
return err
}
// ListLogicalSwitch list logical switch names
func (c Client) ListLogicalSwitch(args ...string) ([]string, error) {
return c.ListLogicalEntity("logical_switch", args...)
}
func (c Client) ListLogicalEntity(entity string, args ...string) ([]string, error) {
cmd := []string{"--format=csv", "--data=bare", "--no-heading", "--columns=name", "find", entity}
cmd = append(cmd, args...)
output, err := c.ovnNbCommand(cmd...)
if err != nil {
klog.Errorf("failed to list logical %s %v", entity, err)
return nil, err
}
lines := strings.Split(output, "\n")
result := make([]string, 0, len(lines))
for _, l := range lines {
l = strings.TrimSpace(l)
if len(l) > 0 {
result = append(result, l)
}
}
return result, nil
}
func (c Client) CustomFindEntity(entity string, attris []string, args ...string) (result []map[string][]string, err error) {
result = []map[string][]string{}
var attrStr strings.Builder
for _, e := range attris {
attrStr.WriteString(e)
attrStr.WriteString(",")
}
// Assuming that the order of the elements in attris does not change
cmd := []string{"--format=csv", "--data=bare", "--no-heading", fmt.Sprintf("--columns=%s", attrStr.String()), "find", entity}
cmd = append(cmd, args...)
output, err := c.ovnNbCommand(cmd...)
if err != nil {
klog.Errorf("failed to customized list logical %s %v", entity, err)
return nil, err
}
if output == "" {
return result, nil
}
lines := strings.Split(output, "\n")
for _, l := range lines {
aResult := make(map[string][]string)
parts := strings.Split(strings.TrimSpace(l), ",")
for i, e := range attris {
part := strings.Split(strings.TrimSpace(parts[i]), " ")
if part[0] == "" {
aResult[e] = []string{}
} else {
aResult[e] = part
}
}
result = append(result, aResult)
}
return result, nil
}
func (c Client) GetEntityInfo(entity string, index string, attris []string) (result map[string]string, err error) {
var attrstr strings.Builder
for _, e := range attris {
attrstr.WriteString(e)
attrstr.WriteString(" ")
}
cmd := []string{"get", entity, index, strings.TrimSpace(attrstr.String())}
output, err := c.ovnNbCommand(cmd...)
if err != nil {
klog.Errorf("failed to get attributes from %s %s %v", entity, index, err)
return nil, err
}
result = make(map[string]string)
if output == "" {
return result, nil
}
lines := strings.Split(output, "\n")
if len(lines) != len(attris) {
klog.Errorf("failed to get attributes from %s %s %s", entity, index, attris)
return nil, errors.New("length abnormal")
}
for i, l := range lines {
result[attris[i]] = l
}
return result, nil
}
func (c Client) LogicalSwitchExists(logicalSwitch string, args ...string) (bool, error) {
lss, err := c.ListLogicalSwitch(args...)
if err != nil {
return false, err
}
for _, ls := range lss {
if ls == logicalSwitch {
return true, nil
}
}
return false, nil
}
func (c Client) ListLogicalSwitchPort(needVendorFilter bool) ([]string, error) {
cmdArg := []string{"--format=csv", "--data=bare", "--no-heading", "--columns=name", "find", "logical_switch_port", "type=\"\""}
if needVendorFilter {
cmdArg = append(cmdArg, fmt.Sprintf("external_ids:vendor=%s", util.CniTypeName))
}
output, err := c.ovnNbCommand(cmdArg...)
if err != nil {
klog.Errorf("failed to list logical switch port, %v", err)
return nil, err
}
lines := strings.Split(output, "\n")
result := make([]string, 0, len(lines))
for _, l := range lines {
if len(strings.TrimSpace(l)) == 0 {
continue
}
result = append(result, strings.TrimSpace(l))
}
return result, nil
}
func (c Client) LogicalSwitchPortExists(port string) (bool, error) {
output, err := c.ovnNbCommand("--format=csv", "--data=bare", "--no-heading", "--columns=name", "find", "logical_switch_port", fmt.Sprintf("name=%s", port))
if err != nil {
klog.Errorf("failed to find port %s", port)
return false, err
}
if output != "" {
return true, nil
}
return false, nil
}
func (c Client) ListRemoteLogicalSwitchPortAddress() ([]string, error) {
output, err := c.ovnNbCommand("--format=csv", "--data=bare", "--no-heading", "--columns=addresses", "find", "logical_switch_port", "type=remote")
if err != nil {
return nil, fmt.Errorf("failed to list ic remote addresses, %v", err)
}
lines := strings.Split(output, "\n")
result := make([]string, 0, len(lines))
for _, l := range lines {
if len(strings.TrimSpace(l)) == 0 {
continue
}
if len(strings.Split(l, " ")) != 2 {
continue
}
cidr := strings.Split(l, " ")[1]
result = append(result, strings.TrimSpace(cidr))
}
return result, nil
}
// ListLogicalRouter list logical router names
func (c Client) ListLogicalRouter(args ...string) ([]string, error) {
return c.ListLogicalEntity("logical_router", args...)
}
// DeleteLogicalSwitch delete logical switch
func (c Client) DeleteLogicalSwitch(ls string) error {
if _, err := c.ovnNbCommand(IfExists, "ls-del", ls); err != nil {
klog.Errorf("failed to del ls %s, %v", ls, err)
return err
}
return nil
}
// CreateLogicalRouter create logical router in ovn
func (c Client) CreateLogicalRouter(lr string) error {
_, err := c.ovnNbCommand(MayExist, "lr-add", lr, "--",
"set", "Logical_Router", lr, fmt.Sprintf("external_ids:vendor=%s", util.CniTypeName))
return err
}
// DeleteLogicalRouter delete logical router in ovn
func (c Client) DeleteLogicalRouter(lr string) error {
_, err := c.ovnNbCommand(IfExists, "lr-del", lr)
return err
}
func (c Client) RemoveRouterPort(ls, lr string) error {
lsTolr := fmt.Sprintf("%s-%s", ls, lr)
lrTols := fmt.Sprintf("%s-%s", lr, ls)
_, err := c.ovnNbCommand(IfExists, "lsp-del", lsTolr, "--",
IfExists, "lrp-del", lrTols)
if err != nil {
klog.Errorf("failed to remove router port, %v", err)
return err
}
return nil
}
func (c Client) createRouterPort(ls, lr, ip, mac string) error {
klog.Infof("add %s to %s with ip: %s, mac :%s", ls, lr, ip, mac)
lsTolr := fmt.Sprintf("%s-%s", ls, lr)
lrTols := fmt.Sprintf("%s-%s", lr, ls)
_, err := c.ovnNbCommand(MayExist, "lsp-add", ls, lsTolr, "--",
"set", "logical_switch_port", lsTolr, "type=router", "--",
"set", "logical_switch_port", lsTolr, fmt.Sprintf("addresses=\"%s\"", mac), "--",
"set", "logical_switch_port", lsTolr, fmt.Sprintf("options:router-port=%s", lrTols), "--",
"set", "logical_switch_port", lsTolr, fmt.Sprintf("external_ids:vendor=%s", util.CniTypeName))
if err != nil {
klog.Errorf("failed to create switch router port %s %v", lsTolr, err)
return err
}
	if len(ip) == 0 {
		klog.Errorf("failed to create router port %s: ip is empty", lrTols)
		return fmt.Errorf("failed to create router port %s: ip is empty", lrTols)
	}
ipStr := strings.Split(ip, ",")
if len(ipStr) == 2 {
_, err = c.ovnNbCommand(MayExist, "lrp-add", lr, lrTols, mac, ipStr[0], ipStr[1])
} else {
_, err = c.ovnNbCommand(MayExist, "lrp-add", lr, lrTols, mac, ipStr[0])
}
if err != nil {
klog.Errorf("failed to create router port %s %v", lrTols, err)
return err
}
return nil
}
type StaticRoute struct {
Policy string
CIDR string
NextHop string
}
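// ListStaticRoute returns static routes whose external_ids map is empty;
// the "{=}" operator in ovn-nbctl find compares the whole map.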
func (c Client) ListStaticRoute() ([]StaticRoute, error) {
output, err := c.ovnNbCommand("--format=csv", "--no-heading", "--data=bare", "--columns=ip_prefix,nexthop,policy", "find", "Logical_Router_Static_Route", "external_ids{=}{}")
if err != nil {
return nil, err
}
entries := strings.Split(output, "\n")
staticRoutes := make([]StaticRoute, 0, len(entries))
	for _, entry := range entries {
if len(strings.Split(entry, ",")) == 3 {
t := strings.Split(entry, ",")
staticRoutes = append(staticRoutes,
StaticRoute{CIDR: strings.TrimSpace(t[0]), NextHop: strings.TrimSpace(t[1]), Policy: strings.TrimSpace(t[2])})
}
}
return staticRoutes, nil
}
// AddStaticRoute add a static route rule in ovn
func (c Client) AddStaticRoute(policy, cidr, nextHop, router string, routeType string) error {
if policy == "" {
policy = PolicyDstIP
}
for _, cidrBlock := range strings.Split(cidr, ",") {
for _, gw := range strings.Split(nextHop, ",") {
if util.CheckProtocol(cidrBlock) != util.CheckProtocol(gw) {
continue
}
if routeType == util.EcmpRouteType {
if _, err := c.ovnNbCommand(MayExist, fmt.Sprintf("%s=%s", Policy, policy), "--ecmp", "lr-route-add", router, cidrBlock, gw); err != nil {
return err
}
} else {
if _, err := c.ovnNbCommand(MayExist, fmt.Sprintf("%s=%s", Policy, policy), "lr-route-add", router, cidrBlock, gw); err != nil {
return err
}
}
}
}
return nil
}
func (c Client) GetStaticRouteList(router string) (routeList []*StaticRoute, err error) {
output, err := c.ovnNbCommand("lr-route-list", router)
if err != nil {
klog.Errorf("failed to list logical router route %v", err)
return nil, err
}
return parseLrRouteListOutput(output)
}
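// routeRegexp matches one row of `ovn-nbctl lr-route-list` output:
// "<prefix>[/len] <nexthop> dst-ip|src-ip [...]", for IPv4 and IPv6 alike.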
var routeRegexp = regexp.MustCompile(`^\s*((\d+(\.\d+){3})|(([a-f0-9:]*:+)+[a-f0-9]?))(/\d+)?\s+((\d+(\.\d+){3})|(([a-f0-9:]*:+)+[a-f0-9]?))\s+(dst-ip|src-ip)(\s+.+)?$`)
func parseLrRouteListOutput(output string) (routeList []*StaticRoute, err error) {
lines := strings.Split(output, "\n")
routeList = make([]*StaticRoute, 0, len(lines))
for _, l := range lines {
if strings.Contains(l, "learned") {
continue
}
if len(l) == 0 {
continue
}
if !routeRegexp.MatchString(l) {
continue
}
fields := strings.Fields(l)
routeList = append(routeList, &StaticRoute{
Policy: fields[2],
CIDR: fields[0],
NextHop: fields[1],
})
}
return routeList, nil
}
func (c Client) UpdateNatRule(policy, logicalIP, externalIP, router, logicalMac, port string) error {
if policy == "snat" {
if externalIP == "" {
_, err := c.ovnNbCommand(IfExists, "lr-nat-del", router, "snat", logicalIP)
return err
}
_, err := c.ovnNbCommand(IfExists, "lr-nat-del", router, "snat", logicalIP, "--",
MayExist, "lr-nat-add", router, policy, externalIP, logicalIP)
return err
} else {
output, err := c.ovnNbCommand("--format=csv", "--no-heading", "--data=bare", "--columns=external_ip", "find", "NAT", fmt.Sprintf("logical_ip=%s", logicalIP), "type=dnat_and_snat")
if err != nil {
klog.Errorf("failed to list nat rules, %v", err)
return err
}
eips := strings.Split(output, "\n")
for _, eip := range eips {
eip = strings.TrimSpace(eip)
if eip == "" || eip == externalIP {
continue
}
if _, err := c.ovnNbCommand(IfExists, "lr-nat-del", router, "dnat_and_snat", eip); err != nil {
klog.Errorf("failed to delete nat rule, %v", err)
return err
}
}
if externalIP != "" {
if c.ExternalGatewayType == "distributed" {
_, err = c.ovnNbCommand(MayExist, "--stateless", "lr-nat-add", router, policy, externalIP, logicalIP, port, logicalMac)
return err
} else {
_, err = c.ovnNbCommand(MayExist, "lr-nat-add", router, policy, externalIP, logicalIP)
return err
}
}
}
return nil
}
func (c Client) DeleteNatRule(logicalIP, router string) error {
output, err := c.ovnNbCommand("--format=csv", "--no-heading", "--data=bare", "--columns=type,external_ip", "find", "NAT", fmt.Sprintf("logical_ip=%s", logicalIP))
if err != nil {
klog.Errorf("failed to list nat rules, %v", err)
return err
}
rules := strings.Split(output, "\n")
for _, rule := range rules {
if len(strings.Split(rule, ",")) != 2 {
continue
}
policy, externalIP := strings.Split(rule, ",")[0], strings.Split(rule, ",")[1]
if policy == "snat" {
if _, err := c.ovnNbCommand(IfExists, "lr-nat-del", router, "snat", logicalIP); err != nil {
klog.Errorf("failed to delete nat rule, %v", err)
return err
}
} else if policy == "dnat_and_snat" {
if _, err := c.ovnNbCommand(IfExists, "lr-nat-del", router, "dnat_and_snat", externalIP); err != nil {
klog.Errorf("failed to delete nat rule, %v", err)
return err
}
}
}
return err
}
func (c Client) DeleteMatchedStaticRoute(cidr, nexthop, router string) error {
if cidr == "" || nexthop == "" {
return nil
}
_, err := c.ovnNbCommand(IfExists, "lr-route-del", router, cidr, nexthop)
return err
}
// DeleteStaticRoute delete a static route rule in ovn
func (c Client) DeleteStaticRoute(cidr, router string) error {
if cidr == "" {
return nil
}
_, err := c.ovnNbCommand(IfExists, "lr-route-del", router, cidr)
return err
}
func (c Client) DeleteStaticRouteByNextHop(nextHop string) error {
if strings.TrimSpace(nextHop) == "" {
return nil
}
output, err := c.ovnNbCommand("--format=csv", "--no-heading", "--data=bare", "--columns=ip_prefix", "find", "Logical_Router_Static_Route", fmt.Sprintf("nexthop=%s", nextHop))
if err != nil {
klog.Errorf("failed to list static route %s, %v", nextHop, err)
return err
}
ipPrefixes := strings.Split(output, "\n")
for _, ipPre := range ipPrefixes {
if strings.TrimSpace(ipPre) == "" {
continue
}
if err := c.DeleteStaticRoute(ipPre, c.ClusterRouter); err != nil {
klog.Errorf("failed to delete route %s, %v", ipPre, err)
return err
}
}
return nil
}
// FindLoadbalancer find ovn loadbalancer uuid by name
func (c Client) FindLoadbalancer(lb string) (string, error) {
output, err := c.ovnNbCommand("--data=bare", "--no-heading", "--columns=_uuid",
"find", "load_balancer", fmt.Sprintf("name=%s", lb))
count := len(strings.FieldsFunc(output, func(c rune) bool { return c == '\n' }))
if count > 1 {
klog.Errorf("%s has %d lb entries", lb, count)
return "", fmt.Errorf("%s has %d lb entries", lb, count)
}
return output, err
}
// CreateLoadBalancer create loadbalancer in ovn
func (c Client) CreateLoadBalancer(lb, protocol, selectFields string) error {
var err error
if selectFields == "" {
_, err = c.ovnNbCommand("create", "load_balancer",
fmt.Sprintf("name=%s", lb), fmt.Sprintf("protocol=%s", protocol))
} else {
_, err = c.ovnNbCommand("create", "load_balancer",
fmt.Sprintf("name=%s", lb), fmt.Sprintf("protocol=%s", protocol), fmt.Sprintf("selection_fields=%s", selectFields))
}
return err
}
// CreateLoadBalancerRule create loadbalancer rule in ovn
func (c Client) CreateLoadBalancerRule(lb, vip, ips, protocol string) error {
_, err := c.ovnNbCommand(MayExist, "lb-add", lb, vip, ips, strings.ToLower(protocol))
return err
}
func (c Client) addLoadBalancerToLogicalSwitch(lb, ls string) error {
_, err := c.ovnNbCommand(MayExist, "ls-lb-add", ls, lb)
return err
}
func (c Client) removeLoadBalancerFromLogicalSwitch(lb, ls string) error {
if lb == "" {
return nil
}
lbUuid, err := c.FindLoadbalancer(lb)
if err != nil {
return err
}
if lbUuid == "" {
return nil
}
_, err = c.ovnNbCommand(IfExists, "ls-lb-del", ls, lb)
return err
}
// DeleteLoadBalancerVip delete a vip rule from loadbalancer
func (c Client) DeleteLoadBalancerVip(vip, lb string) error {
lbUuid, err := c.FindLoadbalancer(lb)
if err != nil {
klog.Errorf("failed to get lb %v", err)
return err
}
existVips, err := c.GetLoadBalancerVips(lbUuid)
if err != nil {
klog.Errorf("failed to list lb %s vips, %v", lb, err)
return err
}
	// an empty vip, or removing the last vip, would destroy the loadbalancer, so skip
if vip == "" || len(existVips) == 1 {
return nil
}
_, err = c.ovnNbCommand(IfExists, "lb-del", lb, vip)
return err
}
// GetLoadBalancerVips return vips of a loadbalancer
func (c Client) GetLoadBalancerVips(lb string) (map[string]string, error) {
output, err := c.ovnNbCommand("--data=bare", "--no-heading",
"get", "load_balancer", lb, "vips")
if err != nil {
return nil, err
}
result := map[string]string{}
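	// ovn-nbctl prints the vips map as {"vip"="backends"}; swapping '=' for ':'
	// turns it into JSON that can be unmarshalled directly.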
err = json.Unmarshal([]byte(strings.Replace(output, "=", ":", -1)), &result)
return result, err
}
// CleanLogicalSwitchAcl clean acl of a switch
func (c Client) CleanLogicalSwitchAcl(ls string) error {
_, err := c.ovnNbCommand("acl-del", ls)
return err
}
// ResetLogicalSwitchAcl reset acl of a switch
func (c Client) ResetLogicalSwitchAcl(ls string) error {
_, err := c.ovnNbCommand("acl-del", ls)
return err
}
// SetPrivateLogicalSwitch will drop all ingress traffic except allow subnets
func (c Client) SetPrivateLogicalSwitch(ls, protocol, cidr string, allow []string) error {
delArgs := []string{"acl-del", ls}
allowArgs := []string{}
var dropArgs []string
if protocol == kubeovnv1.ProtocolIPv4 {
dropArgs = []string{"--", "--log", fmt.Sprintf("--name=%s", ls), fmt.Sprintf("--severity=%s", "warning"), "acl-add", ls, "to-lport", util.DefaultDropPriority, "ip", "drop"}
allowArgs = append(allowArgs, "--", MayExist, "acl-add", ls, "to-lport", util.NodeAllowPriority, fmt.Sprintf("ip4.src==%s", c.NodeSwitchCIDR), "allow-related")
allowArgs = append(allowArgs, "--", MayExist, "acl-add", ls, "to-lport", util.SubnetAllowPriority, fmt.Sprintf(`ip4.src==%s && ip4.dst==%s`, cidr, cidr), "allow-related")
} else {
dropArgs = []string{"--", "--log", fmt.Sprintf("--name=%s", ls), fmt.Sprintf("--severity=%s", "warning"), "acl-add", ls, "to-lport", util.DefaultDropPriority, "ip", "drop"}
allowArgs = append(allowArgs, "--", MayExist, "acl-add", ls, "to-lport", util.NodeAllowPriority, fmt.Sprintf("ip6.src==%s", c.NodeSwitchCIDR), "allow-related")
allowArgs = append(allowArgs, "--", MayExist, "acl-add", ls, "to-lport", util.SubnetAllowPriority, fmt.Sprintf(`ip6.src==%s && ip6.dst==%s`, cidr, cidr), "allow-related")
}
ovnArgs := append(delArgs, dropArgs...)
for _, subnet := range allow {
if strings.TrimSpace(subnet) != "" {
var match string
switch protocol {
case kubeovnv1.ProtocolIPv4:
match = fmt.Sprintf("(ip4.src==%s && ip4.dst==%s) || (ip4.src==%s && ip4.dst==%s)", strings.TrimSpace(subnet), cidr, cidr, strings.TrimSpace(subnet))
case kubeovnv1.ProtocolIPv6:
match = fmt.Sprintf("(ip6.src==%s && ip6.dst==%s) || (ip6.src==%s && ip6.dst==%s)", strings.TrimSpace(subnet), cidr, cidr, strings.TrimSpace(subnet))
}
allowArgs = append(allowArgs, "--", MayExist, "acl-add", ls, "to-lport", util.SubnetAllowPriority, match, "allow-related")
}
}
ovnArgs = append(ovnArgs, allowArgs...)
_, err := c.ovnNbCommand(ovnArgs...)
return err
}
func (c Client) GetLogicalSwitchPortAddress(port string) ([]string, error) {
output, err := c.ovnNbCommand("get", "logical_switch_port", port, "addresses")
if err != nil {
klog.Errorf("get port %s addresses failed %v", port, err)
return nil, err
}
if strings.Contains(output, "dynamic") {
// [dynamic]
return nil, nil
}
output = strings.Trim(output, `[]"`)
if len(strings.Split(output, " ")) != 2 {
return nil, nil
}
// currently user may only have one fixed address
// ["0a:00:00:00:00:0c 10.16.0.13"]
mac := strings.Split(output, " ")[0]
ip := strings.Split(output, " ")[1]
return []string{mac, ip}, nil
}
func (c Client) GetLogicalSwitchPortDynamicAddress(port string) ([]string, error) {
output, err := c.ovnNbCommand("wait-until", "logical_switch_port", port, "dynamic_addresses!=[]", "--",
"get", "logical_switch_port", port, "dynamic-addresses")
if err != nil {
klog.Errorf("get port %s dynamic_addresses failed %v", port, err)
return nil, err
}
if output == "[]" {
return nil, ErrNoAddr
}
output = strings.Trim(output, `"`)
// "0a:00:00:00:00:02"
if len(strings.Split(output, " ")) != 2 {
klog.Error("Subnet address space has been exhausted")
return nil, ErrNoAddr
}
// "0a:00:00:00:00:02 100.64.0.3"
mac := strings.Split(output, " ")[0]
ip := strings.Split(output, " ")[1]
return []string{mac, ip}, nil
}
// GetPortAddr return port [mac, ip]
func (c Client) GetPortAddr(port string) ([]string, error) {
var address []string
var err error
address, err = c.GetLogicalSwitchPortAddress(port)
if err != nil {
return nil, err
}
if address == nil {
address, err = c.GetLogicalSwitchPortDynamicAddress(port)
if err != nil {
return nil, err
}
}
return address, nil
}
func (c Client) CreateNpPortGroup(pgName, npNs, npName string) error {
output, err := c.ovnNbCommand(
"--data=bare", "--no-heading", "--columns=_uuid", "find", "port_group", fmt.Sprintf("name=%s", pgName))
if err != nil {
klog.Errorf("failed to find port_group %s", pgName)
return err
}
if output != "" {
return nil
}
_, err = c.ovnNbCommand(
"pg-add", pgName,
"--", "set", "port_group", pgName, fmt.Sprintf("external_ids:np=%s/%s", npNs, npName),
)
return err
}
func (c Client) DeletePortGroup(pgName string) error {
if _, err := c.ovnNbCommand("get", "port_group", pgName, "_uuid"); err != nil {
if strings.Contains(err.Error(), "no row") {
return nil
}
klog.Errorf("failed to get pg %s, %v", pgName, err)
return err
}
_, err := c.ovnNbCommand("pg-del", pgName)
return err
}
type portGroup struct {
Name string
NpName string
NpNamespace string
}
func (c Client) ListNpPortGroup() ([]portGroup, error) {
output, err := c.ovnNbCommand("--data=bare", "--format=csv", "--no-heading", "--columns=name,external_ids", "find", "port_group", "external_ids:np!=[]")
if err != nil {
klog.Errorf("failed to list logical port-group, %v", err)
return nil, err
}
lines := strings.Split(output, "\n")
result := make([]portGroup, 0, len(lines))
for _, l := range lines {
if len(strings.TrimSpace(l)) == 0 {
continue
}
parts := strings.Split(strings.TrimSpace(l), ",")
if len(parts) != 2 {
continue
}
name := strings.TrimSpace(parts[0])
np := strings.Split(strings.TrimPrefix(strings.TrimSpace(parts[1]), "np="), "/")
if len(np) != 2 {
continue
}
result = append(result, portGroup{Name: name, NpNamespace: np[0], NpName: np[1]})
}
return result, nil
}
func (c Client) CreateAddressSet(asName, npNamespace, npName, direction string) error {
output, err := c.ovnNbCommand("--data=bare", "--no-heading", "--columns=_uuid", "find", "address_set", fmt.Sprintf("name=%s", asName))
if err != nil {
klog.Errorf("failed to find address_set %s", asName)
return err
}
if output != "" {
return nil
}
_, err = c.ovnNbCommand("create", "address_set", fmt.Sprintf("name=%s", asName), fmt.Sprintf("external_ids:np=%s/%s/%s", npNamespace, npName, direction))
return err
}
func (c Client) ListAddressSet(npNamespace, npName, direction string) ([]string, error) {
output, err := c.ovnNbCommand("--data=bare", "--no-heading", "--columns=name", "find", "address_set", fmt.Sprintf("external_ids:np=%s/%s/%s", npNamespace, npName, direction))
if err != nil {
klog.Errorf("failed to list address_set of %s/%s/%s", npNamespace, npName, direction)
return nil, err
}
return strings.Split(output, "\n"), nil
}
func (c Client) DeleteAddressSet(asName string) error {
_, err := c.ovnNbCommand(IfExists, "destroy", "address_set", asName)
return err
}
func (c Client) CreateIngressACL(npName, pgName, asIngressName, asExceptName, protocol string, npp []netv1.NetworkPolicyPort) error {
ipSuffix := "ip4"
if protocol == kubeovnv1.ProtocolIPv6 {
ipSuffix = "ip6"
}
pgAs := fmt.Sprintf("%s_%s", pgName, ipSuffix)
ovnArgs := []string{MayExist, "--type=port-group", "--log", fmt.Sprintf("--name=%s", npName), fmt.Sprintf("--severity=%s", "warning"), "acl-add", pgName, "to-lport", util.IngressDefaultDrop, fmt.Sprintf("%s.dst == $%s", ipSuffix, pgAs), "drop"}
if len(npp) == 0 {
allowArgs := []string{"--", MayExist, "--type=port-group", "acl-add", pgName, "to-lport", util.IngressAllowPriority, fmt.Sprintf("%s.src == $%s && %s.src != $%s && %s.dst == $%s", ipSuffix, asIngressName, ipSuffix, asExceptName, ipSuffix, pgAs), "allow-related"}
ovnArgs = append(ovnArgs, allowArgs...)
} else {
for _, port := range npp {
allowArgs := []string{"--", MayExist, "--type=port-group", "acl-add", pgName, "to-lport", util.IngressAllowPriority, fmt.Sprintf("%s.src == $%s && %s.src != $%s && %s.dst == %d && %s.dst == $%s", ipSuffix, asIngressName, ipSuffix, asExceptName, strings.ToLower(string(*port.Protocol)), port.Port.IntVal, ipSuffix, pgAs), "allow-related"}
ovnArgs = append(ovnArgs, allowArgs...)
}
}
_, err := c.ovnNbCommand(ovnArgs...)
return err
}
func (c Client) CreateEgressACL(npName, pgName, asEgressName, asExceptName, protocol string, npp []netv1.NetworkPolicyPort) error {
ipSuffix := "ip4"
if protocol == kubeovnv1.ProtocolIPv6 {
ipSuffix = "ip6"
}
pgAs := fmt.Sprintf("%s_%s", pgName, ipSuffix)
ovnArgs := []string{"--", MayExist, "--type=port-group", "--log", fmt.Sprintf("--name=%s", npName), fmt.Sprintf("--severity=%s", "warning"), "acl-add", pgName, "from-lport", util.EgressDefaultDrop, fmt.Sprintf("%s.src == $%s", ipSuffix, pgAs), "drop"}
if len(npp) == 0 {
allowArgs := []string{"--", MayExist, "--type=port-group", "acl-add", pgName, "from-lport", util.EgressAllowPriority, fmt.Sprintf("%s.dst == $%s && %s.dst != $%s && %s.src == $%s", ipSuffix, asEgressName, ipSuffix, asExceptName, ipSuffix, pgAs), "allow-related"}
ovnArgs = append(ovnArgs, allowArgs...)
} else {
for _, port := range npp {
allowArgs := []string{"--", MayExist, "--type=port-group", "acl-add", pgName, "from-lport", util.EgressAllowPriority, fmt.Sprintf("%s.dst == $%s && %s.dst == $%s && %s.dst == %d && %s.src == $%s", ipSuffix, asEgressName, ipSuffix, asExceptName, strings.ToLower(string(*port.Protocol)), port.Port.IntVal, ipSuffix, pgAs), "allow-related"}
ovnArgs = append(ovnArgs, allowArgs...)
}
}
_, err := c.ovnNbCommand(ovnArgs...)
return err
}
func (c Client) DeleteACL(pgName, direction string) error {
if direction != "" {
_, err := c.ovnNbCommand("--type=port-group", "acl-del", pgName, direction)
return err
} else {
_, err := c.ovnNbCommand("--type=port-group", "acl-del", pgName)
return err
}
}
func (c Client) CreateGatewayACL(pgName, gateway, cidr string) error {
for _, cidrBlock := range strings.Split(cidr, ",") {
for _, gw := range strings.Split(gateway, ",") {
if util.CheckProtocol(cidrBlock) != util.CheckProtocol(gw) {
continue
}
protocol := util.CheckProtocol(cidrBlock)
ipSuffix := "ip4"
if protocol == kubeovnv1.ProtocolIPv6 {
ipSuffix = "ip6"
}
ingressArgs := []string{MayExist, "--type=port-group", "acl-add", pgName, "to-lport", util.IngressAllowPriority, fmt.Sprintf("%s.src == %s", ipSuffix, gw), "allow-related"}
egressArgs := []string{"--", MayExist, "--type=port-group", "acl-add", pgName, "from-lport", util.EgressAllowPriority, fmt.Sprintf("%s.dst == %s", ipSuffix, gw), "allow-related"}
ovnArgs := append(ingressArgs, egressArgs...)
if _, err := c.ovnNbCommand(ovnArgs...); err != nil {
return err
}
}
}
return nil
}
func (c Client) CreateACLForNodePg(pgName, nodeIpStr string) error {
for _, nodeIp := range strings.Split(nodeIpStr, ",") {
protocol := util.CheckProtocol(nodeIp)
ipSuffix := "ip4"
if protocol == kubeovnv1.ProtocolIPv6 {
ipSuffix = "ip6"
}
pgAs := fmt.Sprintf("%s_%s", pgName, ipSuffix)
ingressArgs := []string{MayExist, "--type=port-group", "acl-add", pgName, "to-lport", util.NodeAllowPriority, fmt.Sprintf("%s.src == %s && %s.dst == $%s", ipSuffix, nodeIp, ipSuffix, pgAs), "allow-related"}
egressArgs := []string{"--", MayExist, "--type=port-group", "acl-add", pgName, "from-lport", util.NodeAllowPriority, fmt.Sprintf("%s.dst == %s && %s.src == $%s", ipSuffix, nodeIp, ipSuffix, pgAs), "allow-related"}
ovnArgs := append(ingressArgs, egressArgs...)
if _, err := c.ovnNbCommand(ovnArgs...); err != nil {
klog.Errorf("failed to add node port-group acl")
return err
}
}
return nil
}
func (c Client) DeleteAclForNodePg(pgName string) error {
ingressArgs := []string{"acl-del", pgName, "to-lport"}
if _, err := c.ovnNbCommand(ingressArgs...); err != nil {
klog.Errorf("failed to delete node port-group ingress acl")
return err
}
egressArgs := []string{"acl-del", pgName, "from-lport"}
if _, err := c.ovnNbCommand(egressArgs...); err != nil {
klog.Errorf("failed to delete node port-group egress acl")
return err
}
return nil
}
func (c Client) ListPgPorts(pgName string) ([]string, error) {
output, err := c.ovnNbCommand("--format=csv", "--data=bare", "--no-heading", "--columns=ports", "find", "port_group", fmt.Sprintf("name=%s", pgName))
if err != nil {
klog.Errorf("failed to list port-group ports, %v", err)
return nil, err
}
lines := strings.Split(output, "\n")
result := make([]string, 0, len(lines))
for _, l := range lines {
if len(strings.TrimSpace(l)) == 0 {
continue
}
result = append(result, strings.Split(l, " ")...)
}
return result, nil
}
func (c Client) ConvertLspNameToUuid(name string) (string, error) {
output, err := c.ovnNbCommand("--data=bare", "--no-heading", "--columns=_uuid", "find", "logical_switch_port", fmt.Sprintf("name=%s", name))
lines := strings.Split(output, "\n")
if len(lines) == 0 {
klog.Errorf("failed to get lsp uuid by name, %v", err)
return "", err
}
return strings.TrimSpace(lines[0]), nil
}
func (c Client) SetPortsToPortGroup(portGroup string, portNames []string) error {
ovnArgs := []string{"clear", "port_group", portGroup, "ports"}
if len(portNames) > 0 {
ovnArgs = []string{"pg-set-ports", portGroup}
ovnArgs = append(ovnArgs, portNames...)
}
_, err := c.ovnNbCommand(ovnArgs...)
return err
}
func (c Client) SetAddressesToAddressSet(addresses []string, as string) error {
ovnArgs := []string{"clear", "address_set", as, "addresses"}
if len(addresses) > 0 {
var newAddrs []string
for _, addr := range addresses {
if util.CheckProtocol(addr) == kubeovnv1.ProtocolIPv6 {
newAddr := strings.ReplaceAll(addr, ":", "\\:")
newAddrs = append(newAddrs, newAddr)
} else {
newAddrs = append(newAddrs, addr)
}
}
ovnArgs = append(ovnArgs, "--", "add", "address_set", as, "addresses")
ovnArgs = append(ovnArgs, newAddrs...)
}
_, err := c.ovnNbCommand(ovnArgs...)
return err
}
// StartOvnNbctlDaemon start a daemon and set OVN_NB_DAEMON env
func StartOvnNbctlDaemon(ovnNbAddr string) error {
klog.Infof("start ovn-nbctl daemon")
output, err := exec.Command(
"pkill",
"-f",
"ovn-nbctl",
).CombinedOutput()
if err != nil {
klog.Errorf("failed to kill old ovn-nbctl daemon: %q", output)
return err
}
command := []string{
fmt.Sprintf("--db=%s", ovnNbAddr),
"--pidfile",
"--detach",
"--overwrite-pidfile",
}
if os.Getenv("ENABLE_SSL") == "true" {
command = []string{
"-p", "/var/run/tls/key",
"-c", "/var/run/tls/cert",
"-C", "/var/run/tls/cacert",
fmt.Sprintf("--db=%s", ovnNbAddr),
"--pidfile",
"--detach",
"--overwrite-pidfile",
}
}
_ = os.Unsetenv("OVN_NB_DAEMON")
output, err = exec.Command("ovn-nbctl", command...).CombinedOutput()
if err != nil {
klog.Errorf("start ovn-nbctl daemon failed, %q", output)
return err
}
daemonSocket := strings.TrimSpace(string(output))
if err := os.Setenv("OVN_NB_DAEMON", daemonSocket); err != nil {
klog.Errorf("failed to set env OVN_NB_DAEMON, %v", err)
return err
}
return nil
}
// CheckAlive check if kube-ovn-controller can access ovn-nb from nbctl-daemon
func CheckAlive() error {
output, err := exec.Command(
"ovn-nbctl",
"--timeout=10",
"show",
).CombinedOutput()
if err != nil {
klog.Errorf("failed to access ovn-nb from daemon, %q", output)
return err
}
return nil
}
// GetLogicalSwitchExcludeIPS get a logical switch exclude ips
// ovn-nbctl get logical_switch ovn-default other_config:exclude_ips => "10.17.0.1 10.17.0.2 10.17.0.3..10.17.0.5"
func (c Client) GetLogicalSwitchExcludeIPS(logicalSwitch string) ([]string, error) {
output, err := c.ovnNbCommand(IfExists, "get", "logical_switch", logicalSwitch, "other_config:exclude_ips")
if err != nil {
return nil, err
}
output = strings.Trim(output, `"`)
if output == "" {
return nil, ErrNoAddr
}
return strings.Fields(output), nil
}
// SetLogicalSwitchExcludeIPS set a logical switch exclude ips
// ovn-nbctl set logical_switch ovn-default other_config:exclude_ips="10.17.0.2 10.17.0.1"
func (c Client) SetLogicalSwitchExcludeIPS(logicalSwitch string, excludeIPS []string) error {
_, err := c.ovnNbCommand("set", "logical_switch", logicalSwitch,
fmt.Sprintf(`other_config:exclude_ips="%s"`, strings.Join(excludeIPS, " ")))
return err
}
func (c Client) GetLogicalSwitchPortByLogicalSwitch(logicalSwitch string) ([]string, error) {
output, err := c.ovnNbCommand("lsp-list", logicalSwitch)
if err != nil {
return nil, err
}
var rv []string
lines := strings.Split(output, "\n")
	for _, line := range lines {
		fields := strings.Fields(line)
		if len(fields) == 0 {
			// skip blank lines so indexing below cannot panic
			continue
		}
		rv = append(rv, fields[0])
	}
return rv, nil
}
func (c Client) CreateLocalnetPort(ls, port, providerName, vlanID string) error {
cmdArg := []string{
MayExist, "lsp-add", ls, port, "--",
"lsp-set-addresses", port, "unknown", "--",
"lsp-set-type", port, "localnet", "--",
"lsp-set-options", port, fmt.Sprintf("network_name=%s", providerName), "--",
"set", "logical_switch_port", port, fmt.Sprintf("external_ids:vendor=%s", util.CniTypeName),
}
if vlanID != "" && vlanID != "0" {
cmdArg = append(cmdArg,
"--", "set", "logical_switch_port", port, fmt.Sprintf("tag=%s", vlanID))
}
if _, err := c.ovnNbCommand(cmdArg...); err != nil {
klog.Errorf("create localnet port %s failed, %v", port, err)
return err
}
return nil
}
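// OVN port group and address set names cannot contain '-', so the
// Kubernetes-style security group name is mapped to a dot-separated form.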
func GetSgPortGroupName(sgName string) string {
return strings.Replace(fmt.Sprintf("ovn.sg.%s", sgName), "-", ".", -1)
}
func GetSgV4AssociatedName(sgName string) string {
return strings.Replace(fmt.Sprintf("ovn.sg.%s.associated.v4", sgName), "-", ".", -1)
}
func GetSgV6AssociatedName(sgName string) string {
return strings.Replace(fmt.Sprintf("ovn.sg.%s.associated.v6", sgName), "-", ".", -1)
}
func (c Client) CreateSgPortGroup(sgName string) error {
sgPortGroupName := GetSgPortGroupName(sgName)
output, err := c.ovnNbCommand(
"--data=bare", "--no-heading", "--columns=_uuid", "find", "port_group", fmt.Sprintf("name=%s", sgPortGroupName))
if err != nil {
klog.Errorf("failed to find port_group of sg %s", sgPortGroupName)
return err
}
if output != "" {
return nil
}
_, err = c.ovnNbCommand(
"pg-add", sgPortGroupName,
"--", "set", "port_group", sgPortGroupName, "external_ids:type=security_group",
fmt.Sprintf("external_ids:sg=%s", sgName),
fmt.Sprintf("external_ids:name=%s", sgPortGroupName))
return err
}
func (c Client) DeleteSgPortGroup(sgName string) error {
sgPortGroupName := GetSgPortGroupName(sgName)
// delete acl
if err := c.DeleteACL(sgPortGroupName, ""); err != nil {
return err
}
// delete address_set
asList, err := c.ListSgRuleAddressSet(sgName, "")
if err != nil {
return err
}
for _, as := range asList {
if err = c.DeleteAddressSet(as); err != nil {
return err
}
}
// delete pg
err = c.DeletePortGroup(sgPortGroupName)
if err != nil {
return err
}
return nil
}
func (c Client) CreateSgAssociatedAddressSet(sgName string) error {
v4AsName := GetSgV4AssociatedName(sgName)
v6AsName := GetSgV6AssociatedName(sgName)
outputV4, err := c.ovnNbCommand("--data=bare", "--no-heading", "--columns=_uuid", "find", "address_set", fmt.Sprintf("name=%s", v4AsName))
if err != nil {
klog.Errorf("failed to find address_set for sg %s", sgName)
return err
}
outputV6, err := c.ovnNbCommand("--data=bare", "--no-heading", "--columns=_uuid", "find", "address_set", fmt.Sprintf("name=%s", v6AsName))
if err != nil {
klog.Errorf("failed to find address_set for sg %s", sgName)
return err
}
if outputV4 == "" {
_, err = c.ovnNbCommand("create", "address_set", fmt.Sprintf("name=%s", v4AsName), fmt.Sprintf("external_ids:sg=%s", sgName))
if err != nil {
klog.Errorf("failed to create v4 address_set for sg %s", sgName)
return err
}
}
if outputV6 == "" {
_, err = c.ovnNbCommand("create", "address_set", fmt.Sprintf("name=%s", v6AsName), fmt.Sprintf("external_ids:sg=%s", sgName))
if err != nil {
klog.Errorf("failed to create v6 address_set for sg %s", sgName)
return err
}
}
return nil
}
func (c Client) ListSgRuleAddressSet(sgName string, direction AclDirection) ([]string, error) {
ovnCmd := []string{"--data=bare", "--no-heading", "--columns=name", "find", "address_set", fmt.Sprintf("external_ids:sg=%s", sgName)}
if direction != "" {
ovnCmd = append(ovnCmd, fmt.Sprintf("external_ids:direction=%s", direction))
}
output, err := c.ovnNbCommand(ovnCmd...)
if err != nil {
klog.Errorf("failed to list sg address_set of %s, direction %s", sgName, direction)
return nil, err
}
return strings.Split(output, "\n"), nil
}
func (c Client) createSgRuleACL(sgName string, direction AclDirection, rule *kubeovnv1.SgRule, index int) error {
ipSuffix := "ip4"
if rule.IPVersion == "ipv6" {
ipSuffix = "ip6"
}
sgPortGroupName := GetSgPortGroupName(sgName)
var matchArgs []string
if rule.RemoteType == kubeovnv1.SgRemoteTypeAddress {
if direction == SgAclIngressDirection {
matchArgs = append(matchArgs, fmt.Sprintf("outport==@%s && %s && %s.src==%s", sgPortGroupName, ipSuffix, ipSuffix, rule.RemoteAddress))
} else {
matchArgs = append(matchArgs, fmt.Sprintf("inport==@%s && %s && %s.dst==%s", sgPortGroupName, ipSuffix, ipSuffix, rule.RemoteAddress))
}
	} else {
		// Use the associated address set that matches the rule's IP version;
		// matching ip6 traffic against the v4 set would never hit.
		remoteAsName := GetSgV4AssociatedName(rule.RemoteSecurityGroup)
		if rule.IPVersion == "ipv6" {
			remoteAsName = GetSgV6AssociatedName(rule.RemoteSecurityGroup)
		}
		if direction == SgAclIngressDirection {
			matchArgs = append(matchArgs, fmt.Sprintf("outport==@%s && %s && %s.src==$%s", sgPortGroupName, ipSuffix, ipSuffix, remoteAsName))
		} else {
			matchArgs = append(matchArgs, fmt.Sprintf("inport==@%s && %s && %s.dst==$%s", sgPortGroupName, ipSuffix, ipSuffix, remoteAsName))
		}
	}
if rule.Protocol == kubeovnv1.ProtocolICMP {
if ipSuffix == "ip4" {
matchArgs = append(matchArgs, "icmp4")
} else {
matchArgs = append(matchArgs, "icmp6")
}
} else if rule.Protocol == kubeovnv1.ProtocolTCP || rule.Protocol == kubeovnv1.ProtocolUDP {
matchArgs = append(matchArgs, fmt.Sprintf("%d<=%s.dst<=%d", rule.PortRangeMin, rule.Protocol, rule.PortRangeMax))
}
matchStr := strings.Join(matchArgs, " && ")
action := "drop"
if rule.Policy == kubeovnv1.PolicyAllow {
action = "allow-related"
}
highestPriority, err := strconv.Atoi(util.SecurityGroupHighestPriority)
if err != nil {
return err
}
_, err = c.ovnNbCommand(MayExist, "--type=port-group", "acl-add", sgPortGroupName, string(direction), strconv.Itoa(highestPriority-rule.Priority), matchStr, action)
return err
}
func (c Client) CreateSgDenyAllACL() error {
portGroupName := GetSgPortGroupName(util.DenyAllSecurityGroup)
if _, err := c.ovnNbCommand(MayExist, "--type=port-group", "acl-add", portGroupName, string(SgAclIngressDirection), util.SecurityGroupDropPriority,
fmt.Sprintf("outport==@%s && ip", portGroupName), "drop"); err != nil {
return err
}
if _, err := c.ovnNbCommand(MayExist, "--type=port-group", "acl-add", portGroupName, string(SgAclEgressDirection), util.SecurityGroupDropPriority,
fmt.Sprintf("inport==@%s && ip", portGroupName), "drop"); err != nil {
return err
}
return nil
}
func (c Client) UpdateSgACL(sg *kubeovnv1.SecurityGroup, direction AclDirection) error {
sgPortGroupName := GetSgPortGroupName(sg.Name)
// clear acl
if err := c.DeleteACL(sgPortGroupName, string(direction)); err != nil {
return err
}
// clear rule address_set
asList, err := c.ListSgRuleAddressSet(sg.Name, direction)
if err != nil {
return err
}
for _, as := range asList {
if err = c.DeleteAddressSet(as); err != nil {
return err
}
}
// create port_group associated acl
if sg.Spec.AllowSameGroupTraffic {
v4AsName := GetSgV4AssociatedName(sg.Name)
v6AsName := GetSgV6AssociatedName(sg.Name)
if direction == SgAclIngressDirection {
if _, err := c.ovnNbCommand(MayExist, "--type=port-group", "acl-add", sgPortGroupName, "to-lport", util.SecurityGroupAllowPriority,
fmt.Sprintf("outport==@%s && ip4 && ip4.src==$%s", sgPortGroupName, v4AsName), "allow-related"); err != nil {
return err
}
if _, err := c.ovnNbCommand(MayExist, "--type=port-group", "acl-add", sgPortGroupName, "to-lport", util.SecurityGroupAllowPriority,
fmt.Sprintf("outport==@%s && ip6 && ip6.src==$%s", sgPortGroupName, v6AsName), "allow-related"); err != nil {
return err
}
} else {
if _, err := c.ovnNbCommand(MayExist, "--type=port-group", "acl-add", sgPortGroupName, "from-lport", util.SecurityGroupAllowPriority,
fmt.Sprintf("inport==@%s && ip4 && ip4.dst==$%s", sgPortGroupName, v4AsName), "allow-related"); err != nil {
return err
}
if _, err := c.ovnNbCommand(MayExist, "--type=port-group", "acl-add", sgPortGroupName, "from-lport", util.SecurityGroupAllowPriority,
fmt.Sprintf("inport==@%s && ip6 && ip6.dst==$%s", sgPortGroupName, v6AsName), "allow-related"); err != nil {
return err
}
}
}
// recreate rule ACL
var sgRules []*kubeovnv1.SgRule
if direction == SgAclIngressDirection {
sgRules = sg.Spec.IngressRules
} else {
sgRules = sg.Spec.EgressRules
}
for index, rule := range sgRules {
if err = c.createSgRuleACL(sg.Name, direction, rule, index); err != nil {
return err
}
}
return nil
}
|
[
"\"ENABLE_SSL\""
] |
[] |
[
"ENABLE_SSL"
] |
[]
|
["ENABLE_SSL"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
"time"
"github.com/hashicorp/logutils"
backendInit "github.com/hashicorp/terraform/backend/init"
"github.com/hashicorp/terraform/command"
"github.com/hashicorp/terraform/command/arguments"
"github.com/hashicorp/terraform/command/clistate"
"github.com/hashicorp/terraform/command/views"
"github.com/mitchellh/cli"
)
// Version is a version number.
var version = "0.0.3"
// LockCommand is a Command implementation that locks a Terraform state.
type LockCommand struct {
command.StateMeta
}
// Run runs the procedure of this command.
func (c *LockCommand) Run(args []string) int {
// Load the state to be locked
stateFromMgr, err := c.State()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error loading the state: %s", err))
return 1
}
view := views.NewStateLocker(arguments.ViewHuman, c.View)
stateLocker := clistate.NewLocker((0 * time.Second), view)
if err := stateLocker.Lock(stateFromMgr, "tflock"); err != nil {
c.Ui.Error(fmt.Sprintf("Error locking source state: %s", err))
return 1
}
return 0
}
// Help returns long-form help text.
func (*LockCommand) Help() string {
helpText := `
Usage: tflock
`
return strings.TrimSpace(helpText)
}
// Synopsis returns one-line help text.
func (c *LockCommand) Synopsis() string {
return "Lock your Terraform state"
}
func logOutput() io.Writer {
levels := []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"}
minLevel := os.Getenv("TFLOCK_LOG")
// default log writer is null device.
writer := ioutil.Discard
if minLevel != "" {
writer = os.Stderr
}
filter := &logutils.LevelFilter{
Levels: levels,
MinLevel: logutils.LogLevel(minLevel),
Writer: writer,
}
return filter
}
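// A hedged usage note (illustrative, not from the original source): TFLOCK_LOG
// selects the minimum level passed to the filter above, e.g.
//
//   $ TFLOCK_LOG=DEBUG tflock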
func main() {
log.SetOutput(logOutput())
// Initialize the backends.
// This is needed for registering backend types such as s3.
backendInit.Init(nil)
UI := &cli.BasicUi{
Writer: os.Stdout,
Reader: os.Stdin,
}
meta := command.Meta{
Ui: UI,
}
commands := map[string]cli.CommandFactory{
"": func() (cli.Command, error) {
return &LockCommand{
StateMeta: command.StateMeta{
Meta: meta,
},
}, nil
},
}
args := os.Args[1:]
c := &cli.CLI{
Name: "tflock",
Version: version,
Args: args,
Commands: commands,
HelpWriter: os.Stdout,
}
exitStatus, err := c.Run()
if err != nil {
UI.Error(fmt.Sprintf("Failed to execute CLI: %s", err))
}
os.Exit(exitStatus)
}
|
[
"\"TFLOCK_LOG\""
] |
[] |
[
"TFLOCK_LOG"
] |
[]
|
["TFLOCK_LOG"]
|
go
| 1 | 0 | |
cmd/client/main.go
|
package main
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/joho/godotenv"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
)
func main() {
err := godotenv.Load("client.env")
checkErr(err)
url := os.Getenv("API_URL")
callAPI(url+"/api/public", nil)
callAPI(url+"/api/private", getToken())
}
func getToken() *oauth2.Token {
conf := clientcredentials.Config{
TokenURL: os.Getenv("AUTH_TOKEN_URL"),
ClientID: os.Getenv("AUTH_CLIENT_ID"),
ClientSecret: os.Getenv("AUTH_CLIENT_SECRET"),
Scopes: strings.Split(os.Getenv("AUTH_SCOPE"), " "),
}
token, err := conf.Token(context.Background())
checkErr(err)
return token
}
func callAPI(url string, token *oauth2.Token) {
req, err := http.NewRequest("GET", url, nil)
checkErr(err)
if token != nil {
token.SetAuthHeader(req)
}
res, err := http.DefaultClient.Do(req)
checkErr(err)
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
checkErr(err)
fmt.Printf("[%s]", url)
fmt.Println()
fmt.Println(string(body))
fmt.Println()
}
func checkErr(err error) {
if err != nil {
panic(err)
}
}
|
[
"\"API_URL\"",
"\"AUTH_TOKEN_URL\"",
"\"AUTH_CLIENT_ID\"",
"\"AUTH_CLIENT_SECRET\"",
"\"AUTH_SCOPE\""
] |
[] |
[
"AUTH_TOKEN_URL",
"AUTH_CLIENT_SECRET",
"AUTH_CLIENT_ID",
"API_URL",
"AUTH_SCOPE"
] |
[]
|
["AUTH_TOKEN_URL", "AUTH_CLIENT_SECRET", "AUTH_CLIENT_ID", "API_URL", "AUTH_SCOPE"]
|
go
| 5 | 0 | |
go/app/config/config_redis.go
|
package config
import (
"fmt"
"os"
"github.com/go-redis/redis/v8"
"github.com/joho/godotenv"
)
func NewRedis() *redis.Client {
if err := godotenv.Load(); err != nil {
panic(err.Error())
}
redisAddr := fmt.Sprintf(
"%s:%s",
os.Getenv("REDIS_ADDRESS"),
os.Getenv("REDIS_PORT"),
)
return redis.NewClient(&redis.Options{
Addr: redisAddr,
DB: 0,
})
}
|
[
"\"REDIS_ADDRESS\"",
"\"REDIS_PORT\""
] |
[] |
[
"REDIS_PORT",
"REDIS_ADDRESS"
] |
[]
|
["REDIS_PORT", "REDIS_ADDRESS"]
|
go
| 2 | 0 | |
bot.py
|
import logging
import os
import sys
import json
import hashlib
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, ConversationHandler, Handler
import telegram
from telegram import ReplyKeyboardMarkup
from processImg import processImg
InlineKeyboardButton = telegram.InlineKeyboardButton
ENTRY, ENTER_NAME, AWAIT_IMAGE = range(3)
# Enabling logging
logging.basicConfig(level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger()
# Getting mode, so we could define run function for local and Heroku setup
mode = os.getenv("MODE")
TOKEN = os.getenv("TOKEN")
if mode == "dev":
def run(updater):
updater.start_polling()
updater.idle()
elif mode == "prod":
def run(updater):
PORT = int(os.environ.get("PORT", "8443"))
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME")
# Code from https://github.com/python-telegram-bot/python-telegram-bot/wiki/Webhooks#heroku
updater.start_webhook(listen="0.0.0.0",
port=PORT,
url_path=TOKEN)
updater.bot.set_webhook("https://{}.herokuapp.com/{}".format(HEROKU_APP_NAME, TOKEN))
logger.info("Up and ready to go on heroku!")
else:
logger.error("No MODE specified!")
sys.exit(1)
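# A hedged example of the environment needed to run the bot (values are
# illustrative assumptions, not from the original source):
#
#   MODE=dev TOKEN=<telegram-bot-token> python bot.py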
# open JSON file containing bot commands
with open('commands.json') as f:
data = json.load(f)
def start_handler(update, context):
reply_keyboard = [['Create'],['Cancel']]
markup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
logger.info("Started")
chat_id = update.message.chat_id
logger.info("User {} started bot".format(chat_id))
update.message.reply_text(data['Commands']['Start']['Text'], reply_markup=markup)
return ENTRY
def help_handler(update, context):
# Handler function for the /help command
commands = data['Commands'].keys()
text = "/start"
update.message.reply_text(data['Commands']['Help']['Text'] + "{}".format(text))
def image_handler(update, context):
file = update.message.photo[-1].get_file()
file.download('img/{}.jpg'.format(file.file_unique_id))
try:
processImg('img/{}.jpg'.format(file.file_unique_id))
context.bot.send_chat_action(chat_id=update.message.chat_id, action="typing")
except Exception:
update.message.reply_text(data['Commands']['photoError']['Text'])
return ENTRY
stickerImg = open("img/r_{}.png".format(file.file_unique_id), 'rb')
# show the user the cropped image
# update.message.reply_photo(stickerImg)
# create/add to sticker pack and return sticker
packname = context.user_data['name']
username = update.message.from_user['username']
# hash the user id; bytearray(int) would allocate a zero-filled buffer of that
# size, so hash the id's string representation instead
user_hash = hashlib.sha1(str(update.effective_user.id).encode()).hexdigest()
sticker_set_name = "Stitched_{}_by_stichers_bot".format(user_hash[:10] + packname[:3])
#TODO get emoji from user
context.user_data['sticker-set-name'] = sticker_set_name
logging.info("creating sticker for: userid: {}, stickersetname: {}".format(update.message.from_user.id, sticker_set_name))
try:
context.bot.addStickerToSet(user_id=update.message.from_user.id, name=sticker_set_name, emojis='😄',
png_sticker=open("img/r_{}.png".format(file.file_unique_id), 'rb'))
except Exception:
context.bot.createNewStickerSet(user_id=update.message.from_user.id, name=sticker_set_name,
title=packname, emojis='😄', png_sticker=open("img/r_{}.png".format(file.file_unique_id), 'rb'))
finally:
update.message.reply_text(data['Commands']['nextSticker']['Text'])
stickerImg.close()
os.remove('img/{}.jpg'.format(file.file_unique_id))
os.remove("img/r_{}.png".format(file.file_unique_id))
return AWAIT_IMAGE
def validate_pack_name(name):
return 1 < len(name) < 64
def name_handler(update, context):
pack_name = update.message.text
update.message.reply_text(data['Commands']['nameConfirmation']['Text'] + "{}".format(pack_name))
context.user_data['name'] = pack_name
if validate_pack_name(pack_name):
update.message.reply_text("Name is valid! " + data['Commands']['newPackAddSticker']['Text'])
return AWAIT_IMAGE
else:
update.message.reply_text(data['Commands']['nameError']['Text'])
return ENTER_NAME
def publish_handler(update, context):
update.message.reply_text(data['Commands']['finalizePack']['Text'])
update.message.reply_text(data['Commands']['createPack']['Text'] + "\n https://t.me/addstickers/{}".format(context.user_data['sticker-set-name']))
def cancel(update, context):
update.message.reply_text(data['Commands']['cancel']['Text'])
def check_user_input(update, context):
user_input = update.message.text
logger.info("User input was {}".format(user_input))
if "Create" in user_input:
update.message.reply_text(data['Commands']['namePack']['Text'])
return ENTER_NAME
elif "Cancel" in user_input:
update.message.reply_text(data['Commands']['exit']['Text'])
else:
# ask again
reply_keyboard = [['Create'],['Cancel']]
markup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
update.message.reply_text(
("{}?! ".format(user_input) + data['Commands']['askAgain']['Text']),
reply_markup=markup)
return ENTRY
if __name__ == '__main__':
logger.info("Starting bot")
updater = Updater(TOKEN, use_context=True)
dispatcher = updater.dispatcher
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start_handler)],
states={
ENTRY: [MessageHandler(Filters.text,
check_user_input)],
ENTER_NAME: [MessageHandler(Filters.text,
name_handler, pass_user_data=True)],
AWAIT_IMAGE: [MessageHandler(Filters.photo, image_handler, pass_user_data=True), CommandHandler("publish", publish_handler)],
},
fallbacks=[CommandHandler('cancel', cancel)]
)
dispatcher.add_handler(conv_handler)
dispatcher.add_handler(CommandHandler("start", start_handler))
dispatcher.add_handler(CommandHandler("help", help_handler))
run(updater)
|
[] |
[] |
[
"PORT",
"HEROKU_APP_NAME",
"TOKEN",
"MODE"
] |
[]
|
["PORT", "HEROKU_APP_NAME", "TOKEN", "MODE"]
|
python
| 4 | 0 | |
jdollarx-example/src/test/java/com/github/loyada/jdollarxexample/JdollarxExampleSingleBrowserTest.java
|
package com.github.loyada.jdollarxexample;
import com.github.loyada.jdollarx.Path;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static com.github.loyada.jdollarx.BasicPath.*;
import static com.github.loyada.jdollarx.Operations.*;
import static com.github.loyada.jdollarx.singlebrowser.InBrowserSinglton.*;
import static com.github.loyada.jdollarx.ElementProperties.*;
import static com.github.loyada.jdollarx.singlebrowser.custommatchers.CustomMatchers.isPresent;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.startsWith;
import static org.junit.Assert.assertThat;
public class JdollarxExampleSingleBrowserTest {
private static final String driverPath = System.getenv().get("CHROMEDRIVERPATH");
Path searchFormWrapper = element.that(hasId("searchform")).contains(form).describedBy("search form");
Path google = input.inside(searchFormWrapper);
@BeforeClass
public static void setup() {
driver = new DriverSetup(true).createNewDriver("chrome", driverPath);
}
@Before
public void goToGoogle() {
driver.get("http://www.google.com");
}
@Test
public void googleForAmazonAndVerifyFirstResult() throws OperationFailedException {
//Given
//When
sendKeys("amazon").to(google);
//Then
Path results = div.that(hasId("search"));
Path resultsLink = anchor.inside(results);
Path amazonAsFirstResult = resultsLink.that(isWithIndex(0)).that(hasTextContaining("amazon.com"));
assertThat(amazonAsFirstResult, isPresent());
}
@Test
public void showAUsefulExceptionForOperationError() throws OperationFailedException {
//Given
Path warcraft = input.inside(searchFormWrapper).withText("for the horde!");
try {
// when
sendKeys("amazon").to(warcraft);
//then
} catch (OperationFailedException e) {
assertThat(e.getMessage(), equalTo("could not send keys to input, inside (search form), and has the text \"for the horde!\""));
assertThat(e.getCause().getMessage(), startsWith("could not find input, inside (search form), and has the text \"for the horde!\""));
}
}
@Test
public void googleForAmazonAndFeelingLucky() throws OperationFailedException {
//Given
sendKeys("amazon").to(google);
//When
Path firstSuggestion = firstOccurrenceOf(listItem.inside(form));
hoverOver(firstSuggestion);
Path feelingLucky = anchor.inside(firstSuggestion).withTextContaining("feeling lucky");
clickAt(feelingLucky);
//Then
Path amazonMainTitle = title.that(hasTextContaining("amazon")).describedBy("amazon main title");
assertThat(amazonMainTitle, isPresent());
}
@Test
public void googleForAmazonAssertionError1() throws OperationFailedException {
//Given
Path searchFormWrapper = element.that(hasId("searchform")).contains(form);
Path google = input.inside(searchFormWrapper);
//When
sendKeys("amazon").to(google);
//Then
Path results = div.that(hasId("search"));
Path resultsLink = anchor.inside(results);
Path amazonResult = resultsLink.that(hasTextContaining("amazon.com")).describedBy("search result of amazon");
assertThat(amazonResult, isPresent());
try {
assertThat(amazonResult, isPresent(1000).timesOrMore());
} catch (AssertionError e) {
e.printStackTrace();
}
}
@Test
public void googleForAmazonAssertionError2() throws OperationFailedException {
//Given
//When
sendKeys("amazon").to(google);
//Then
Path results = div.that(hasId("search"));
Path resultsLink = anchor.inside(results);
Path warcraftResult = firstOccurrenceOf(resultsLink).that(hasText("for the horde!"));
try {
assertThat(warcraftResult, isPresent());
} catch (AssertionError e) {
e.printStackTrace();
}
}
@AfterClass
public static void teardown() {
driver.quit();
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
pkg/yurtctl/util/kubernetes/util.go
|
/*
Copyright 2020 The OpenYurt Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubernetes
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"sort"
"strings"
"sync"
"time"
"github.com/spf13/pflag"
"k8s.io/api/admissionregistration/v1beta1"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/runtime/serializer/yaml"
yamlutil "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
bootstraputil "k8s.io/cluster-bootstrap/token/util"
"k8s.io/klog/v2"
"github.com/openyurtio/openyurt/pkg/util/kubeadmapi"
"github.com/openyurtio/openyurt/pkg/yurtctl/constants"
"github.com/openyurtio/openyurt/pkg/yurtctl/util"
"github.com/openyurtio/openyurt/pkg/yurtctl/util/edgenode"
strutil "github.com/openyurtio/openyurt/pkg/yurtctl/util/strings"
tmplutil "github.com/openyurtio/openyurt/pkg/yurtctl/util/templates"
)
const (
// DisableNodeControllerJobNameBase is the prefix of the DisableNodeControllerJob name
DisableNodeControllerJobNameBase = "yurtctl-disable-node-controller"
// EnableNodeControllerJobNameBase is the prefix of the EnableNodeControllerJob name
EnableNodeControllerJobNameBase = "yurtctl-enable-node-controller"
SystemNamespace = "kube-system"
)
var (
// PropagationPolicy defines the propagation policy used when deleting a resource
PropagationPolicy = metav1.DeletePropagationBackground
// WaitServantJobTimeout specifies the timeout value of waiting for the ServantJob to succeed
WaitServantJobTimeout = time.Minute * 2
// CheckServantJobPeriod defines the time interval between two successive ServantJob status inspections
CheckServantJobPeriod = time.Second * 10
// ValidServerVersions contains all compatible server versions
// yurtctl only supports Kubernetes 1.12+ - 1.21+ for now
ValidServerVersions = []string{
"1.12", "1.12+",
"1.13", "1.13+",
"1.14", "1.14+",
"1.16", "1.16+",
"1.18", "1.18+",
"1.19", "1.19+",
"1.20", "1.20+",
"1.21", "1.21+"}
)
// CreateServiceAccountFromYaml creates the ServiceAccount from the yaml template.
func CreateServiceAccountFromYaml(cliSet *kubernetes.Clientset, ns, saTmpl string) error {
obj, err := YamlToObject([]byte(saTmpl))
if err != nil {
return err
}
sa, ok := obj.(*corev1.ServiceAccount)
if !ok {
return fmt.Errorf("fail to assert serviceaccount: %v", err)
}
_, err = cliSet.CoreV1().ServiceAccounts(ns).Create(context.Background(), sa, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("fail to create the serviceaccount/%s: %v", sa.Name, err)
}
klog.V(4).Infof("serviceaccount/%s is created", sa.Name)
return nil
}
// CreateClusterRoleFromYaml creates the ClusterRole from the yaml template.
func CreateClusterRoleFromYaml(cliSet *kubernetes.Clientset, crTmpl string) error {
obj, err := YamlToObject([]byte(crTmpl))
if err != nil {
return err
}
cr, ok := obj.(*rbacv1.ClusterRole)
if !ok {
return fmt.Errorf("fail to assert clusterrole: %v", err)
}
_, err = cliSet.RbacV1().ClusterRoles().Create(context.Background(), cr, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("fail to create the clusterrole/%s: %v", cr.Name, err)
}
klog.V(4).Infof("clusterrole/%s is created", cr.Name)
return nil
}
// CreateClusterRoleBindingFromYaml creates the ClusterRoleBinding from the yaml template.
func CreateClusterRoleBindingFromYaml(cliSet *kubernetes.Clientset, crbTmpl string) error {
obj, err := YamlToObject([]byte(crbTmpl))
if err != nil {
return err
}
crb, ok := obj.(*rbacv1.ClusterRoleBinding)
if !ok {
return fmt.Errorf("fail to assert clusterrolebinding: %v", err)
}
_, err = cliSet.RbacV1().ClusterRoleBindings().Create(context.Background(), crb, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("fail to create the clusterrolebinding/%s: %v", crb.Name, err)
}
klog.V(4).Infof("clusterrolebinding/%s is created", crb.Name)
return nil
}
// CreateConfigMapFromYaml creates the ConfigMap from the yaml template.
func CreateConfigMapFromYaml(cliSet *kubernetes.Clientset, ns, cmTmpl string) error {
obj, err := YamlToObject([]byte(cmTmpl))
if err != nil {
return err
}
cm, ok := obj.(*v1.ConfigMap)
if !ok {
return fmt.Errorf("fail to assert configmap: %v", err)
}
_, err = cliSet.CoreV1().ConfigMaps(ns).Create(context.Background(), cm, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("fail to create the configmap/%s: %v", cm.Name, err)
}
klog.V(4).Infof("configmap/%s is created", cm.Name)
return nil
}
// CreateDeployFromYaml creates the Deployment from the yaml template.
func CreateDeployFromYaml(cliSet *kubernetes.Clientset, ns, dplyTmpl string, ctx interface{}) error {
ycmdp, err := tmplutil.SubsituteTemplate(dplyTmpl, ctx)
if err != nil {
return err
}
dpObj, err := YamlToObject([]byte(ycmdp))
if err != nil {
return err
}
dply, ok := dpObj.(*appsv1.Deployment)
if !ok {
return errors.New("fail to assert Deployment")
}
if _, err = cliSet.AppsV1().Deployments(ns).Create(context.Background(), dply, metav1.CreateOptions{}); err != nil {
return err
}
klog.V(4).Infof("the deployment/%s is deployed", dply.Name)
return nil
}
// CreateDaemonSetFromYaml creates the DaemonSet from the yaml template.
func CreateDaemonSetFromYaml(cliSet *kubernetes.Clientset, dsTmpl string, ctx interface{}) error {
var ytadstmp string
var err error
if ctx != nil {
ytadstmp, err = tmplutil.SubsituteTemplate(dsTmpl, ctx)
if err != nil {
return err
}
} else {
ytadstmp = dsTmpl
}
obj, err := YamlToObject([]byte(ytadstmp))
if err != nil {
return err
}
ds, ok := obj.(*appsv1.DaemonSet)
if !ok {
return fmt.Errorf("fail to assert daemonset: %v", err)
}
_, err = cliSet.AppsV1().DaemonSets(SystemNamespace).Create(context.Background(), ds, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("fail to create the daemonset/%s: %v", ds.Name, err)
}
klog.V(4).Infof("daemonset/%s is created", ds.Name)
return nil
}
// CreateServiceFromYaml creates the Service from the yaml template.
func CreateServiceFromYaml(cliSet *kubernetes.Clientset, svcTmpl string) error {
obj, err := YamlToObject([]byte(svcTmpl))
if err != nil {
return err
}
svc, ok := obj.(*corev1.Service)
if !ok {
return fmt.Errorf("fail to assert service: %v", err)
}
_, err = cliSet.CoreV1().Services(SystemNamespace).Create(context.Background(), svc, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("fail to create the service/%s: %s", svc.Name, err)
}
klog.V(4).Infof("service/%s is created", svc.Name)
return nil
}
// added by yanyhui at 20210611
// CreateRoleFromYaml creates the Role from the yaml template.
func CreateRoleFromYaml(cliSet *kubernetes.Clientset, ns, crTmpl string) error {
obj, err := YamlToObject([]byte(crTmpl))
if err != nil {
return err
}
cr, ok := obj.(*rbacv1.Role)
if !ok {
return fmt.Errorf("fail to assert role: %v", err)
}
_, err = cliSet.RbacV1().Roles(ns).Create(context.Background(), cr, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("fail to create the role/%s: %v", cr.Name, err)
}
klog.V(4).Infof("role/%s is created", cr.Name)
return nil
}
// CreateRoleBindingFromYaml creates the RoleBinding from the yaml template.
func CreateRoleBindingFromYaml(cliSet *kubernetes.Clientset, ns, crbTmpl string) error {
obj, err := YamlToObject([]byte(crbTmpl))
if err != nil {
return err
}
crb, ok := obj.(*rbacv1.RoleBinding)
if !ok {
return fmt.Errorf("fail to assert rolebinding: %v", err)
}
_, err = cliSet.RbacV1().RoleBindings(ns).Create(context.Background(), crb, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("fail to create the rolebinding/%s: %v", crb.Name, err)
}
klog.V(4).Infof("rolebinding/%s is created", crb.Name)
return nil
}
// CreateSecretFromYaml creates the Secret from the yaml template.
func CreateSecretFromYaml(cliSet *kubernetes.Clientset, ns, saTmpl string) error {
obj, err := YamlToObject([]byte(saTmpl))
if err != nil {
return err
}
sa, ok := obj.(*corev1.Secret)
if !ok {
return fmt.Errorf("fail to assert secret: %v", err)
}
_, err = cliSet.CoreV1().Secrets(ns).Create(context.Background(), sa, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("fail to create the secret/%s: %v", sa.Name, err)
}
klog.V(4).Infof("secret/%s is created", sa.Name)
return nil
}
// CreateMutatingWebhookConfigurationFromYaml creates the MutatingWebhookConfiguration from the yaml template.
func CreateMutatingWebhookConfigurationFromYaml(cliSet *kubernetes.Clientset, svcTmpl string) error {
obj, err := YamlToObject([]byte(svcTmpl))
if err != nil {
return err
}
svc, ok := obj.(*v1beta1.MutatingWebhookConfiguration)
if !ok {
return fmt.Errorf("fail to assert mutatingwebhookconfiguration: %v", err)
}
_, err = cliSet.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(context.Background(), svc, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("fail to create the mutatingwebhookconfiguration/%s: %s", svc.Name, err)
}
klog.V(4).Infof("mutatingwebhookconfiguration/%s is created", svc.Name)
return nil
}
// CreateValidatingWebhookConfigurationFromYaml creates the ValidatingWebhookConfiguration from the yaml template.
func CreateValidatingWebhookConfigurationFromYaml(cliSet *kubernetes.Clientset, svcTmpl string) error {
obj, err := YamlToObject([]byte(svcTmpl))
if err != nil {
return err
}
svc, ok := obj.(*v1beta1.ValidatingWebhookConfiguration)
if !ok {
return fmt.Errorf("fail to assert validatingwebhookconfiguration: %v", err)
}
_, err = cliSet.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(context.Background(), svc, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("fail to create the validatingwebhookconfiguration/%s: %s", svc.Name, err)
}
klog.V(4).Infof("validatingwebhookconfiguration/%s is created", svc.Name)
return nil
}
func CreateCRDFromYaml(clientset *kubernetes.Clientset, yurtAppManagerClient dynamic.Interface, nameSpace string, filebytes []byte) error {
var err error
decoder := yamlutil.NewYAMLOrJSONDecoder(bytes.NewReader(filebytes), 10000)
var rawObj k8sruntime.RawExtension
err = decoder.Decode(&rawObj)
if err != nil {
return err
}
obj, gvk, err := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme).Decode(rawObj.Raw, nil, nil)
if err != nil {
return err
}
unstructuredMap, err := k8sruntime.DefaultUnstructuredConverter.ToUnstructured(obj)
if err != nil {
return err
}
unstructuredObj := &unstructured.Unstructured{Object: unstructuredMap}
gr, err := restmapper.GetAPIGroupResources(clientset.Discovery())
if err != nil {
return err
}
mapper := restmapper.NewDiscoveryRESTMapper(gr)
mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return err
}
var dri dynamic.ResourceInterface
if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
if unstructuredObj.GetNamespace() == "" {
unstructuredObj.SetNamespace(nameSpace)
}
dri = yurtAppManagerClient.Resource(mapping.Resource).Namespace(unstructuredObj.GetNamespace())
} else {
dri = yurtAppManagerClient.Resource(mapping.Resource)
}
objSecond, err := dri.Create(context.Background(), unstructuredObj, metav1.CreateOptions{})
if err != nil {
return err
} else {
fmt.Printf("%s/%s created", objSecond.GetKind(), objSecond.GetName())
}
return nil
}
func DeleteCRDResource(clientset *kubernetes.Clientset, yurtAppManagerClientSet dynamic.Interface, res string, name string, filebytes []byte) error {
var err error
decoder := yamlutil.NewYAMLOrJSONDecoder(bytes.NewReader(filebytes), 10000)
var rawObj k8sruntime.RawExtension
err = decoder.Decode(&rawObj)
if err != nil {
return err
}
obj, gvk, err := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme).Decode(rawObj.Raw, nil, nil)
if err != nil {
return err
}
unstructuredMap, err := k8sruntime.DefaultUnstructuredConverter.ToUnstructured(obj)
if err != nil {
return err
}
unstructuredObj := &unstructured.Unstructured{Object: unstructuredMap}
gr, err := restmapper.GetAPIGroupResources(clientset.Discovery())
if err != nil {
return err
}
mapper := restmapper.NewDiscoveryRESTMapper(gr)
mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return err
}
var dri dynamic.ResourceInterface
if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
if unstructuredObj.GetNamespace() == "" {
unstructuredObj.SetNamespace("")
}
dri = yurtAppManagerClientSet.Resource(mapping.Resource).Namespace(unstructuredObj.GetNamespace())
} else {
dri = yurtAppManagerClientSet.Resource(mapping.Resource)
}
err = dri.Delete(context.Background(), name, metav1.DeleteOptions{})
if err != nil {
if !apierrors.IsNotFound(err) {
return err
}
} else {
fmt.Printf("%s/%s is deleted ", res, name)
}
return nil
}
// YamlToObject deserializes object in yaml format to a runtime.Object
func YamlToObject(yamlContent []byte) (k8sruntime.Object, error) {
decode := serializer.NewCodecFactory(scheme.Scheme).UniversalDeserializer().Decode
obj, _, err := decode(yamlContent, nil, nil)
if err != nil {
return nil, err
}
return obj, nil
}
// LabelNode add a new label (<key>=<val>) to the given node
func LabelNode(cliSet *kubernetes.Clientset, node *v1.Node, key, val string) (*v1.Node, error) {
node.Labels[key] = val
newNode, err := cliSet.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{})
if err != nil {
return nil, err
}
return newNode, nil
}
// AnnotateNode add a new annotation (<key>=<val>) to the given node
func AnnotateNode(cliSet *kubernetes.Clientset, node *v1.Node, key, val string) (*v1.Node, error) {
node.Annotations[key] = val
newNode, err := cliSet.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{})
if err != nil {
return nil, err
}
return newNode, nil
}
// RunJobAndCleanup runs the job, wait for it to be complete, and delete it
func RunJobAndCleanup(cliSet *kubernetes.Clientset, job *batchv1.Job, timeout, period time.Duration) error {
job, err := cliSet.BatchV1().Jobs(job.GetNamespace()).Create(context.Background(), job, metav1.CreateOptions{})
if err != nil {
return err
}
waitJobTimeout := time.After(timeout)
for {
select {
case <-waitJobTimeout:
return errors.New("wait for job to be complete timeout")
case <-time.After(period):
job, err := cliSet.BatchV1().Jobs(job.GetNamespace()).
Get(context.Background(), job.GetName(), metav1.GetOptions{})
if err != nil {
klog.Errorf("fail to get job(%s) when waiting for it to be succeeded: %s",
job.GetName(), err)
return err
}
if job.Status.Succeeded == *job.Spec.Completions {
if err := cliSet.BatchV1().Jobs(job.GetNamespace()).
Delete(context.Background(), job.GetName(), metav1.DeleteOptions{
PropagationPolicy: &PropagationPolicy,
}); err != nil {
klog.Errorf("fail to delete succeeded servant job(%s): %s",
job.GetName(), err)
return err
}
return nil
}
continue
}
}
}
// RenderServantJob renders servant job for a specified node.
func RenderServantJob(action string, tmplCtx map[string]string, nodeName string) (*batchv1.Job, error) {
var jobTemplate string
switch action {
case "enable":
jobTemplate = constants.EnableNodeControllerJobTemplate
tmplCtx["jobName"] = EnableNodeControllerJobNameBase + "-" + nodeName
case "disable":
jobTemplate = constants.DisableNodeControllerJobTemplate
tmplCtx["jobName"] = DisableNodeControllerJobNameBase + "-" + nodeName
default:
return nil, fmt.Errorf("unknown action: %s", action)
}
tmplCtx["nodeName"] = nodeName
jobYaml, err := tmplutil.SubsituteTemplate(jobTemplate, tmplCtx)
if err != nil {
return nil, err
}
srvJobObj, err := YamlToObject([]byte(jobYaml))
if err != nil {
return nil, err
}
srvJob, ok := srvJobObj.(*batchv1.Job)
if !ok {
return nil, errors.New("fail to assert yurtctl-servant job")
}
return srvJob, nil
}
// RunServantJobs launches servant jobs on the specified nodes and waits for all jobs to finish.
// Succeeded jobs are deleted when finished. Failed jobs are preserved for diagnosis.
func RunServantJobs(cliSet *kubernetes.Clientset, getJob func(nodeName string) (*batchv1.Job, error), nodeNames []string) error {
var wg sync.WaitGroup
jobByNodeName := make(map[string]*batchv1.Job)
for _, nodeName := range nodeNames {
job, err := getJob(nodeName)
if err != nil {
return fmt.Errorf("fail to get job for node %s: %s", nodeName, err)
}
jobByNodeName[nodeName] = job
}
for _, nodeName := range nodeNames {
wg.Add(1)
job := jobByNodeName[nodeName]
go func() {
defer wg.Done()
if err := RunJobAndCleanup(cliSet, job,
WaitServantJobTimeout, CheckServantJobPeriod); err != nil {
klog.Errorf("fail to run servant job(%s): %s",
job.GetName(), err)
} else {
klog.Infof("servant job(%s) has succeeded", job.GetName())
}
}()
}
wg.Wait()
return nil
}
// ValidateServerVersion checks if the target server's version is supported
func ValidateServerVersion(cliSet *kubernetes.Clientset) error {
serverVersion, err := discovery.
NewDiscoveryClient(cliSet.RESTClient()).ServerVersion()
if err != nil {
return err
}
completeVersion := serverVersion.Major + "." + serverVersion.Minor
if !strutil.IsInStringLst(ValidServerVersions, completeVersion) {
return fmt.Errorf("server version(%s) is not supported, valid server versions are %v",
completeVersion, ValidServerVersions)
}
return nil
}
// GenClientSet generates the clientset based on command option, environment variable or
// the default kubeconfig file
func GenClientSet(flags *pflag.FlagSet) (*kubernetes.Clientset, error) {
kubeconfigPath, err := PrepareKubeConfigPath(flags)
if err != nil {
return nil, err
}
restCfg, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
if err != nil {
return nil, err
}
return kubernetes.NewForConfig(restCfg)
}
// GenDynamicClientSet generates the clientset based on command option, environment variable or
// the default kubeconfig file
func GenDynamicClientSet(flags *pflag.FlagSet) (dynamic.Interface, error) {
kubeconfigPath, err := PrepareKubeConfigPath(flags)
if err != nil {
return nil, err
}
restCfg, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
if err != nil {
return nil, err
}
return dynamic.NewForConfig(restCfg)
}
// PrepareKubeConfigPath returns the path of cluster kubeconfig file
func PrepareKubeConfigPath(flags *pflag.FlagSet) (string, error) {
kbCfgPath, err := flags.GetString("kubeconfig")
if err != nil {
return "", err
}
if kbCfgPath == "" {
kbCfgPath = os.Getenv("KUBECONFIG")
}
if kbCfgPath == "" {
if home := homedir.HomeDir(); home != "" {
kbCfgPath = filepath.Join(home, ".kube", "config")
}
}
if kbCfgPath == "" {
return "", errors.New("either '--kubeconfig', '$HOME/.kube/config' or '$KUBECONFIG' need to be set")
}
return kbCfgPath, nil
}
func GetOrCreateJoinTokenString(cliSet *kubernetes.Clientset) (string, error) {
tokenSelector := fields.SelectorFromSet(
map[string]string{
// TODO: We hard-code "type" here until `field_constants.go` that is
// currently in `pkg/apis/core/` exists in the external API, i.e.
// k8s.io/api/v1. Should be v1.SecretTypeField
"type": string(bootstrapapi.SecretTypeBootstrapToken),
},
)
listOptions := metav1.ListOptions{
FieldSelector: tokenSelector.String(),
}
klog.V(1).Infoln("[token] retrieving list of bootstrap tokens")
secrets, err := cliSet.CoreV1().Secrets(metav1.NamespaceSystem).List(context.Background(), listOptions)
if err != nil {
return "", fmt.Errorf("%v%s", err, "failed to list bootstrap tokens")
}
for _, secret := range secrets.Items {
// Get the BootstrapToken struct representation from the Secret object
token, err := kubeadmapi.BootstrapTokenFromSecret(&secret)
if err != nil {
klog.Warningf("%v", err)
continue
}
if !usagesAndGroupsAreValid(token) {
continue
}
// Get the human-friendly string representation for the token
return token.Token.String(), nil
}
tokenStr, err := bootstraputil.GenerateBootstrapToken()
if err != nil {
return "", fmt.Errorf("couldn't generate random token, %v", err)
}
token, err := kubeadmapi.NewBootstrapTokenString(tokenStr)
if err != nil {
return "", err
}
klog.V(1).Infoln("[token] creating token")
if err := kubeadmapi.CreateNewTokens(cliSet,
[]kubeadmapi.BootstrapToken{{
Token: token,
Usages: kubeadmapi.DefaultTokenUsages,
Groups: kubeadmapi.DefaultTokenGroups,
}}); err != nil {
return "", err
}
return tokenStr, nil
}
// usagesAndGroupsAreValid checks if the usages and groups in the given bootstrap token are valid
func usagesAndGroupsAreValid(token *kubeadmapi.BootstrapToken) bool {
sliceEqual := func(a, b []string) bool {
if len(a) != len(b) {
return false
}
sort.Strings(a)
sort.Strings(b)
for k, v := range b {
if a[k] != v {
return false
}
}
return true
}
return sliceEqual(token.Usages, kubeadmapi.DefaultTokenUsages) && sliceEqual(token.Groups, kubeadmapi.DefaultTokenGroups)
}
// find kube-controller-manager deployed through static file
func GetKubeControllerManagerHANodes(cliSet *kubernetes.Clientset) ([]string, error) {
var kcmNodeNames []string
podLst, err := cliSet.CoreV1().Pods(SystemNamespace).List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
for _, pod := range podLst.Items {
kcmPodName := fmt.Sprintf("kube-controller-manager-%s", pod.Spec.NodeName)
if kcmPodName == pod.Name {
kcmNodeNames = append(kcmNodeNames, pod.Spec.NodeName)
}
}
return kcmNodeNames, nil
}
// CheckAndInstallKubelet installs kubelet and kubernetes-cni, skipping the install if they already exist.
func CheckAndInstallKubelet(clusterVersion string) error {
klog.Info("Check and install kubelet.")
kubeletExist := false
if _, err := exec.LookPath("kubelet"); err == nil {
if b, err := exec.Command("kubelet", "--version").CombinedOutput(); err == nil {
kubeletVersion := strings.Split(string(b), " ")[1]
kubeletVersion = strings.TrimSpace(kubeletVersion)
klog.Infof("kubelet --version: %s", kubeletVersion)
if strings.Contains(string(b), clusterVersion) {
klog.Infof("Kubelet %s already exist, skip install.", clusterVersion)
kubeletExist = true
} else {
return fmt.Errorf("The existing kubelet version %s of the node is inconsistent with cluster version %s, please clean it. ", kubeletVersion, clusterVersion)
}
}
}
if !kubeletExist {
//download and install kubernetes-node
packageUrl := fmt.Sprintf(constants.KubeUrlFormat, clusterVersion, runtime.GOARCH)
savePath := fmt.Sprintf("%s/kubernetes-node-linux-%s.tar.gz", constants.TmpDownloadDir, runtime.GOARCH)
klog.V(1).Infof("Download kubelet from: %s", packageUrl)
if err := util.DownloadFile(packageUrl, savePath, 3); err != nil {
return fmt.Errorf("Download kuelet fail: %v", err)
}
if err := util.Untar(savePath, constants.TmpDownloadDir); err != nil {
return err
}
for _, comp := range []string{"kubectl", "kubeadm", "kubelet"} {
target := fmt.Sprintf("/usr/bin/%s", comp)
if err := edgenode.CopyFile(constants.TmpDownloadDir+"/kubernetes/node/bin/"+comp, target, 0755); err != nil {
return err
}
}
}
if _, err := os.Stat(constants.StaticPodPath); os.IsNotExist(err) {
if err := os.MkdirAll(constants.StaticPodPath, 0755); err != nil {
return err
}
}
if _, err := os.Stat(constants.KubeCniDir); err == nil {
klog.Infof("Cni dir %s already exist, skip install.", constants.KubeCniDir)
return nil
}
//download and install kubernetes-cni
cniUrl := fmt.Sprintf(constants.CniUrlFormat, constants.KubeCniVersion, runtime.GOARCH, constants.KubeCniVersion)
savePath := fmt.Sprintf("%s/cni-plugins-linux-%s-%s.tgz", constants.TmpDownloadDir, runtime.GOARCH, constants.KubeCniVersion)
klog.V(1).Infof("Download cni from: %s", cniUrl)
if err := util.DownloadFile(cniUrl, savePath, 3); err != nil {
return err
}
if err := os.MkdirAll(constants.KubeCniDir, 0600); err != nil {
return err
}
if err := util.Untar(savePath, constants.KubeCniDir); err != nil {
return err
}
return nil
}
// SetKubeletService configures the kubelet service.
func SetKubeletService() error {
klog.Info("Setting kubelet service.")
kubeletServiceDir := filepath.Dir(constants.KubeletServiceFilepath)
if _, err := os.Stat(kubeletServiceDir); err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(kubeletServiceDir, os.ModePerm); err != nil {
klog.Errorf("Create dir %s fail: %v", kubeletServiceDir, err)
return err
}
} else {
klog.Errorf("Describe dir %s fail: %v", kubeletServiceDir, err)
return err
}
}
if err := ioutil.WriteFile(constants.KubeletServiceFilepath, []byte(constants.KubeletServiceContent), 0644); err != nil {
klog.Errorf("Write file %s fail: %v", constants.KubeletServiceFilepath, err)
return err
}
return nil
}
// SetKubeletUnitConfig configures kubelet startup parameters.
func SetKubeletUnitConfig(nodeType string) error {
kubeletUnitDir := filepath.Dir(edgenode.KubeletSvcPath)
if _, err := os.Stat(kubeletUnitDir); err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(kubeletUnitDir, os.ModePerm); err != nil {
klog.Errorf("Create dir %s fail: %v", kubeletUnitDir, err)
return err
}
} else {
klog.Errorf("Describe dir %s fail: %v", kubeletUnitDir, err)
return err
}
}
if nodeType == constants.EdgeNode {
if err := ioutil.WriteFile(edgenode.KubeletSvcPath, []byte(constants.EdgeKubeletUnitConfig), 0600); err != nil {
return err
}
} else {
if err := ioutil.WriteFile(edgenode.KubeletSvcPath, []byte(constants.CloudKubeletUnitConfig), 0600); err != nil {
return err
}
}
return nil
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
cmd/api/main.go
|
package main
import (
"context"
"fmt"
"os"
"os/signal"
"syscall"
"time"
"github.com/convox/convox/pkg/api"
)
func main() {
if err := run(); err != nil {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", err)
}
}
func run() error {
s, err := api.New()
if err != nil {
return err
}
s.Password = os.Getenv("PASSWORD")
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
go handleSignals(s, ch)
return s.Listen("https", ":5443")
}
func handleSignals(s *api.Server, ch <-chan os.Signal) {
sig := <-ch
fmt.Printf("ns=rack at=signal signal=%v terminate=true\n", sig)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
s.Shutdown(ctx)
os.Exit(0)
}
|
[
"\"PASSWORD\""
] |
[] |
[
"PASSWORD"
] |
[]
|
["PASSWORD"]
|
go
| 1 | 0 | |
pkg/console/console.go
|
package console
import (
"context"
"fmt"
"os"
"github.com/jroimartin/gocui"
"github.com/rancher/harvester-installer/pkg/widgets"
"github.com/sirupsen/logrus"
)
var (
Debug bool
)
func init() {
if os.Getenv("DEBUG") == "true" {
Debug = true
logrus.SetLevel(logrus.DebugLevel)
}
f, err := os.OpenFile("/var/log/console.log", os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0755)
if err != nil {
fmt.Println("failed to open log file")
return
}
logrus.SetOutput(f)
}
// Console is the structure of the harvester console
type Console struct {
context context.Context
*gocui.Gui
elements map[string]widgets.Element
}
// RunConsole starts the console
func RunConsole() error {
c, err := NewConsole()
if err != nil {
return err
}
return c.doRun()
}
// NewConsole initialize the console
func NewConsole() (*Console, error) {
g, err := gocui.NewGui(gocui.OutputNormal)
if err != nil {
return nil, err
}
return &Console{
context: context.Background(),
Gui: g,
elements: make(map[string]widgets.Element),
}, nil
}
// GetElement gets an element by name
func (c *Console) GetElement(name string) (widgets.Element, error) {
e, ok := c.elements[name]
if ok {
return e, nil
}
return nil, fmt.Errorf("element %q is not found", name)
}
// AddElement adds an element with name
func (c *Console) AddElement(name string, element widgets.Element) {
c.elements[name] = element
}
func (c *Console) setContentByName(name string, content string) error {
v, err := c.GetElement(name)
if err != nil {
return err
}
if content == "" {
return v.Close()
}
if err := v.Show(); err != nil {
return err
}
v.SetContent(content)
_, err = c.Gui.SetViewOnTop(name)
return err
}
func (c *Console) doRun() error {
defer c.Close()
if hd, _ := os.LookupEnv("HARVESTER_DASHBOARD"); hd == "true" {
c.SetManagerFunc(c.layoutDashboard)
} else {
c.SetManagerFunc(c.layoutInstall)
}
if err := setGlobalKeyBindings(c.Gui); err != nil {
return err
}
if err := c.MainLoop(); err != nil && err != gocui.ErrQuit {
return err
}
return nil
}
func setGlobalKeyBindings(g *gocui.Gui) error {
g.InputEsc = true
if Debug {
if err := g.SetKeybinding("", gocui.KeyCtrlC, gocui.ModNone, quit); err != nil {
return err
}
}
return nil
}
func quit(g *gocui.Gui, v *gocui.View) error {
return gocui.ErrQuit
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
web/_benchmark/echo/main.go
|
package main
import (
"log"
"os"
"net/http"
"github.com/labstack/echo"
)
const (
// ContentTypeJSON is the json content type.
ContentTypeJSON = "application/json; charset=UTF-8"
// HeaderContentLength is a header.
HeaderContentLength = "Content-Length"
// HeaderContentType is a header.
HeaderContentType = "Content-Type"
// HeaderServer is a header.
HeaderServer = "Server"
// ServerName is a header.
ServerName = "golang"
// MessageText is a string.
MessageText = "Hello, World!"
)
var (
// MessageBytes is the raw serialized message.
MessageBytes = []byte(`{"message":"Hello, World!"}`)
)
type message struct {
Message string `json:"message"`
}
func port() string {
envPort := os.Getenv("PORT")
if len(envPort) != 0 {
return envPort
}
return "8080"
}
func jsonHandler(c echo.Context) error {
return c.JSON(http.StatusOK, &message{Message: MessageText})
}
func main() {
app := echo.New()
app.GET("/json", jsonHandler)
log.Fatal(app.Start(":" + port()))
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import sys
from setuptools import setup
from symvarsub.numtransform import NumTransformer
pkg_name = 'symvarsub'
cmdclass = {}
ext_modules = []
def _path_under_setup(*args):
return os.path.join(os.path.dirname(__file__), *args)
if len(sys.argv) > 1 and '--help' not in sys.argv[1:] and sys.argv[1] not in (
'--help-commands', 'egg_info', 'clean', '--version'):
from pycodeexport import pce_build_ext, PCEExtension
from symvarsub.numtransform._setup_numtransform import prebuild
cmdclass = {'build_ext': pce_build_ext}
ext_modules = [
PCEExtension(
pkg_name + '.numtransform.transform_wrapper',
sources=[],
build_files=[_path_under_setup('symvarsub', 'numtransform',
'transform_wrapper.pyx')],
dist_files=[(_path_under_setup('symvarsub', 'numtransform', 'transform_template.f90'),
None)],
build_callbacks=[
(
prebuild,
(_path_under_setup('symvarsub', 'numtransform', 'transform_wrapper.pyx'),), {}
)
],
link_ext=False,
logger=True,
)
]
RELEASE_VERSION = os.environ.get('SYMVARSUB_RELEASE_VERSION', '')
# http://conda.pydata.org/docs/build.html#environment-variables-set-during-the-build-process
CONDA_BUILD = os.environ.get('CONDA_BUILD', '0') == '1'
if CONDA_BUILD:
try:
RELEASE_VERSION = 'v' + open(
'__conda_version__.txt', 'rt').readline().rstrip()
except IOError:
pass
release_py_path = os.path.join(pkg_name, '_release.py')
if len(RELEASE_VERSION) > 1 and RELEASE_VERSION[0] == 'v':
TAGGED_RELEASE = True
__version__ = RELEASE_VERSION[1:]
else:
TAGGED_RELEASE = False
# read __version__ attribute from _release.py:
exec(open(release_py_path).read())
classifiers = [
"Development Status :: 3 - Alpha",
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
]
pkgs = [
pkg_name,
pkg_name + '.numtransform'
]
tests = [
'symvarsub.tests',
]
descr = 'Convenience functions for use with sympy.'
deps = ['pycompilation', 'pycodeexport', 'numpy', 'sympy', 'mako',
'cython', 'future']
setup_kwargs = dict(
name=pkg_name,
version=__version__,
description=descr,
classifiers=classifiers,
author='Björn Dahlgren',
author_email='[email protected]',
url='https://github.com/bjodah/' + pkg_name,
license='BSD',
packages=pkgs + tests,
ext_modules=ext_modules,
cmdclass=cmdclass,
setup_requires=deps,
install_requires=deps,
eager_resources=[os.path.join('symvarsub', 'numtransform', path)
for path in NumTransformer.build_files],
)
if __name__ == '__main__':
try:
if TAGGED_RELEASE:
# Same commit should generate different sdist
# depending on tagged version (set SYMVARSUB_RELEASE_VERSION)
# this will ensure source distributions contain the correct version
shutil.move(release_py_path, release_py_path+'__temp__')
open(release_py_path, 'wt').write(
"__version__ = '{}'\n".format(__version__))
setup(**setup_kwargs)
finally:
if TAGGED_RELEASE:
shutil.move(release_py_path+'__temp__', release_py_path)
|
[] |
[] |
[
"CONDA_BUILD",
"SYMVARSUB_RELEASE_VERSION"
] |
[]
|
["CONDA_BUILD", "SYMVARSUB_RELEASE_VERSION"]
|
python
| 2 | 0 | |
killer2.py
|
#!/usr/bin/env python
#
# ****** External test app for killing flows ******
#---------------------------------> Test app that kills the flows
import argparse
import os
import hpsdnclient as hp
from hpsdnclient.datatypes import Flow, Match, Action
#---------------------------------> certifi and requests modules
import certifi
import requests
requests.packages.urllib3.disable_warnings()
#---------------------------------> main
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--ip1', type=str,
help="IP address of the first host to kill.",
required=True)
parser.add_argument('--ip2', type=str,
help="IP address of the second host to kill.",
required=True)
args = parser.parse_args()
kill_flow(args.ip1, args.ip2)
#---------------------------------> kill the flows
def kill_flow(ip, ip2):
#---------------------------------> connection settings read from the environment
controller = os.getenv("SDNCTL")
user = os.getenv("SDNUSER")
password = os.getenv("SDNPASS")
#---------------------------------> authenticate against the API
auth = hp.XAuthToken(user=user, password=password, server=controller)
api = hp.Api(controller=controller, auth=auth)
#---------------------------------> identify the hosts whose traffic we want to kill
match = Match(eth_type="ipv4", ipv4_src=ip)    #----> source IP and IP type of host 1
match2 = Match(eth_type="ipv4", ipv4_src=ip2)  #----> source IP and IP type of host 2
action = Action(output=0)  #----> set the output action to 0 (drop)
flow = Flow(priority=30000, match=match, actions=action, hard_timeout=30)   #----> flow 1 and its details
flow2 = Flow(priority=30000, match=match2, actions=action, hard_timeout=30) #----> flow 2 and its details
switches = api.get_datapaths()  #----> fetch the switches
for s in switches:  #----> walk the switches
api.add_flows(s.dpid, flow)   #----> install the drop flow for host 1 on this switch
api.add_flows(s.dpid, flow2)  #----> install the drop flow for host 2 on this switch
if __name__ == "__main__":
main()
|
[] |
[] |
[
"SDNPASS",
"SDNUSER",
"SDNCTL"
] |
[]
|
["SDNPASS", "SDNUSER", "SDNCTL"]
|
python
| 3 | 0 | |
venv/lib/python3.8/site-packages/nbclient/tests/conftest.py
|
import os
# This is important for ipykernel to show the same string
# instead of randomly generated file names in outputs.
# See: https://github.com/ipython/ipykernel/blob/360685c6/ipykernel/compiler.py#L50-L55
os.environ["IPYKERNEL_CELL_NAME"] = "<IPY-INPUT>"
|
[] |
[] |
[
"IPYKERNEL_CELL_NAME"
] |
[]
|
["IPYKERNEL_CELL_NAME"]
|
python
| 1 | 0 | |
python/dnn/gan.py
|
import numpy as np
import tensorflow as tf
import numpy.random as rnd
from sklearn import mixture
from common.gen_samples import *
from common.nn_utils import get_train_batches
import os
import random  # used by set_random_seeds below; make the dependency explicit
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
"""
A simple [conditional|Info] GAN with fully connected layers for both generator and discriminator.
Also supports AnoGAN.
See dnn/test_gan.py for usage.
References:
[1] Generative Adversarial Nets by Ian J. Goodfellow, Jean Pouget-Abadie, et al., NIPS 2014
[2] Conditional Generative Adversarial Nets by Mehdi Mirza and Simon Osindero, 2014
[3] Unsupervised Anomaly Detection with Generative Adversarial Networks to Guide Marker Discovery
by Thomas Schlegl, Philipp Seebock, Sebastian M. Waldstein, Ursula Schmidt-Erfurth, Georg Langs, IPMI 2017
[4] InfoGAN: Interpretable Representation Learning by Information Maximizing Generative Adversarial Nets
by Xi Chen, Yan Duan, Rein Houthooft, John Schulman, Ilya Sutskever, Pieter Abbeel
"""
TINY = 1e-8 # as in virtually every InfoGAN implementation on the internet
def set_random_seeds(py_seed=42, np_seed=42, tf_seed=42):
random.seed(py_seed)
rnd.seed(np_seed)
tf.set_random_seed(tf_seed)
class Listener(object):
def __init__(self):
pass
def __call__(self, gan, epoch, epoch_start_tm):
pass
def fit_gmm(x, val_x, min_k=1, max_k=10):
cv_type = 'diag' # ['spherical', 'tied', 'diag', 'full']
lowest_bic = np.infty
bic = []
best_gmm = None
for k in range(min_k, max_k+1):
gmm = mixture.GaussianMixture(n_components=k, covariance_type=cv_type)
gmm.fit(x)
bic.append(gmm.bic(val_x))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
return best_gmm, lowest_bic, bic
def get_cluster_labels(x, min_k=1, max_k=10):
""" Fits data to a Gaussian Mixture Model and assigns clusters """
gmm, _, _ = fit_gmm(x, x, min_k=min_k, max_k=max_k)
logger.debug("best GMM k: %d" % (gmm.n_components))
y = gmm.predict(x)
# logger.debug("y:\n%s" % (str(y)))
return y, gmm
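# A hedged usage sketch for the two GMM helpers above (synthetic data, not part
# of the original module):
#
#   x = np.vstack([rnd.normal(0.0, 1.0, size=(100, 2)),
#                  rnd.normal(5.0, 1.0, size=(100, 2))])
#   y, gmm = get_cluster_labels(x, min_k=1, max_k=5)  # BIC should select k=2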
def get_nn_layer(layers, layer_from_top=1):
return layers[len(layers) - layer_from_top]
class GAN(object):
""" A GAN or a conditional GAN for simple i.i.d data """
def __init__(self, data_dim=1, discr_layer_nodes=None, discr_layer_activations=None,
gen_input_dim=None, gen_layer_nodes=None, gen_layer_activations=None,
label_smoothing=False, smoothing_prob=0.9, info_gan=False, info_gan_lambda=1.0,
conditional=False, n_classes=0, pvals=None, enable_ano_gan=False,
n_epochs=10, batch_size=25, shuffle=False, learning_rate=0.005,
l2_lambda=0.001, listener=None, use_adam=False):
""" Create the generator-discriminator networks
:param data_dim: int
number of input dimensions in original data
:param discr_layer_nodes: list of int
number of nodes in each discriminator layer (excluding input)
:param discr_layer_activations: list
list of activation functions for each discriminator layer (excluding input)
:param gen_input_dim: int
number of input dimensions in input generator samples
:param gen_layer_nodes: list of int
number of nodes in each generator layer (excluding input)
:param gen_layer_activations: list
list of activation functions for each generator layer (excluding input)
:param label_smoothing: bool
if True, then use one-sided label smoothing for discriminator loss
:param smoothing_prob: float
label-smoothing probability
:param info_gan: bool
if True, then use InfoGAN, else simple or conditional GAN
:param info_gan_lambda: float
InfoGAN regularization penalty
:param conditional: bool
if True, then use Conditional GAN, else simple or InfoGAN
:param n_classes:
number of class labels in conditional mode
:param pvals: np.array(dtype=np.float32)
probability of each class
:param enable_ano_gan: bool
whether to enable AnoGAN network (for anomaly detection)
:param n_epochs: int
max number of epochs for training
:param batch_size: int
mini-batch size for training
:param shuffle: bool
whether to shuffle the data in each epoch during training
:param learning_rate: float
:param l2_lambda: float
:param listener: Listener
call-back function that gets called at the end of each training epoch
:param use_adam: bool
whether to use ADAM. The default is GradientDescent
"""
self.label_smoothing = label_smoothing
self.smoothing_prob = smoothing_prob
self.info_gan = info_gan
self.info_gan_lambda = info_gan_lambda
self.conditional = conditional
self.n_classes = n_classes
self.pvals = pvals
self.enable_ano_gan = enable_ano_gan
self.n_epochs = n_epochs
self.batch_size = batch_size
self.shuffle = shuffle
self.data_dim = data_dim
self.learning_rate = learning_rate
self.l2_lambda = l2_lambda
self.listener = listener
self.use_adam = use_adam
# first create the generator network
self.gen_input_dim = gen_input_dim
self.gen_layer_nodes = gen_layer_nodes
self.gen_layer_activations = gen_layer_activations
self.z = self.gen = None
# now, create the discriminator network
self.discr_layer_nodes = discr_layer_nodes
self.discr_layer_activations = discr_layer_activations
self.x = self.y = None
self.discr_data = self.discr_gen = None
self.discr_loss = self.gen_loss = self.discr_training_op = self.gen_training_op = None
# InfoGAN variables and losses
self.q_network = self.q_pred = None
self.info_gan_loss = None
# AnoGAN variables and losses
self.ano_gan_lambda = None
self.ano_z = self.ano_gan_net_G = self.ano_gan_net_D = None
self.ano_gan_training_op = self.ano_gan_loss = None
self.ano_gan_loss_R = self.ano_gan_loss_D = self.ano_gan_info_loss = None
self.ano_gan_q_network = None
# Tensoflow session object
self.session = None
self.unif_lo = 0.0 # -1.0
self.unif_hi = 1.0
if self.conditional and self.info_gan:
raise ValueError("Only one of conditional or info_gan should be true")
if (self.conditional or self.info_gan) and self.pvals is None:
raise ValueError("pvals is required for ConditionalGAN and InfoGAN")
self.init_network()
def init_network(self):
self.x = tf.placeholder(tf.float32, shape=(None, self.data_dim), name="x")
self.z = tf.placeholder(tf.float32, shape=(None, self.gen_input_dim), name="z")
if self.conditional:
if self.n_classes <= 0:
raise ValueError("n_classes must be greater than 1 for conditional GAN")
self.y = tf.placeholder(tf.float32, shape=(None, self.n_classes), name="y")
with tf.variable_scope("GAN"):
# here will create the generator and discriminator networks with initial reuse=False
self.gen = self.generator(z=self.z, y=self.y, reuse_gen=False)
self.discr_data, self.discr_gen = self.discriminator(x=self.x, y=self.y, reuse_discr=False)
if not self.label_smoothing:
discr_loss_data = -tf.log(tf.nn.sigmoid(get_nn_layer(self.discr_data, layer_from_top=1)))
else:
logger.debug("Label smoothing enabled with smoothing probability: %f" % self.smoothing_prob)
discr_logit = get_nn_layer(self.discr_data, layer_from_top=1)
discr_loss_data = tf.nn.sigmoid_cross_entropy_with_logits(logits=discr_logit,
labels=tf.ones(shape=tf.shape(discr_logit)) * self.smoothing_prob)
discr_gen_logit = get_nn_layer(self.discr_gen, layer_from_top=1)
discr_gen_probs = tf.nn.sigmoid(discr_gen_logit)
self.discr_loss = tf.reduce_mean(discr_loss_data - tf.log(1 - discr_gen_probs))
self.gen_loss = tf.reduce_mean(-tf.log(discr_gen_probs))
self.info_gan_loss = tf.constant(0.0)
if self.info_gan:
logger.debug("Adding InfoGAN regularization")
with tf.variable_scope("InfoGAN"):
# The last-but-one layer of the discriminator (when the input is from
# fake generated data) will be the input to category prediction layer.
# The expectation is w.r.t generator output.
self.q_network = self.init_info_gan_network(get_nn_layer(self.discr_gen, layer_from_top=2),
reuse=False)
# the below will be used to predict category for debug; it is not required for training
self.q_pred = self.init_info_gan_network(get_nn_layer(self.discr_data, layer_from_top=2),
reuse=True)
# get softmax output layer of q_network that predicts class
q_out = get_nn_layer(self.q_network, layer_from_top=1)
# compute entropy of class predictions
self.info_gan_loss = self.marginal_mutual_info(q_out, self.pvals)
        all_vars = tf.trainable_variables()
        for v in all_vars: logger.debug(v.name)
        g_params = [v for v in all_vars if v.name.startswith('GAN/G/')]
        d_params = [v for v in all_vars if v.name.startswith('GAN/D/')]
        q_params = [v for v in all_vars if v.name.startswith('InfoGAN/')]
if self.info_gan and len(q_params) == 0:
# Just to be sure we do not have programmatic errors
raise ValueError("No q_params found for InfoGAN")
if self.l2_lambda > 0:
# add L2 regularization loss
logger.debug("Adding L2 regularization")
l2_loss_g, l2_loss_d, l2_loss_q = self.get_l2_regularizers(g_params, d_params, q_params)
self.gen_loss += self.l2_lambda * l2_loss_g
self.discr_loss += self.l2_lambda * l2_loss_d
if self.info_gan:
self.info_gan_loss += self.l2_lambda * l2_loss_q
g_params.extend(q_params)
d_params.extend(q_params)
self.gen_training_op = self.training_op(self.gen_loss + self.info_gan_lambda * self.info_gan_loss,
var_list=g_params, use_adam=self.use_adam)
self.discr_training_op = self.training_op(self.discr_loss + self.info_gan_lambda * self.info_gan_loss,
var_list=d_params, use_adam=self.use_adam)
if self.enable_ano_gan:
# Prepare variables required for AnoGAN functionality
#
            # Note: AnoGAN functionality will come into use only *after* the
            # GAN (simple|conditional|InfoGAN) has been fully trained.
self.ano_gan_lambda = tf.placeholder(tf.float32, shape=(), name="ano_gan_lambda")
self.ano_z = tf.Variable(initial_value=tf.zeros([1, self.gen_input_dim]), trainable=True, name="ano_z")
with tf.variable_scope("GAN", reuse=True):
self.ano_gan_net_G, self.ano_gan_net_D = self.init_ano_gan_network(x=self.x, y=self.y, z=self.ano_z)
ano_gan_G, ano_gan_D, ano_gan_D_features = self.ano_gan_outputs()
# reconstruction loss: generate synthetic data in original
# feature space that is close to input data
self.ano_gan_loss_R = tf.reduce_sum(tf.abs(tf.subtract(self.x, ano_gan_G)))
# ano_gan_loss_R = tf.nn.l2_loss(tf.subtract(self.x, ano_gan_G))
# discrimination loss: encourage generated data to be
# similar to real data
self.ano_gan_loss_D = tf.reduce_sum(-tf.log(tf.nn.sigmoid(ano_gan_D)))
self.ano_gan_info_loss = tf.constant(0.0)
if self.info_gan:
# apply appropriate variable scope for reuse
with tf.variable_scope("InfoGAN"):
# The last-but-one layer of the discriminator will be the input to
# category prediction layer. The expectation is w.r.t generator output.
self.ano_gan_q_network = self.init_info_gan_network(ano_gan_D_features, reuse=True)
# Compute the InfoGAN entropy regularization loss for
# AnoGAN with the output of ano_gan_q_network
self.ano_gan_info_loss = self.marginal_mutual_info(get_nn_layer(self.ano_gan_q_network,
layer_from_top=1),
self.pvals)
self.ano_gan_loss = (1 - self.ano_gan_lambda) * self.ano_gan_loss_R + \
self.ano_gan_lambda * (self.ano_gan_loss_D + self.ano_gan_info_loss)
self.ano_gan_training_op = self.training_op(self.ano_gan_loss, var_list=[self.ano_z], use_adam=self.use_adam)
def marginal_mutual_info(self, q_c_x, c, include_h_c=False):
""" Compute avg. entropy of probability distributions arcoss all rows of q_c_x
Each row of q_c_x contains one probability distribution (likely computed with softmax)
"""
mi = -tf.reduce_mean(tf.reduce_sum(tf.multiply(c, tf.log(q_c_x + TINY)), axis=1))
if include_h_c:
# usually this is constant; hence add this only if asked for
mi += -tf.reduce_mean(tf.reduce_sum(c * tf.log(c + TINY), axis=1))
return mi
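    # Illustrative numeric check of the cross-entropy term above (hypothetical
    # values, computed with plain numpy rather than the TF graph):
    #   q = np.array([[0.7, 0.2, 0.1]]); c = np.array([0.5, 0.3, 0.2])
    #   -np.mean(np.sum(c * np.log(q + 1e-8), axis=1))  # ~1.12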
def get_l2_regularizers(self, g_params, d_params, q_params=None):
""" Returns L2 regularizers for generator and discriminator variables
:param g_params: list of tf.Variable
The generator parameters
:param d_params: list of tf.Variable
The discriminator parameters
:param q_params: list of tf.Variable
The InfoGAN regularization parameters
:return: generator, discriminator, InfoGAN L2 regularizer losses
"""
l2_loss_g = 0.0
l2_loss_d = 0.0
l2_loss_q = 0.0
for v in g_params:
l2_loss_g += tf.nn.l2_loss(v)
for v in d_params:
l2_loss_d += tf.nn.l2_loss(v)
if q_params is not None:
for v in q_params:
l2_loss_q += tf.nn.l2_loss(v)
return l2_loss_g, l2_loss_d, l2_loss_q
def generator(self, z, y=None, reuse_gen=False):
inp = z
if y is not None:
inp = tf.concat(values=[z, y], axis=1)
with tf.variable_scope('G'):
gen_layer_names = ["g_%d" % (i+1) for i in range(len(self.gen_layer_nodes))]
gen = self.gan_construct(inp, self.gen_layer_nodes, names=gen_layer_names,
activations=self.gen_layer_activations, reuse=reuse_gen)
return gen
def discriminator(self, x, y=None, reuse_discr=False, prep_gen_input=True):
""" Prepares the discriminator network
Note: Assumes that the generator network has already been created so that it
can be reused. The discriminator network is reused if reuse_discr=True.
:param x: np.ndarray
:param y: np.ndarray
TensorFlow Variable that expects one-hot encoded labels
:param reuse_discr: bool
Whether to reuse previously declared discriminator variables in the scope
:param prep_gen_input: bool
Whether to return the network that takes generator output as input to discriminator
:return: tf.Variable, tf.Variable
"""
with tf.variable_scope('D'):
discr_layer_names = ["d_%d" % (i+1) for i in range(len(self.discr_layer_nodes))]
inp = x if y is None else tf.concat(values=[x, y], axis=1)
discr_data = self.gan_construct(inp, self.discr_layer_nodes, names=discr_layer_names,
activations=self.discr_layer_activations, reuse=reuse_discr)
discr_gen = None
if prep_gen_input:
# the discriminator's loss for the generated data needs to back-propagate through
# the same network as that for the real data; hence reuse_discr=True
gen_out = get_nn_layer(self.gen, layer_from_top=1)
inp = gen_out if y is None else tf.concat(values=[gen_out, y], axis=1)
discr_gen = self.gan_construct(inp, self.discr_layer_nodes, names=discr_layer_names,
activations=self.discr_layer_activations, reuse=True)
return discr_data, discr_gen
def init_info_gan_network(self, x, reuse=False):
return self.gan_construct(x, n_neurons=[self.n_classes], names=["q_out"],
activations=[tf.nn.softmax], reuse=reuse)
def init_session(self):
self.session = tf.Session()
init = tf.global_variables_initializer()
self.session.run(init)
def training_op(self, loss, var_list=None, use_adam=False):
if use_adam:
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
else:
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(self.learning_rate, global_step,
200, 0.96, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
return optimizer.minimize(loss, var_list=var_list)
def init_ano_gan_network(self, x=None, y=None, z=None):
# here we assume that all networks have already been created
# and hence we will set reuse=True.
# Might be redundant if reuse=True before entering this method.
ano_gan_net_G = self.generator(z=z, y=y, reuse_gen=True)
ano_gan_G = ano_gan_net_G[len(ano_gan_net_G) - 1]
ano_gan_net_D, _ = self.discriminator(x=ano_gan_G, y=y, reuse_discr=True, prep_gen_input=False)
return ano_gan_net_G, ano_gan_net_D
def ano_gan_outputs(self):
""" Returns layers of generator and discrminator which will be used by AnoGAN
Returns the last layers of discriminator and generator,
and last-but-one of discriminator. The last-but-one layer of
discriminator is used for the entropy regularization if the GAN is InfoGAN variety.
"""
return self.ano_gan_net_G[len(self.ano_gan_net_G) - 1], \
self.ano_gan_net_D[len(self.ano_gan_net_D) - 1], \
self.ano_gan_net_D[len(self.ano_gan_net_D) - 2] if self.info_gan else None
def get_gen_input_samples(self, n=1, gen_y=False):
if gen_y and self.pvals is None:
raise ValueError("pvals is required")
y = None
if gen_y:
y = np.random.multinomial(1, pvals=self.pvals, size=n).astype(float)
return np.random.uniform(low=self.unif_lo, high=self.unif_hi, size=(n, self.gen_input_dim)), y
def get_gen_output_samples(self, z, y=None):
feed_dict = {self.z: z}
if self.conditional: feed_dict.update({self.y: y})
x = self.session.run([get_nn_layer(self.gen, layer_from_top=1)], feed_dict=feed_dict)[0]
return x
def gan_layer(self, x, n_neurons, name, activation=None, reuse=False):
with tf.variable_scope(name, reuse=reuse):
n_inputs = int(x.get_shape()[1])
stddev = 2. / np.sqrt(n_inputs)
init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev)
W = tf.get_variable("W", initializer=init)
b = tf.get_variable("b", initializer=tf.zeros([n_neurons]))
Z = tf.matmul(x, W) + b
if activation is not None:
return activation(Z)
else:
return Z
def gan_construct(self, x, n_neurons, names, activations, reuse=False):
layer_input = x
layers = list()
for i, name in enumerate(names):
hidden = self.gan_layer(layer_input, n_neurons=n_neurons[i], name=names[i],
activation=activations[i], reuse=reuse)
layers.append(hidden)
layer_input = hidden
return layers
def fit(self, x, y=None):
if self.session is None:
self.init_session()
fit_tm = Timer()
for epoch in range(self.n_epochs):
tm = Timer()
i = 0
for x_batch, y_batch in get_train_batches(x, y=y, batch_size=self.batch_size, shuffle=self.shuffle):
# for the discriminator, use the true y labels
z, _ = self.get_gen_input_samples(n=x_batch.shape[0], gen_y=False)
feed_dict_discr = {self.x: x_batch, self.z: z}
if self.conditional: feed_dict_discr.update({self.y: y_batch})
self.session.run([self.discr_training_op], feed_dict=feed_dict_discr)
                if i % 1 == 0:  # train the generator every batch (use i % 2 == 0 to train it half as often as the discriminator)
# z, y_ = self.get_gen_input_samples(n=x_batch.shape[0], gen_y=False)
feed_dict_gen = {self.z: z}
if self.conditional: feed_dict_gen.update({self.y: y_batch})
self.session.run([self.gen_training_op], feed_dict=feed_dict_gen)
i += 1
if self.listener is not None:
self.listener(self, epoch=epoch, epoch_start_tm=tm)
logger.debug(fit_tm.message("GAN fitted (max epochs: %d)" % self.n_epochs))
def get_discriminator_probability(self, x, y=None):
""" Returns the probability of the input under the current discriminator model
:param x: np.ndarray
:param y: np.array
This is like a list of integers. Should contain the class labels (*not* one-hot-encoded).
:return: np.array
Probability of each input data
"""
discr_data_out = get_nn_layer(self.discr_data, layer_from_top=1)
if not self.conditional:
feed_dict_discr = {self.x: x}
probs = self.session.run([discr_data_out], feed_dict=feed_dict_discr)[0]
probs = probs.reshape(-1)
else:
feed_dict_discr = {self.x: x}
if y is not None:
y_one_hot = np.zeros(shape=(x.shape[0], self.n_classes), dtype=np.float32)
for i, c in enumerate(y):
y_one_hot[i, c] = 1.
feed_dict_discr.update({self.y: y_one_hot})
probs = self.session.run([discr_data_out], feed_dict=feed_dict_discr)[0]
probs = probs.reshape(-1)
else:
# marginalize over all classes
probs = np.zeros(x.shape[0], dtype=np.float32)
for c in range(self.n_classes):
y_one_hot = np.zeros(shape=(x.shape[0], self.n_classes), dtype=np.float32)
y_one_hot[:, c] = 1.
feed_dict_discr.update({self.y: y_one_hot})
probs_c = self.session.run([discr_data_out], feed_dict=feed_dict_discr)[0]
probs += self.pvals[c] * probs_c.reshape(-1)
return probs
def get_log_likelihood(self, x, n_samples=None, n_reps=10, gmm_min_k=2, gmm_max_k=10):
""" Returns the avg. and std. dev. of log-likelihood of samples in x under the trained GAN model
This is a simple but rough technique, and might not be very accurate.
        In the original GAN paper (Goodfellow et al. 2014), the authors
        employed a Parzen-windows-based technique. The Gaussian Mixture Model
        is a coarse approximation to it.
"""
if n_samples is None:
n_samples = x.shape[0]
ll = []
for i in range(n_reps):
z, y = self.get_gen_input_samples(n=n_samples, gen_y=self.conditional)
x_gen = self.get_gen_output_samples(z=z, y=y)
try:
gmm, _, _ = fit_gmm(x_gen, x_gen, min_k=gmm_min_k, max_k=gmm_max_k)
ll.append(np.mean(gmm.score_samples(x)))
except:
logger.warning("Exception in iter %d/%d of gmm: %s" % (i+1, n_reps, str(sys.exc_info()[0])))
ll = np.array(ll, dtype=np.float32)
return np.mean(ll), np.std(ll)
def get_anomaly_score_z(self, x, y_one_hot=None, z=None, ano_gan_lambda=0.1):
""" Get the anomaly score with an initialized z
This corresponds to one back-prop step in AnoGAN for computing
a reconstructed image, for the input test point x, starting from an initial z
:param x: np.ndarray (one row-vector)
Test instance whose image needs to be reconstructed
:param y_one_hot: np.ndarray (one row-vector)
:param z: np.ndarray (one row-vector)
            If this is None, a random z will be sampled, else the input z will be used
        :param ano_gan_lambda: float
        :return: gen_x, ano_z, loss, loss_R, loss_D
            gen_x: the reconstructed image for 'x' starting from latent representation 'z'
            ano_z: the optimal z computed by back-propagation
            loss: AnoGAN loss
            loss_R: reconstruction loss component of the AnoGAN loss
            loss_D: discrimination loss component of the AnoGAN loss
"""
if not self.enable_ano_gan:
raise RuntimeError("AnoGAN not enabled for this network")
if z is None:
z, _ = self.get_gen_input_samples(n=1)
# assign_z = self.ano_z.assign(z)
# self.session.run(assign_z)
# tf.Variable.load() is less expensive than adding new ops nodes to tf.Graph
self.ano_z.load(z, self.session)
ano_gan_G, ano_gan_D, _ = self.ano_gan_outputs()
feed_dict = {self.x: x, self.ano_gan_lambda: ano_gan_lambda}
if self.conditional:
feed_dict.update({self.y: y_one_hot})
self.session.run([self.ano_gan_training_op], feed_dict=feed_dict)
rets = self.session.run([ano_gan_G, self.ano_gan_loss, self.ano_z,
self.ano_gan_loss_R, self.ano_gan_loss_D, self.ano_gan_info_loss], feed_dict=feed_dict)
gen_x = rets[0]
loss = rets[1]
ano_z = rets[2]
loss_R = rets[3]
loss_D = rets[4] + rets[5]
# make z values in [lo, hi]
ano_z = self.clip(ano_z, lo=self.unif_lo, hi=self.unif_hi)
return gen_x, ano_z, loss, loss_R, loss_D
def get_anomaly_score_xy(self, x, y=None, z=None, ano_gan_lambda=0.1, tol=1e-3, max_iters=100):
""" Computes anomaly score per instance and y (if conditional)
:param x: np.ndarray
:param y: int
if y is None, and self.conditional==True, then pvals will be used
:param z: np.ndarray
:param tol: float
:param max_iters: int
:return: gen_x, z, loss, trace
"""
tm = Timer()
y_one_hot = None
if self.conditional:
if y is None:
y_one_hot = np.array(self.pvals, dtype=np.float32).reshape((1, -1))
else:
y_one_hot = np.zeros(shape=(1, self.n_classes), dtype=np.float32)
y_one_hot[0, y] = 1
gen_x, z, loss, loss_R, loss_D = self.get_anomaly_score_z(x, y_one_hot=y_one_hot, z=z, ano_gan_lambda=ano_gan_lambda)
losses = [loss]
losses_R = [loss_R]
losses_D = [loss_D]
trace = []
i = 0
prev_loss = np.inf
while i < max_iters and abs(loss - prev_loss) > tol:
prev_loss = loss
gen_x, z, loss, loss_R, loss_D = self.get_anomaly_score_z(x, y_one_hot=y_one_hot, z=z, ano_gan_lambda=ano_gan_lambda)
losses.append(loss)
losses_R.append(loss_R)
losses_D.append(loss_D)
trace.append(gen_x)
i += 1
logger.debug(tm.message("AnoGAN loss (iters: %d, final loss: %f)" % (i, losses[-1])))
# logger.debug("losses:\n%s" % (str(losses)))
return gen_x, z, loss, loss_R, loss_D, np.vstack(trace)
def clip(self, z, lo, hi):
z = np.minimum(np.maximum(z, lo), hi)
return z
def get_anomaly_score_x(self, x, ano_gan_lambda=0.1, tol=1e-3, max_iters=100, use_loss=True, mode_avg=True):
""" Try each label and return the generated instance with best metrics (loss or distance)
:param x: np.ndarray
:param tol: float
:param max_iters: int
:param use_loss: bool
if use_loss==True, then use the composite loss, else use the
euclidean distance to find best regenerated point when the GAN is conditional
:param mode_avg: bool
If self.conditional==True and mode_avg==True, then soft-membership
as defined by self.pvals will be used instead of individual
one-hot-encoding membership.
:return:
"""
if mode_avg or not self.conditional:
return self.get_anomaly_score_xy(x, y=None, z=None, ano_gan_lambda=ano_gan_lambda,
tol=tol, max_iters=max_iters)
gen_x = z = loss = loss_R = loss_D = trace = None
best_dist = np.inf
best_loss = np.inf
for y in range(self.n_classes):
gen_x_y, z_y, loss_y, loss_R_y, loss_D_y, trace_y = self.get_anomaly_score_xy(x, y=y, z=None,
ano_gan_lambda=ano_gan_lambda,
tol=tol, max_iters=max_iters)
if use_loss:
if loss_y < best_loss:
best_loss = loss_y
gen_x, z, loss, loss_R, loss_D, trace = (gen_x_y, z_y, loss_y, loss_R_y, loss_D_y, trace_y)
else:
dist = np.sum(np.square(np.subtract(x, gen_x_y)))
if dist < best_dist:
best_dist = dist
gen_x, z, loss, loss_R, loss_D, trace = (gen_x_y, z_y, loss_y, loss_R_y, loss_D_y, trace_y)
return gen_x, z, loss, loss_R, loss_D, trace
def get_anomaly_score(self, x, ano_gan_lambda=0.1, tol=1e-3, max_iters=100, use_loss=True, mode_avg=True):
""" Returns the anomaly score of test instance x
:param x: np.ndarray (one row-vector)
:param ano_gan_lambda: float
:param tol: float
loss tolerance to check for termination of back-propagation
steps when computing reconstruction image
:param max_iters: int
:param use_loss: bool
(applies only to conditional GAN and when mode_avg is False, default: True)
If true, then employs the AnoGAN loss when selecting the best category for test instance
:param mode_avg: bool
(applies only to conditional GAN, default: True)
:return:
"""
losses = np.zeros(x.shape[0], dtype=np.float32)
losses_R = np.zeros(x.shape[0], dtype=np.float32)
losses_D = np.zeros(x.shape[0], dtype=np.float32)
traces = []
new_x = np.zeros(shape=x.shape, dtype=x.dtype)
for i in range(x.shape[0]):
gen_x, z, loss, loss_R, loss_D, trace = self.get_anomaly_score_x(x[[i]], ano_gan_lambda=ano_gan_lambda,
tol=tol, max_iters=max_iters,
use_loss=use_loss, mode_avg=mode_avg)
new_x[i, :] = gen_x[0, :]
losses[i] = loss
losses_R[i] = loss_R
losses_D[i] = loss_D
traces.append(trace)
return new_x, losses, losses_R, losses_D, traces
def save_session(self, file_path, overwrite=False):
if tf.train.checkpoint_exists(file_path):
if overwrite:
logger.debug("Overwriting existing checkpoint for prefix %s" % file_path)
else:
logger.debug("Checkpoint already exists for prefix %s" % file_path)
return None
saver = tf.train.Saver()
save_path = saver.save(self.session, file_path)
logger.debug("Saved session to path %s" % save_path)
return save_path
def load_session(self, file_path):
if not tf.train.checkpoint_exists(file_path):
logger.debug("Checkpoint does not exist for prefix %s" % file_path)
return False
if self.session is None:
self.session = tf.Session()
saver = tf.train.Saver()
saver.restore(self.session, file_path)
logger.debug("Loaded saved session from path %s" % file_path)
return True
def close_session(self):
if self.session is not None:
self.session.close()
self.session = None
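# Hypothetical usage sketch (not part of the original module). The class name
# and constructor arguments below are assumptions inferred from the attributes
# set in __init__; the final generator layer is sized to data_dim and the final
# discriminator layer outputs a single logit:
#
#   gan = GAN(data_dim=x.shape[1], gen_input_dim=10,
#             gen_layer_nodes=[64, x.shape[1]],
#             gen_layer_activations=[tf.nn.leaky_relu, tf.nn.sigmoid],
#             discr_layer_nodes=[64, 1],
#             discr_layer_activations=[tf.nn.leaky_relu, None],
#             enable_ano_gan=True, n_epochs=100)
#   gan.fit(x)
#   x_recon, losses, losses_R, losses_D, traces = gan.get_anomaly_score(x_test)
#   gan.close_session()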
def get_gan_option_list():
parser = ArgumentParser()
parser.add_argument("--dataset", type=str, default="airline", required=False,
help="Dataset name")
parser.add_argument("--results_dir", action="store", default="./temp",
help="Folder where the generated metrics will be stored")
parser.add_argument("--randseed", action="store", type=int, default=42,
help="Random seed so that results can be replicated")
parser.add_argument("--label_smoothing", action="store_true", default=False,
help="Whether to use one-sided label smoothing")
parser.add_argument("--smoothing_prob", action="store", type=float, default=0.9,
help="Probability to use for one-sided label smoothing")
parser.add_argument("--ano_gan_lambda", action="store", type=float, default=0.1,
help="The AnoGAN penalty term that balances reconstruction loss and discriminative loss")
parser.add_argument("--info_gan", action="store_true", default=False,
help="Whether to use simple GAN or InfoGAN")
parser.add_argument("--info_gan_lambda", action="store", type=float, default=1.0,
help="The InfoGAN penalty term")
parser.add_argument("--conditional", action="store_true", default=False,
help="Whether to use simple GAN or Conditional GAN")
parser.add_argument("--ano_gan", action="store_true", default=False,
help="Whether to enable AnoGAN functionality")
parser.add_argument("--ano_gan_individual", action="store_true", default=False,
help="Whether to use each class individually for Conditional AnoGAN. "
"By default the pval metric will be used instead of one-hot-encoding during test evaluation")
parser.add_argument("--ano_gan_use_dist", action="store_true", default=False,
help="Whether to use euclidean dist-based reconstruction error for Conditional AnoGAN. "
"By default, the composite loss will be used")
parser.add_argument("--n_ano_gan_test", type=int, default=1, required=False,
help="Number of times AnoGAN loss will be computed for each test instance")
parser.add_argument("--budget", type=int, default=1, required=False,
help="Budget for feedback")
parser.add_argument("--n_epochs", type=int, default=200, required=False,
help="Max training epochs")
parser.add_argument("--train_batch_size", type=int, default=25, required=False,
help="Batch size for stochastic gradient descent based training methods")
parser.add_argument("--log_file", type=str, default="", required=False,
help="File path to debug logs")
parser.add_argument("--debug", action="store_true", default=False,
help="Whether to enable output of debug statements")
parser.add_argument("--plot", action="store_true", default=False,
help="Whether to plot figures")
return parser
class GanOpts(object):
def __init__(self, args):
self.dataset = args.dataset
self.results_dir = args.results_dir
self.randseed = args.randseed
self.label_smoothing = args.label_smoothing
self.smoothing_prob = args.smoothing_prob
self.ano_gan_lambda = args.ano_gan_lambda
self.ano_gan_individual = args.ano_gan_individual
self.ano_gan_use_dist = args.ano_gan_use_dist
self.info_gan = args.info_gan
self.info_gan_lambda = args.info_gan_lambda
self.conditional = args.conditional
        self.ano_gan = args.ano_gan
self.n_ano_gan_test = args.n_ano_gan_test
self.budget = args.budget
self.n_epochs = args.n_epochs
self.train_batch_size = args.train_batch_size
self.log_file = args.log_file
self.debug = args.debug
self.plot = args.plot
self.k = 0
def get_opts_name_prefix(self):
# ano_gan_sig = "_ano" if self.ano_gan else ""
info_gan_sig = "_info" if self.info_gan else ""
info_gan_lambda_sig = "" if self.info_gan_lambda == 1.0 else "_il%d" % int(self.info_gan_lambda*10)
cond_sig = "_cond" if self.conditional else ""
algo_sig = "%s%s_gan" % (cond_sig, info_gan_sig)
k_sig = "_k%d" % self.k if self.k > 0 else ""
smoothing_sig = "_ls%d" % (int(self.smoothing_prob*10)) if self.label_smoothing else ""
name = "%s%s%s%s%s_%d" % (self.dataset, algo_sig, k_sig, smoothing_sig, info_gan_lambda_sig, self.n_epochs)
return name
def get_alad_metrics_name_prefix(self):
return self.get_opts_name_prefix()
def str_opts(self):
name = self.get_alad_metrics_name_prefix()
s = "%s" % name
return s
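# Hypothetical usage sketch (not part of the original module): build options
# from the command line and derive the results-file name prefix.
#   args = get_gan_option_list().parse_args()
#   opts = GanOpts(args)
#   print(opts.str_opts())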
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
fastai/torch_core.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_torch_core.ipynb (unless otherwise specified).
__all__ = ['progress_bar', 'master_bar', 'subplots', 'show_image', 'show_titled_image', 'show_images', 'ArrayBase',
'ArrayImageBase', 'ArrayImage', 'ArrayImageBW', 'ArrayMask', 'tensor', 'set_seed', 'get_random_states',
'set_random_states', 'no_random', 'unsqueeze', 'unsqueeze_', 'apply', 'maybe_gather', 'to_detach', 'to_half',
'to_float', 'default_device', 'to_device', 'to_cpu', 'to_np', 'to_concat', 'TensorBase', 'TensorImageBase',
'TensorImage', 'TensorImageBW', 'TensorMask', 'TensorFlowField', 'TensorCategory', 'TensorMultiCategory',
'TitledTensorScalar', 'concat', 'Chunks', 'show_title', 'ShowTitle', 'TitledInt', 'TitledFloat', 'TitledStr',
'TitledTuple', 'get_empty_df', 'display_df', 'get_first', 'one_param', 'item_find', 'find_device', 'find_bs',
'np_func', 'Module', 'get_model', 'one_hot', 'one_hot_decode', 'params', 'trainable_params', 'norm_types',
'norm_bias_params', 'batch_to_samples', 'logit', 'num_distrib', 'rank_distrib', 'distrib_barrier',
'base_doc', 'doc', 'nested_reorder', 'make_cross_image', 'show_image_batch', 'requires_grad', 'init_default',
'cond_init', 'apply_leaf', 'apply_init', 'script_use_ctx', 'script_save_ctx', 'script_fwd', 'script_bwd',
'grad_module', 'flatten_check']
# Cell
from .imports import *
from .torch_imports import *
# Cell
#nbdev_comment _all_ = ['progress_bar','master_bar']
# Cell
if torch.cuda.is_available():
if torch.cuda.current_device()==0:
def_gpu = int(os.environ.get('DEFAULT_GPU') or 0)
if torch.cuda.device_count()>=def_gpu: torch.cuda.set_device(def_gpu)
torch.backends.cudnn.benchmark = True
# Cell
@delegates(plt.subplots, keep=True)
def subplots(nrows=1, ncols=1, figsize=None, imsize=3,suptitle=None, **kwargs):
if figsize is None:
h=nrows*imsize if suptitle is None or imsize>2 else nrows*imsize+0.6 #https://github.com/matplotlib/matplotlib/issues/5355
figsize=(ncols*imsize, h)
fig,ax = plt.subplots(nrows, ncols, figsize=figsize, **kwargs)
if suptitle is not None: fig.suptitle(suptitle)
if nrows*ncols==1: ax = array([ax])
return fig,ax
# Cell
def _fig_bounds(x):
r = x//32
return min(5, max(1,r))
# Cell
@delegates(plt.Axes.imshow, keep=True, but=['shape', 'imlim'])
def show_image(im, ax=None, figsize=None, title=None, ctx=None, **kwargs):
"Show a PIL or PyTorch image on `ax`."
# Handle pytorch axis order
if hasattrs(im, ('data','cpu','permute')):
im = im.data.cpu()
if im.shape[0]<5: im=im.permute(1,2,0)
elif not isinstance(im,np.ndarray): im=array(im)
# Handle 1-channel images
if im.shape[-1]==1: im=im[...,0]
ax = ifnone(ax,ctx)
if figsize is None: figsize = (_fig_bounds(im.shape[0]), _fig_bounds(im.shape[1]))
if ax is None: _,ax = plt.subplots(figsize=figsize)
ax.imshow(im, **kwargs)
if title is not None: ax.set_title(title)
ax.axis('off')
return ax
# Cell
@delegates(show_image, keep=True)
def show_titled_image(o, **kwargs):
"Call `show_image` destructuring `o` to `(img,title)`"
show_image(o[0], title=str(o[1]), **kwargs)
# Cell
@delegates(subplots)
def show_images(ims, nrows=1, ncols=None, titles=None, **kwargs):
"Show all images `ims` as subplots with `rows` using `titles`."
if ncols is None: ncols = int(math.ceil(len(ims)/nrows))
if titles is None: titles = [None]*len(ims)
axs = subplots(nrows, ncols, **kwargs)[1].flat
for im,t,ax in zip(ims, titles, axs): show_image(im, ax=ax, title=t)
# Cell
class ArrayBase(ndarray):
"An `ndarray` that can modify casting behavior"
@classmethod
def _before_cast(cls, x): return x if isinstance(x,ndarray) else array(x)
# Cell
class ArrayImageBase(ArrayBase):
"Base class for arrays representing images"
_show_args = {'cmap':'viridis'}
def show(self, ctx=None, **kwargs):
return show_image(self, ctx=ctx, **{**self._show_args, **kwargs})
# Cell
class ArrayImage(ArrayImageBase):
"An array representing an image"
pass
# Cell
class ArrayImageBW(ArrayImage):
"An array representing an image"
_show_args = {'cmap':'Greys'}
# Cell
class ArrayMask(ArrayImageBase):
"An array representing an image mask"
_show_args = {'alpha':0.5, 'cmap':'tab20', 'interpolation':'nearest'}
# Cell
@patch
def __array_eq__(self:Tensor,b):
return torch.equal(self,b) if self.dim() else self==b
# Cell
def _array2tensor(x):
if x.dtype==np.uint16: x = x.astype(np.float32)
    # Windows' default numpy int dtype is int32, while the torch tensor default int dtype is int64
# https://github.com/numpy/numpy/issues/9464
if sys.platform == "win32":
if x.dtype==np.int: x = x.astype(np.int64)
return torch.from_numpy(x)
# Cell
@use_kwargs_dict(dtype=None, device=None, requires_grad=False, pin_memory=False)
def tensor(x, *rest, **kwargs):
"Like `torch.as_tensor`, but handle lists too, and can pass multiple vector elements directly."
if len(rest): x = (x,)+rest
    # There was a PyTorch bug in DataLoader when using num_workers>0. Haven't confirmed if fixed
# if isinstance(x, (tuple,list)) and len(x)==0: return tensor(0)
res = (x if isinstance(x, Tensor)
else torch.tensor(x, **kwargs) if isinstance(x, (tuple,list))
else _array2tensor(x) if isinstance(x, ndarray)
else as_tensor(x.values, **kwargs) if isinstance(x, (pd.Series, pd.DataFrame))
else as_tensor(x, **kwargs) if hasattr(x, '__array__') or is_iter(x)
else _array2tensor(array(x), **kwargs))
if res.dtype is torch.float64: return res.float()
return res
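# Illustrative examples (not in the original file):
#   tensor(1, 2, 3)             # multiple scalars -> tensor([1, 2, 3])
#   tensor(np.array([1., 2.]))  # ndarray (float64 demoted) -> tensor([1., 2.])
#   tensor(pd.Series([1, 2]))   # pandas -> tensor([1, 2])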
# Cell
def set_seed(s, reproducible=False):
"Set random seed for `random`, `torch`, and `numpy` (where available)"
try: torch.manual_seed(s)
except NameError: pass
try: torch.cuda.manual_seed_all(s)
except NameError: pass
try: np.random.seed(s%(2**32-1))
except NameError: pass
random.seed(s)
if reproducible:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Cell
def get_random_states():
"Gets states for `random`, `torch`, and `numpy` random number generators"
return {'random_state':random.getstate(),
'numpy_state':np.random.get_state(),
'torch_state':torch.get_rng_state(),
'torch_cuda_state':torch.cuda.get_rng_state_all(),
'torch_deterministic':torch.backends.cudnn.deterministic,
'torch_benchmark':torch.backends.cudnn.benchmark}
# Cell
def set_random_states(random_state,numpy_state,torch_state,torch_cuda_state,torch_deterministic,torch_benchmark):
"Set states for `random`, `torch`, and `numpy` random number generators"
random.setstate(random_state)
np.random.set_state(numpy_state)
torch.set_rng_state(torch_state)
torch.cuda.set_rng_state_all(torch_cuda_state)
torch.backends.cudnn.deterministic=torch_deterministic
torch.backends.cudnn.benchmark=torch_benchmark
# Cell
@contextmanager
def no_random(seed=42,reproducible=True):
"Stores and retrieves state of random number generators. Sets random seed for `random`, `torch`, and `numpy`."
states = get_random_states()
set_seed(seed,reproducible=reproducible)
try:
yield #we are managing global variables
finally:
set_random_states(**states)
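# Illustrative usage (not in the original file):
#   with no_random(seed=0):
#       a = torch.rand(2)
#   with no_random(seed=0):
#       b = torch.rand(2)
#   # a and b are equal, and the outer RNG state is restored afterwards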
# Cell
def unsqueeze(x, dim=-1, n=1):
"Same as `torch.unsqueeze` but can add `n` dims"
for _ in range(n): x = x.unsqueeze(dim)
return x
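# Illustrative: unsqueeze(torch.zeros(3), dim=-1, n=2).shape == torch.Size([3, 1, 1])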
# Cell
def unsqueeze_(x, dim=-1, n=1):
"Same as `torch.unsqueeze_` but can add `n` dims"
for _ in range(n): x.unsqueeze_(dim)
return x
# Cell
def _fa_rebuild_tensor (cls, *args, **kwargs): return cls(torch._utils._rebuild_tensor_v2(*args, **kwargs))
def _fa_rebuild_qtensor(cls, *args, **kwargs): return cls(torch._utils._rebuild_qtensor (*args, **kwargs))
# Cell
def apply(func, x, *args, **kwargs):
"Apply `func` recursively to `x`, passing on args"
if is_listy(x): return type(x)([apply(func, o, *args, **kwargs) for o in x])
if isinstance(x,dict): return {k: apply(func, v, *args, **kwargs) for k,v in x.items()}
res = func(x, *args, **kwargs)
return res if x is None else retain_type(res, x)
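# Illustrative (not in the original file): nesting is preserved and tensor
# subclasses are retained:
#   apply(lambda t: t * 2, [tensor([1]), {'a': tensor([2])}])
#   # -> [tensor([2]), {'a': tensor([4])}]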
# Cell
def maybe_gather(x, axis=0):
"Gather copies of `x` on `axis` (if training is distributed)"
if num_distrib()<=1: return x
ndim = x.ndim
res = [x.new_zeros(*x.shape if ndim > 0 else (1,)) for _ in range(num_distrib())]
torch.distributed.all_gather(res, x.contiguous() if ndim > 0 else x[None])
return torch.cat(res, dim=axis) if ndim > 0 else torch.cat(res, dim=axis).mean()
# Cell
def to_detach(b, cpu=True, gather=True):
"Recursively detach lists of tensors in `b `; put them on the CPU if `cpu=True`."
def _inner(x, cpu=True, gather=True):
if not isinstance(x,Tensor): return x
x = x.detach()
if gather: x = maybe_gather(x)
return x.cpu() if cpu else x
return apply(_inner, b, cpu=cpu, gather=gather)
# Cell
def to_half(b):
"Recursively map lists of tensors in `b ` to FP16."
return apply(lambda x: x.half() if torch.is_floating_point(x) else x, b)
# Cell
def to_float(b):
"Recursively map lists of int tensors in `b ` to float."
return apply(lambda x: x.float() if torch.is_floating_point(x) else x, b)
# Cell
# None: True if available; True: error if not available; False: use CPU
defaults.use_cuda = None
# Cell
def default_device(use_cuda=-1):
"Return or set default device; `use_cuda`: None - CUDA if available; True - error if not available; False - CPU"
if use_cuda != -1: defaults.use_cuda=use_cuda
use = defaults.use_cuda or (torch.cuda.is_available() and defaults.use_cuda is None)
assert torch.cuda.is_available() or not use
return torch.device(torch.cuda.current_device()) if use else torch.device('cpu')
# Cell
def to_device(b, device=None, non_blocking=False):
"Recursively put `b` on `device`."
if defaults.use_cuda==False: device='cpu'
elif device is None: device=default_device()
def _inner(o):
if isinstance(o,Tensor): return o.to(device, non_blocking=non_blocking)
# if hasattr(o, "to_device"): return o.to_device(device)
return o
return apply(_inner, b)
# Cell
def to_cpu(b):
"Recursively map lists of tensors in `b ` to the cpu."
return to_device(b,'cpu')
# Cell
def to_np(x):
"Convert a tensor to a numpy array."
return apply(lambda o: o.data.cpu().numpy(), x)
# Cell
def to_concat(xs, dim=0):
"Concat the element in `xs` (recursively if they are tuples/lists of tensors)"
if not xs: return xs
if is_listy(xs[0]): return type(xs[0])([to_concat([x[i] for x in xs], dim=dim) for i in range_of(xs[0])])
if isinstance(xs[0],dict): return {k: to_concat([x[k] for x in xs], dim=dim) for k in xs[0].keys()}
#We may receive xs that are not concatenable (inputs of a text classifier for instance),
# in this case we return a big list
try: return retain_type(torch.cat(xs, dim=dim), xs[0])
except: return sum([L(retain_type(o_.index_select(dim, tensor(i)).squeeze(dim), xs[0])
for i in range_of(o_)) for o_ in xs], L())
# Cell
@patch
def set_meta(self:Tensor, x, as_copy=False):
"Set all metadata in `__dict__`"
if not hasattr(x,'__dict__'): return
# XXX: change to `deepcopy` once PyTorch 1.7.1 is out, and check nb 23 segmentation fit works
self.__dict__ = copy(x.__dict__) if as_copy else x.__dict__
# Cell
if not hasattr(torch,'as_subclass'): torch.as_subclass = torch.Tensor.as_subclass
# Cell
@patch
def as_subclass(self:Tensor, typ):
"Cast to `typ` and include `__dict__` and meta"
return retain_meta(self, torch.as_subclass(self, typ))
# Cell
def _torch_handled(args, opt, func):
if func not in opt: return False
for oks in opt[func]:
if all(isinstance(arg,ok) for arg,ok in zip(args,oks) if ok): return True
# Cell
# from https://github.com/pytorch/pytorch/blob/13c975684a220ec096216ec6468ccd0dc90ff50a/torch/_tensor.py#L34
def _rebuild_from_type(func, type, args, dict):
ret = func(*args).as_subclass(type)
ret.__dict__ = dict
return ret
# Cell
class TensorBase(Tensor):
"A `Tensor` which support subclass pickling, and maintains metadata when casting or after methods"
debug,_opt = False,defaultdict(list)
def __new__(cls, x, **kwargs):
res = cast(tensor(x), cls)
for k,v in kwargs.items(): setattr(res, k, v)
return res
@classmethod
def _before_cast(cls, x): return tensor(x)
def __repr__(self): return re.sub('tensor', self.__class__.__name__, super().__repr__())
def __reduce_ex__(self,proto):
torch.utils.hooks.warn_if_has_hooks(self)
args = (self.storage(), self.storage_offset(), tuple(self.size()), self.stride())
if self.is_quantized: args = args + (self.q_scale(), self.q_zero_point())
args = args + (self.requires_grad, OrderedDict())
f = torch._utils._rebuild_qtensor if self.is_quantized else torch._utils._rebuild_tensor_v2
return (_rebuild_from_type, (f, type(self), args, self.__dict__))
@classmethod
def register_func(cls, func, *oks): cls._opt[func].append(oks)
def __torch_function__(self, func, types, args=(), kwargs=None):
if self.debug and func.__name__ not in ('__str__','__repr__'): print(func, types, args, kwargs)
convert=False
if _torch_handled(args, self._opt, func): convert,types = type(self),(torch.Tensor,)
res = super().__torch_function__(func, types, args=args, kwargs=kwargs)
if convert: res = convert(res)
if isinstance(res, TensorBase): res.set_meta(self, as_copy=True)
return res
def new_tensor(self, size, dtype=None, device=None, requires_grad=False):
cls = type(self)
return self.as_subclass(Tensor).new_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad).as_subclass(cls)
def new_ones(self, data, dtype=None, device=None, requires_grad=False):
cls = type(self)
return self.as_subclass(Tensor).new_ones(data, dtype=dtype, device=device, requires_grad=requires_grad).as_subclass(cls)
def new(self, x=None):
cls = type(self)
res = self.as_subclass(Tensor).new() if x is None else self.as_subclass(Tensor).new(x)
return res.as_subclass(cls)
def requires_grad_(self, requires_grad=True):
# Workaround https://github.com/pytorch/pytorch/issues/50219
self.requires_grad = requires_grad
return self
# Cell
class TensorImageBase(TensorBase):
_show_args = ArrayImageBase._show_args
def show(self, ctx=None, **kwargs):
return show_image(self, ctx=ctx, **{**self._show_args, **kwargs})
# Cell
class TensorImage(TensorImageBase): pass
# Cell
class TensorImageBW(TensorImage): _show_args = ArrayImageBW._show_args
# Cell
class TensorMask(TensorImageBase):
_show_args = ArrayMask._show_args
def show(self, ctx=None, **kwargs):
codes = getattr(self, 'codes', None)
if codes is not None: kwargs = merge({'vmin': 0, 'vmax': len(codes)}, kwargs)
return super().show(ctx=ctx, **kwargs)
# Cell
for o in Tensor.__getitem__, Tensor.__ne__,Tensor.__eq__,Tensor.add,Tensor.sub,Tensor.mul,Tensor.div,Tensor.__rsub__,Tensor.__radd__,Tensor.matmul,Tensor.bmm:
TensorBase.register_func(o, TensorMask, TensorImageBase)
TensorBase.register_func(o, TensorImageBase, TensorMask)
TensorMask.register_func(torch.einsum, str, TensorImageBase, TensorMask)
TensorMask.register_func(torch.einsum, str, TensorMask, TensorImageBase)
# Cell
class TensorFlowField(TensorBase): pass
TensorImage.register_func(F.grid_sample, TensorImageBase, TensorFlowField)
# Cell
class TensorCategory(TensorBase): pass
TensorBase.register_func(Tensor.__getitem__, TensorImageBase, TensorCategory)
# Cell
class TensorMultiCategory(TensorCategory): pass
# Cell
class TitledTensorScalar(TensorBase):
"A tensor containing a scalar that has a `show` method"
def show(self, **kwargs): show_title(self.item(), **kwargs)
# Cell
@patch
def tensored(self:L):
"`mapped(tensor)`"
return self.map(tensor)
@patch
def stack(self:L, dim=0):
"Same as `torch.stack`"
return torch.stack(list(self.tensored()), dim=dim)
@patch
def cat (self:L, dim=0):
"Same as `torch.cat`"
return torch.cat (list(self.tensored()), dim=dim)
# Cell
def concat(*ls):
"Concatenate tensors, arrays, lists, or tuples"
if not len(ls): return []
it = ls[0]
if isinstance(it,torch.Tensor): res = torch.cat(ls)
elif isinstance(it,ndarray): res = np.concatenate(ls)
else:
res = itertools.chain.from_iterable(map(L,ls))
if isinstance(it,(tuple,list)): res = type(it)(res)
else: res = L(res)
return retain_type(res, it)
# Cell
class Chunks:
"Slice and int indexing into a list of lists"
def __init__(self, chunks, lens=None):
self.chunks = chunks
self.lens = L(map(len,self.chunks) if lens is None else lens)
self.cumlens = np.cumsum(0+self.lens)
self.totlen = self.cumlens[-1]
def __getitem__(self,i):
if isinstance(i,slice): return retain_type(self.getslice(i), old=self.chunks[0])
di,idx = self.doc_idx(i)
return retain_type(self.chunks[di][idx], old=self.chunks[0])
def getslice(self, i):
st_d,st_i = self.doc_idx(ifnone(i.start,0))
en_d,en_i = self.doc_idx(ifnone(i.stop,self.totlen+1))
res = [self.chunks[st_d][st_i:(en_i if st_d==en_d else sys.maxsize)]]
for b in range(st_d+1,en_d): res.append(self.chunks[b])
if st_d!=en_d and en_d<len(self.chunks): res.append(self.chunks[en_d][:en_i])
return concat(*res)
def doc_idx(self, i):
if i<0: i=self.totlen+i # count from end
docidx = np.searchsorted(self.cumlens, i+1)-1
cl = self.cumlens[docidx]
return docidx,i-cl
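# Illustrative usage (not in the original file):
#   c = Chunks([[0, 1, 2], [3, 4], [5]])
#   c[4]    # -> 4 (second chunk, local index 1)
#   c[1:5]  # -> [1, 2, 3, 4], spliced across chunks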
# Cell
def show_title(o, ax=None, ctx=None, label=None, color='black', **kwargs):
"Set title of `ax` to `o`, or print `o` if `ax` is `None`"
ax = ifnone(ax,ctx)
if ax is None: print(o)
elif hasattr(ax, 'set_title'):
t = ax.title.get_text()
if len(t) > 0: o = t+'\n'+str(o)
ax.set_title(o, color=color)
elif isinstance(ax, pd.Series):
while label in ax: label += '_'
ax = ax.append(pd.Series({label: o}))
return ax
# Cell
class ShowTitle:
"Base class that adds a simple `show`"
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledInt(Int, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledFloat(Float, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledStr(Str, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledTuple(fastuple, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
add_docs(TitledInt, "An `int` with `show`"); add_docs(TitledStr, "An `str` with `show`");
add_docs(TitledFloat, "A `float` with `show`"); add_docs(TitledTuple, "A `fastuple` with `show`")
# Cell
@patch
def truncate(self:TitledStr, n):
"Truncate self to `n`"
words = self.split(' ')[:n]
return TitledStr(' '.join(words))
# Cell
if not hasattr(pd.DataFrame,'_old_init'): pd.DataFrame._old_init = pd.DataFrame.__init__
# Cell
@patch
def __init__(self:pd.DataFrame, data=None, index=None, columns=None, dtype=None, copy=False):
if data is not None and isinstance(data, Tensor): data = to_np(data)
self._old_init(data, index=index, columns=columns, dtype=dtype, copy=copy)
# Cell
def get_empty_df(n):
"Return `n` empty rows of a dataframe"
df = pd.DataFrame(index = range(n))
return [df.iloc[i] for i in range(n)]
# Cell
def display_df(df):
"Display `df` in a notebook or defaults to print"
try: from IPython.display import display, HTML
except: return print(df)
display(HTML(df.to_html()))
# Cell
def get_first(c):
"Get the first element of c, even if c is a dataframe"
return getattr(c, 'iloc', c)[0]
# Cell
def one_param(m):
"First parameter in `m`"
return first(m.parameters())
# Cell
def item_find(x, idx=0):
"Recursively takes the `idx`-th element of `x`"
if is_listy(x): return item_find(x[idx])
if isinstance(x,dict):
key = list(x.keys())[idx] if isinstance(idx, int) else idx
return item_find(x[key])
return x
# Cell
def find_device(b):
"Recursively search the device of `b`."
return item_find(b).device
# Cell
def find_bs(b):
"Recursively search the batch size of `b`."
return item_find(b).shape[0]
# Cell
def np_func(f):
"Convert a function taking and returning numpy arrays to one taking and returning tensors"
def _inner(*args, **kwargs):
nargs = [to_np(arg) if isinstance(arg,Tensor) else arg for arg in args]
return tensor(f(*nargs, **kwargs))
functools.update_wrapper(_inner, f)
return _inner
# Cell
class Module(nn.Module, metaclass=PrePostInitMeta):
"Same as `nn.Module`, but no need for subclasses to call `super().__init__`"
def __pre_init__(self, *args, **kwargs): super().__init__()
def __init__(self): pass
# Cell
from torch.nn.parallel import DistributedDataParallel
# Cell
def get_model(model):
"Return the model maybe wrapped inside `model`."
return model.module if isinstance(model, (DistributedDataParallel, nn.DataParallel)) else model
# Cell
def one_hot(x, c):
"One-hot encode `x` with `c` classes."
res = torch.zeros(c, dtype=torch.uint8)
if isinstance(x, Tensor) and x.numel()>0: res[x] = 1.
else: res[list(L(x, use_list=None))] = 1.
return res
# Cell
def one_hot_decode(x, vocab=None):
return L(vocab[i] if vocab else i for i,x_ in enumerate(x) if x_==1)
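# Illustrative round trip (not in the original file):
#   one_hot([0, 2], 4)                  # -> tensor([1, 0, 1, 0], dtype=torch.uint8)
#   one_hot_decode(one_hot([0, 2], 4))  # -> [0, 2]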
# Cell
def params(m):
"Return all parameters of `m`"
return [p for p in m.parameters()]
# Cell
def trainable_params(m):
"Return all trainable parameters of `m`"
return [p for p in m.parameters() if p.requires_grad]
# Cell
norm_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d, nn.LayerNorm)
# Cell
def norm_bias_params(m, with_bias=True):
"Return all bias and BatchNorm parameters"
if isinstance(m, norm_types): return L(m.parameters())
res = L(m.children()).map(norm_bias_params, with_bias=with_bias).concat()
if with_bias and getattr(m, 'bias', None) is not None: res.append(m.bias)
return res
# Cell
def batch_to_samples(b, max_n=10):
"'Transposes' a batch to (at most `max_n`) samples"
if isinstance(b, Tensor): return retain_types(list(b[:max_n]), [b])
else:
res = L(b).map(partial(batch_to_samples,max_n=max_n))
return retain_types(res.zip(), [b])
# Cell
@patch
def interp_1d(x:Tensor, xp, fp):
"Same as `np.interp`"
slopes = (fp[1:]-fp[:-1])/(xp[1:]-xp[:-1])
incx = fp[:-1] - (slopes*xp[:-1])
locs = (x[:,None]>=xp[None,:]).long().sum(1)-1
locs = locs.clamp(0,len(slopes)-1)
return slopes[locs]*x + incx[locs]
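# Illustrative: tensor([0.5]).interp_1d(tensor([0., 1.]), tensor([0., 2.])) -> tensor([1.])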
# Cell
@patch
def pca(x:Tensor, k=2):
"Compute PCA of `x` with `k` dimensions."
x = x-torch.mean(x,0)
U,S,V = torch.svd(x.t())
return torch.mm(x,U[:,:k])
# Cell
def logit(x):
"Logit of `x`, clamped to avoid inf."
x = x.clamp(1e-7, 1-1e-7)
return -(1/x-1).log()
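# Illustrative: logit(tensor([0.5])) is 0, since 0.5 is the sigmoid of 0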
# Cell
def num_distrib():
"Return the number of processes in distributed training (if applicable)."
return int(os.environ.get('WORLD_SIZE', 0))
# Cell
def rank_distrib():
"Return the distributed rank of this process (if applicable)."
return int(os.environ.get('RANK', 0))
# Cell
def distrib_barrier():
"Place a synchronization barrier in distributed training"
if num_distrib() > 1 and torch.distributed.is_initialized(): torch.distributed.barrier()
# Cell
# Saving arrays requires pytables - optional dependency
try: import tables
except: pass
# Cell
def _comp_filter(lib='lz4',lvl=3): return tables.Filters(complib=f'blosc:{lib}', complevel=lvl)
# Cell
@patch
def save_array(p:Path, o, complib='lz4', lvl=3):
"Save numpy array to a compressed `pytables` file, using compression level `lvl`"
if isinstance(o,Tensor): o = to_np(o)
with tables.open_file(p, mode='w', filters=_comp_filter(lib=complib,lvl=lvl)) as f: f.create_carray('/', 'data', obj=o)
# Cell
@patch
def load_array(p:Path):
"Save numpy array to a `pytables` file"
with tables.open_file(p, 'r') as f: return f.root.data.read()
# Cell
def base_doc(elt):
"Print a base documentation of `elt`"
name = getattr(elt, '__qualname__', getattr(elt, '__name__', ''))
print(f'{name}{inspect.signature(elt)}\n{inspect.getdoc(elt)}\n')
print('To get a prettier result with hyperlinks to source code and documentation, install nbdev: pip install nbdev')
# Cell
def doc(elt):
"Try to use doc form nbdev and fall back to `base_doc`"
try:
from nbdev.showdoc import doc
doc(elt)
except: base_doc(elt)
# Cell
def nested_reorder(t, idxs):
"Reorder all tensors in `t` using `idxs`"
if isinstance(t, (Tensor,L)): return t[idxs]
elif is_listy(t): return type(t)(nested_reorder(t_, idxs) for t_ in t)
if t is None: return t
raise TypeError(f"Expected tensor, tuple, list or L but got {type(t)}")
# Cell
def make_cross_image(bw=True):
"Create a tensor containing a cross image, either `bw` (True) or color"
if bw:
im = torch.zeros(5,5)
im[2,:] = 1.
im[:,2] = 1.
else:
im = torch.zeros(3,5,5)
im[0,2,:] = 1.
im[1,:,2] = 1.
return im
# Cell
def show_image_batch(b, show=show_titled_image, items=9, cols=3, figsize=None, **kwargs):
"Display batch `b` in a grid of size `items` with `cols` width"
if items<cols: cols=items
rows = (items+cols-1) // cols
if figsize is None: figsize = (cols*3, rows*3)
fig,axs = plt.subplots(rows, cols, figsize=figsize)
for *o,ax in zip(*to_cpu(b), axs.flatten()): show(o, ax=ax, **kwargs)
# Cell
def requires_grad(m):
"Check if the first parameter of `m` requires grad or not"
ps = list(m.parameters())
return ps[0].requires_grad if len(ps)>0 else False
# Cell
def init_default(m, func=nn.init.kaiming_normal_):
"Initialize `m` weights with `func` and set `bias` to 0."
if func:
if hasattr(m, 'weight'): func(m.weight)
if hasattr(m, 'bias') and hasattr(m.bias, 'data'): m.bias.data.fill_(0.)
return m
# Cell
def cond_init(m, func):
"Apply `init_default` to `m` unless it's a batchnorm module"
if (not isinstance(m, norm_types)) and requires_grad(m): init_default(m, func)
# Cell
def apply_leaf(m, f):
"Apply `f` to children of `m`."
c = m.children()
if isinstance(m, nn.Module): f(m)
for l in c: apply_leaf(l,f)
# Cell
def apply_init(m, func=nn.init.kaiming_normal_):
"Initialize all non-batchnorm layers of `m` with `func`."
apply_leaf(m, partial(cond_init, func=func))
# Cell
def script_use_ctx(f):
"Decorator: create jit script and pass everything in `ctx.saved_variables to `f`, after `*args`"
sf = torch.jit.script(f)
def _f(ctx, *args, **kwargs): return sf(*args, *ctx.saved_variables, **kwargs)
return update_wrapper(_f,f)
# Cell
def script_save_ctx(static, *argidx):
"Decorator: create jit script and save args with indices `argidx` using `ctx.save_for_backward`"
def _dec(f):
sf = torch.jit.script(f)
def _f(ctx, *args, **kwargs):
if argidx:
save = [args[o] for o in argidx]
ctx.save_for_backward(*save)
            if not argidx: args = (ctx,)+args  # args is a tuple, so prepend with a tuple
return sf(*args, **kwargs)
if static: _f = staticmethod(_f)
return update_wrapper(_f,f)
return _dec
# Cell
def script_fwd(*argidx):
"Decorator: create static jit script and save args with indices `argidx` using `ctx.save_for_backward`"
return script_save_ctx(True, *argidx)
# Cell
def script_bwd(f):
"Decorator: create static jit script and pass everything in `ctx.saved_variables to `f`, after `*args`"
return staticmethod(script_use_ctx(f))
# Cell
def grad_module(cls):
"Decorator: convert `cls` into an autograd function"
class _c(nn.Module):
def forward(self, *args, **kwargs): return cls.apply(*args, **kwargs)
return _c
# Comes from 13b_metrics.ipynb, cell
def flatten_check(inp, targ):
"Check that `out` and `targ` have the same number of elements and flatten them."
inp,targ = TensorBase(inp.contiguous()).view(-1),TensorBase(targ.contiguous()).view(-1)
test_eq(len(inp), len(targ))
return inp,targ
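# Illustrative: flatten_check(torch.zeros(2, 3), torch.zeros(6)) returns two
# flat tensors of length 6; mismatched element counts raise an error.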
|
[] |
[] |
[
"DEFAULT_GPU",
"RANK",
"WORLD_SIZE"
] |
[]
|
["DEFAULT_GPU", "RANK", "WORLD_SIZE"]
|
python
| 3 | 0 | |
tools/harness-automation/autothreadharness/harness_controller.py
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ConfigParser
import logging
import os
import subprocess
import time
from autothreadharness import settings
logger = logging.getLogger(__name__)
HARNESS_SVN_VERSION_R44 = 1471
"""int: this is the first published release that miniweb was removed from Harness"""
def _try_kill(proc):
logger.info('Try kill process')
times = 1
while proc.poll() is None:
proc.kill()
time.sleep(5)
if proc.poll() is not None:
logger.info('Process has been killed')
break
logger.info('Trial %d failed', times)
times += 1
if times > 3:
raise SystemExit()
class HarnessController(object):
"""Harness service control
This controls harness service, including the harness back-end and front-end.
"""
harness = None
"""harness back-end"""
miniweb = None
"""harness front-end"""
def __init__(self, result_dir=None):
self.result_dir = result_dir
self.harness_file = ''
harness_info = ConfigParser.ConfigParser()
harness_info.read('%s\\info.ini' % settings.HARNESS_HOME)
self.version = harness_info.getint('Thread_Harness_Info', 'SVN')
def start(self):
logger.info('Starting harness service')
if self.harness:
logger.warning('Harness already started')
else:
env = dict(os.environ, PYTHONPATH='%s\\Thread_Harness;%s\\ThirdParty\\hsdk-python\\src'
% (settings.HARNESS_HOME, settings.HARNESS_HOME))
self.harness_file = '%s\\harness-%s.log' % (self.result_dir, time.strftime('%Y%m%d%H%M%S'))
with open(self.harness_file, 'w') as harness_out:
self.harness = subprocess.Popen([settings.HARNESS_HOME + '\\Python27\\python.exe',
settings.HARNESS_HOME + '\\Thread_Harness\\Run.py'],
cwd=settings.HARNESS_HOME,
stdout=harness_out,
stderr=harness_out,
env=env)
time.sleep(2)
if self.version >= HARNESS_SVN_VERSION_R44:
return
if self.miniweb:
logger.warning('Miniweb already started')
else:
with open('%s\\miniweb-%s.log' % (self.result_dir, time.strftime('%Y%m%d%H%M%S')), 'w') as miniweb_out:
self.miniweb = subprocess.Popen([settings.HARNESS_HOME + '\\MiniWeb\\miniweb.exe'],
stdout=miniweb_out,
stderr=miniweb_out,
cwd=settings.HARNESS_HOME + '\\MiniWeb')
def stop(self):
logger.info('Stopping harness service')
if self.harness:
_try_kill(self.harness)
self.harness = None
else:
logger.warning('Harness not started yet')
if self.version >= HARNESS_SVN_VERSION_R44:
return
if self.miniweb:
_try_kill(self.miniweb)
self.miniweb = None
else:
logger.warning('Miniweb not started yet')
def tail(self):
with open(self.harness_file) as harness_out:
harness_out.seek(-100, 2)
return ''.join(harness_out.readlines())
def __del__(self):
self.stop()
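# Illustrative lifecycle (hypothetical result_dir path; not part of the module):
#   controller = HarnessController(result_dir='C:\\results')
#   controller.start()
#   ...
#   print(controller.tail())
#   controller.stop()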
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinn_front_end_common/abstract_models/abstract_provides_outgoing_partition_constraints.py
|
from six import add_metaclass
from abc import ABCMeta
from abc import abstractmethod
@add_metaclass(ABCMeta)
class AbstractProvidesOutgoingPartitionConstraints(object):
""" A vertex that can provide constraints for its outgoing partitioned\
edges
"""
@abstractmethod
def get_outgoing_partition_constraints(self, partition, graph_mapper):
""" Get constraints to be added to the given edge that comes out of\
this vertex
        :param partition: An edge that comes out of this vertex
        :type partition:\
            :py:class:`pacman.model.partitioned_graph.partitioned_edge.PartitionedEdge`
:param graph_mapper: A mapper between the partitioned edge and the \
associated partitionable edge
:type graph_mapper:\
:py:class:`pacman.model.graph_mapper.graph_mapper.GraphMapper`
:return: A list of constraints
:rtype: list of\
:py:class:`pacman.model.constraints.abstract_constraint.AbstractConstraint`
"""
pass
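# Illustrative (hypothetical) subclass sketch -- a vertex with no extra
# constraints simply returns an empty list:
#
# class MyVertex(AbstractProvidesOutgoingPartitionConstraints):
#     def get_outgoing_partition_constraints(self, partition, graph_mapper):
#         return []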
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
example/object/put_with_timeout.go
|
package main
import (
"context"
"fmt"
"net/url"
"os"
"time"
"net/http"
"github.com/tencentyun/cos-go-sdk-v5"
"github.com/tencentyun/cos-go-sdk-v5/debug"
)
func log_status(err error) {
if err == nil {
return
}
if cos.IsNotFoundError(err) {
// WARN
fmt.Println("WARN: Resource is not existed")
} else if e, ok := cos.IsCOSError(err); ok {
fmt.Printf("ERROR: Code: %v\n", e.Code)
fmt.Printf("ERROR: Message: %v\n", e.Message)
fmt.Printf("ERROR: Resource: %v\n", e.Resource)
fmt.Printf("ERROR: RequestId: %v\n", e.RequestID)
// ERROR
} else {
fmt.Printf("ERROR: %v\n", err)
// ERROR
}
}
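// Illustrative usage sketch (not in the original example): log_status pairs
// with any SDK call that returns an error, e.g. checking existence first:
//   _, err := c.Object.Head(context.Background(), name, nil)
//   log_status(err)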
func main() {
u, _ := url.Parse("https://test-1259654469.cos.ap-guangzhou.myqcloud.com")
b := &cos.BaseURL{BucketURL: u}
c := cos.NewClient(b, &http.Client{
Transport: &cos.AuthorizationTransport{
SecretID: os.Getenv("COS_SECRETID"),
SecretKey: os.Getenv("COS_SECRETKEY"),
Transport: &debug.DebugRequestTransport{
RequestHeader: true,
				// Note: when uploading a large file with RequestBody enabled, an out-of-memory error may occur.
RequestBody: false,
ResponseHeader: true,
ResponseBody: false,
},
},
		Timeout: 5 * time.Second, // HTTP timeout
})
	// Case 1: upload an object
	name := "test/example"
	// Case 3: upload an object from a local file
	ctx, _ := context.WithTimeout(context.Background(), 2*time.Second) // context timeout
	_, err := c.Object.PutFromFile(ctx, name, "./test", nil)           // effective request timeout is min{context timeout, HTTP timeout}
log_status(err)
}
|
[
"\"COS_SECRETID\"",
"\"COS_SECRETKEY\""
] |
[] |
[
"COS_SECRETKEY",
"COS_SECRETID"
] |
[]
|
["COS_SECRETKEY", "COS_SECRETID"]
|
go
| 2 | 0 | |
build/gyp_crashpad_android.py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright 2017 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import gyp_crashpad
import os
import re
import subprocess
import sys
def main(args):
parser = argparse.ArgumentParser(
description='Set up an Android cross build',
epilog='Additional arguments will be passed to gyp_crashpad.py.')
parser.add_argument('--ndk', required=True, help='Standalone NDK toolchain')
parser.add_argument('--compiler',
default='clang',
choices=('clang', 'gcc'),
help='The compiler to use, clang by default')
(parsed, extra_command_line_args) = parser.parse_known_args(args)
NDK_ERROR=(
'NDK must be a valid standalone NDK toolchain.\n' +
'See https://developer.android.com/ndk/guides/standalone_toolchain.html')
arch_dirs = glob.glob(os.path.join(parsed.ndk, '*-linux-android*'))
if len(arch_dirs) != 1:
parser.error(NDK_ERROR)
arch_triplet = os.path.basename(arch_dirs[0])
ARCH_TRIPLET_TO_ARCH = {
'arm-linux-androideabi': 'arm',
'aarch64-linux-android': 'arm64',
'i686-linux-android': 'ia32',
'x86_64-linux-android': 'x64',
'mipsel-linux-android': 'mips',
'mips64el-linux-android': 'mips64',
}
if arch_triplet not in ARCH_TRIPLET_TO_ARCH:
parser.error(NDK_ERROR)
arch = ARCH_TRIPLET_TO_ARCH[arch_triplet]
ndk_bin_dir = os.path.join(parsed.ndk, 'bin')
clang_path = os.path.join(ndk_bin_dir, 'clang')
extra_args = []
if parsed.compiler == 'clang':
os.environ['CC_target'] = clang_path
os.environ['CXX_target'] = os.path.join(ndk_bin_dir, 'clang++')
elif parsed.compiler == 'gcc':
os.environ['CC_target'] = os.path.join(ndk_bin_dir,
'%s-gcc' % arch_triplet)
os.environ['CXX_target'] = os.path.join(ndk_bin_dir,
'%s-g++' % arch_triplet)
# Unlike the Clang build, when using GCC with unified headers, __ANDROID_API__
# isn’t set automatically and must be pushed in to the build. Fish the correct
# value out of the Clang wrapper script. If deprecated headers are in use, the
# Clang wrapper won’t mention __ANDROID_API__, but the standalone toolchain’s
# <android/api-level.h> will #define it for both Clang and GCC.
#
# android_api_level is extracted in this manner even when compiling with Clang
# so that it’s available for use in GYP conditions that need to test the API
# level, but beware that it’ll only be available when unified headers are in
# use.
#
# Unified headers are the way of the future, according to
# https://android.googlesource.com/platform/ndk/+/ndk-r14/CHANGELOG.md and
# https://android.googlesource.com/platform/ndk/+/master/docs/UnifiedHeaders.md.
# Traditional (deprecated) headers have been removed entirely as of NDK r16.
# https://android.googlesource.com/platform/ndk/+/ndk-release-r16/CHANGELOG.md.
with open(clang_path, 'r') as file:
clang_script_contents = file.read()
matches = re.finditer(r'\s-D__ANDROID_API__=([\d]+)\s',
clang_script_contents)
match = next(matches, None)
if match:
android_api = int(match.group(1))
extra_args.extend(['-D', 'android_api_level=%d' % android_api])
if next(matches, None):
raise AssertionError('__ANDROID_API__ defined too many times')
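# export the target-prefixed binutils so the build uses the NDK's ar/nm/readelf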
for tool in ('ar', 'nm', 'readelf'):
os.environ['%s_target' % tool.upper()] = (
os.path.join(ndk_bin_dir, '%s-%s' % (arch_triplet, tool)))
return gyp_crashpad.main(
['-D', 'OS=android',
'-D', 'target_arch=%s' % arch,
'-D', 'clang=%d' % (1 if parsed.compiler == 'clang' else 0),
'-f', 'ninja-android'] +
extra_args +
extra_command_line_args)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
[] |
[] |
[
"CC_target",
"CXX_target",
"%s_target' % tool.upper("
] |
[]
|
["CC_target", "CXX_target", "%s_target' % tool.upper("]
|
python
| 3 | 0 | |
src/main/java/com/example/employees/Main.java
|
/* Copyright 2015 Oracle and/or its affiliates. All rights reserved. */
package com.example.employees;
import org.apache.catalina.startup.Tomcat;
//Import the Optional class
import java.util.Optional;
public class Main {
public static final Optional<String> PORT = Optional.ofNullable(System.getenv("PORT"));
//Create a PORT instance variable
public static void main(String[] args) throws Exception {
String contextPath = "/" ;
String appBase = ".";
Tomcat tomcat = new Tomcat();
//Use this PORT variable to set the port on Tomcat instance
tomcat.setPort(Integer.valueOf(PORT.orElse("8080")));
tomcat.getHost().setAppBase(appBase);
tomcat.addWebapp(contextPath, appBase);
tomcat.start();
tomcat.getServer().await();
}
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
java
| 1 | 0 | |
scripts/grab_missing_object_vals_from_solr.py
|
import sys
py_version = sys.version_info
if py_version.major == 2 and py_version.minor == 7 and py_version.micro > 8:
#disable ssl verification
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import os
from harvester.post_processing.run_transform_on_couchdb_docs import run_on_couchdb_by_collection
import solr
SOLR_URL = os.environ.get('URL_SOLR_API', None)
SOLR_API_KEY = os.environ.get('SOLR_API_KEY', None)
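# search handler bound to the /query endpoint, authenticating via the X-Authentication-Token header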
SOLR = solr.SearchHandler(
solr.Solr(
SOLR_URL,
post_headers={
'X-Authentication-Token': SOLR_API_KEY,
},
),
"/query"
)
def fill_object_values_from_solr(doc):
'''If no object field, try to get from current solr
'''
if 'object' not in doc:
query='harvest_id_s:"{}"'.format(doc['_id'])
msg = "NO OBJECT FOR {}".format(doc['_id'])
resp = SOLR(q=query,
fields='harvest_id_s, reference_image_md5, id, collection_url, reference_image_dimensions',
)
results = resp.results
if results:
solr_doc = results[0]
if 'reference_image_md5' in solr_doc:
doc['object'] = solr_doc['reference_image_md5']
doc['object_dimensions'] = solr_doc['reference_image_dimensions'].split(':')
print "OBJ DIM:{}".format(doc['object_dimensions'])
print 'UPDATING OBJECT -- {}'.format(doc['_id'])
return doc
else:
print 'NOT IN SOLR -- {}'.format(msg)
return None
run_on_couchdb_by_collection(fill_object_values_from_solr,
#collection_key="23066")
collection_key="26094")
|
[] |
[] |
[
"SOLR_API_KEY",
"URL_SOLR_API"
] |
[]
|
["SOLR_API_KEY", "URL_SOLR_API"]
|
python
| 2 | 0 | |
widgets/colorize.go
|
package widgets
import (
"errors"
"github.com/alecthomas/chroma/formatters"
"github.com/alecthomas/chroma/lexers"
"github.com/alecthomas/chroma/styles"
"github.com/rivo/tview"
"github.com/stephane-martin/vssh/textconv"
"io"
"os"
"path/filepath"
"strings"
)
func Colorize(name string, content []byte, out io.Writer) error {
ext := strings.ToLower(filepath.Ext(name))
if ext == ".pdf" {
return textconv.PDFToText(content, out)
} else if ext == ".docx" {
return textconv.ConvertDocx(content, out)
}
if textconv.IsBinary(content) {
return errors.New("looks like binary")
}
lexer := lexers.Match(filepath.Base(name))
if lexer == nil {
_, err := out.Write(content)
return err
}
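// pick the chroma style from the VSSH_THEME environment variable, defaulting to monokai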
styleName := os.Getenv("VSSH_THEME")
if styleName == "" {
styleName = "monokai"
}
style := styles.Get(styleName)
if style == nil {
return errors.New("style not found")
}
formatter := formatters.Get("terminal256")
if formatter == nil {
return errors.New("formatter not found")
}
iterator, err := lexer.Tokenise(nil, string(content))
if err != nil {
return err
}
if box, ok := out.(*tview.TextView); ok {
box.SetDynamicColors(true)
out = tview.ANSIWriter(out)
}
return formatter.Format(out, style, iterator)
}
|
[
"\"VSSH_THEME\""
] |
[] |
[
"VSSH_THEME"
] |
[]
|
["VSSH_THEME"]
|
go
| 1 | 0 | |
cloud_deploy_system/cloud_deploy_system/wsgi.py
|
"""
WSGI config for cloud_deploy_system project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cloud_deploy_system.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
server/services/db/database.go
|
package db
import (
"database/sql"
"fmt"
"os"
_ "github.com/lib/pq" // Needed for the postgres driver
)
// DB is the struct that we're going to use to implement all of our
// Datasbase interfaces; All of the methods defined on each of our
// interfaces will be implemented on this DB struct
type DB struct {
*sql.DB
}
// DatabaseManager combines all of the database interactions into one
type DatabaseManager interface {
CardManager
SetManager
}
// New initializes a new postgres database connection and attaches
// said connection to our DB struct, on which we can then call all of
// the methods described by our various Database interfaces
func New() (*DB, error) {
connStr := fmt.Sprintf(
"user=%s password=%s dbname=%s host=%s port=%s",
os.Getenv("USER_NAME"),
os.Getenv("USER_PASSWORD"),
os.Getenv("DB_NAME"),
os.Getenv("DB_HOST"),
os.Getenv("DB_PORT"),
)
db, err := sql.Open("postgres", connStr)
if err != nil {
return nil, err
}
if err = db.Ping(); err != nil {
return nil, err
}
return &DB{db}, nil
}
|
[
"\"USER_NAME\"",
"\"USER_PASSWORD\"",
"\"DB_NAME\"",
"\"DB_HOST\"",
"\"DB_PORT\""
] |
[] |
[
"USER_PASSWORD",
"DB_HOST",
"DB_PORT",
"DB_NAME",
"USER_NAME"
] |
[]
|
["USER_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "USER_NAME"]
|
go
| 5 | 0 | |
mixer/adapter/kubernetesenv/kubernetesenv.go
|
// Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// nolint: lll
//go:generate $GOPATH/src/istio.io/istio/bin/mixer_codegen.sh -a mixer/adapter/kubernetesenv/config/config.proto -x "-n kubernetesenv"
//go:generate $GOPATH/src/istio.io/istio/bin/mixer_codegen.sh -t mixer/adapter/kubernetesenv/template/template.proto
// Package kubernetesenv provides functionality to adapt mixer behavior to the
// kubernetes environment. Primarily, it is used to generate values as part
// of Mixer's attribute generation preprocessing phase. These values will be
// transformed into attributes that can be used for subsequent config
// resolution and adapter dispatch and execution.
package kubernetesenv
import (
"context"
"errors"
"fmt"
"net"
"os"
"strings"
"sync"
"time"
"k8s.io/api/core/v1"
k8s "k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth" // needed for auth
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"istio.io/istio/mixer/adapter/kubernetesenv/config"
ktmpl "istio.io/istio/mixer/adapter/kubernetesenv/template"
"istio.io/istio/mixer/pkg/adapter"
"istio.io/istio/pkg/kube/secretcontroller"
)
const (
// parsing
kubePrefix = "kubernetes://"
// k8s cache invalidation
// TODO: determine a reasonable default
defaultRefreshPeriod = 5 * time.Minute
defaultClusterRegistriesNamespace = "istio-system"
)
var (
conf = &config.Params{
KubeconfigPath: "",
CacheRefreshDuration: defaultRefreshPeriod,
ClusterRegistriesNamespace: "",
}
)
type (
builder struct {
adapterConfig *config.Params
newClientFn clientFactoryFn
sync.Mutex
controllers map[string]cacheController
kubeHandler *handler
}
handler struct {
sync.RWMutex
k8sCache map[string]cacheController
env adapter.Env
params *config.Params
}
// used strictly for testing purposes
clientFactoryFn func(kubeconfigPath string, env adapter.Env) (k8s.Interface, error)
)
// compile-time validation
var _ ktmpl.Handler = &handler{}
var _ ktmpl.HandlerBuilder = &builder{}
// GetInfo returns the Info associated with this adapter implementation.
func GetInfo() adapter.Info {
return adapter.Info{
Name: "kubernetesenv",
Impl: "istio.io/istio/mixer/adapter/kubernetesenv",
Description: "Provides platform specific functionality for the kubernetes environment",
SupportedTemplates: []string{
ktmpl.TemplateName,
},
DefaultConfig: conf,
NewBuilder: func() adapter.HandlerBuilder { return newBuilder(newKubernetesClient) },
}
}
func (b *builder) SetAdapterConfig(c adapter.Config) {
b.adapterConfig = c.(*config.Params)
}
// Validate is responsible for ensuring that all the configuration state given to the builder is
// correct.
func (b *builder) Validate() (ce *adapter.ConfigErrors) {
return
}
func (b *builder) Build(ctx context.Context, env adapter.Env) (adapter.Handler, error) {
paramsProto := b.adapterConfig
var controller cacheController
var controllers = make(map[string]cacheController)
path, exists := os.LookupEnv("KUBECONFIG")
if !exists {
path = paramsProto.KubeconfigPath
}
// only ever build a controller for a config once. this potentially blocks
// the Build() for multiple handlers using the same config until the first
// one has synced. This should be OK, as the WaitForCacheSync was meant to
// provide this basic functionality before.
b.Lock()
defer b.Unlock()
_, found := b.controllers[path]
if !found {
clientset, err := b.newClientFn(path, env)
if err != nil {
return nil, fmt.Errorf("could not build kubernetes client: %v", err)
}
controller, err = runNewController(b, clientset, env)
if err != nil {
return nil, fmt.Errorf("could not create new cache controller: %v", err)
}
controllers[path] = controller
b.controllers[path] = controller
} else {
for clusterID := range b.controllers {
controllers[clusterID] = b.controllers[clusterID]
}
}
kubeHandler := handler{
env: env,
k8sCache: controllers,
params: paramsProto,
}
b.kubeHandler = &kubeHandler
if !found {
if err := initMultiClusterSecretController(b, path, env); err != nil {
return nil, fmt.Errorf("could not create remote controllers: %v", err)
}
}
return &kubeHandler, nil
}
func runNewController(b *builder, clientset k8s.Interface, env adapter.Env) (cacheController, error) {
paramsProto := b.adapterConfig
stopChan := make(chan struct{})
refresh := paramsProto.CacheRefreshDuration
controller := newCacheController(clientset, refresh, env, stopChan)
env.ScheduleDaemon(func() { controller.Run(stopChan) })
// ensure that any request is only handled after
// a sync has occurred
env.Logger().Infof("Waiting for kubernetes cache sync...")
if success := cache.WaitForCacheSync(stopChan, controller.HasSynced); !success {
stopChan <- struct{}{}
return nil, errors.New("cache sync failure")
}
env.Logger().Infof("Cache sync successful.")
return controller, nil
}
func newBuilder(clientFactory clientFactoryFn) *builder {
return &builder{
newClientFn: clientFactory,
controllers: make(map[string]cacheController),
adapterConfig: conf,
}
}
func (h *handler) GenerateKubernetesAttributes(ctx context.Context, inst *ktmpl.Instance) (*ktmpl.Output, error) {
out := ktmpl.NewOutput()
if inst.DestinationUid != "" {
if p, found := h.findPod(inst.DestinationUid); found {
h.fillDestinationAttrs(p, inst.DestinationPort, out)
}
} else if inst.DestinationIp != nil && !inst.DestinationIp.IsUnspecified() {
if p, found := h.findPod(inst.DestinationIp.String()); found {
h.fillDestinationAttrs(p, inst.DestinationPort, out)
}
}
if inst.SourceUid != "" {
if p, found := h.findPod(inst.SourceUid); found {
h.fillSourceAttrs(p, out)
}
} else if inst.SourceIp != nil && !inst.SourceIp.IsUnspecified() {
if p, found := h.findPod(inst.SourceIp.String()); found {
h.fillSourceAttrs(p, out)
}
}
return out, nil
}
func (h *handler) Close() error {
return nil
}
func (h *handler) findPod(uid string) (*v1.Pod, bool) {
podKey := keyFromUID(uid)
var found bool
var pod *v1.Pod
h.RLock()
defer h.RUnlock()
for _, controller := range h.k8sCache {
pod, found = controller.Pod(podKey)
if found {
break
}
}
if !found {
h.env.Logger().Debugf("could not find pod for (uid: %s, key: %s)", uid, podKey)
}
return pod, found
}
func keyFromUID(uid string) string {
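// normalize the pod identifier: bare IP addresses pass through unchanged,
// while "kubernetes://<name>.<namespace>" UIDs are converted to the cache key format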
if ip := net.ParseIP(uid); ip != nil {
return uid
}
fullname := strings.TrimPrefix(uid, kubePrefix)
if strings.Contains(fullname, ".") {
parts := strings.Split(fullname, ".")
if len(parts) == 2 {
return key(parts[1], parts[0])
}
}
return fullname
}
func findContainer(p *v1.Pod, port int64) string {
if port <= 0 {
return ""
}
for _, c := range p.Spec.Containers {
for _, cp := range c.Ports {
if int64(cp.ContainerPort) == port {
return c.Name
}
}
}
return ""
}
func (h *handler) fillDestinationAttrs(p *v1.Pod, port int64, o *ktmpl.Output) {
if len(p.Labels) > 0 {
o.SetDestinationLabels(p.Labels)
}
if len(p.Name) > 0 {
o.SetDestinationPodName(p.Name)
}
if len(p.Namespace) > 0 {
o.SetDestinationNamespace(p.Namespace)
}
if len(p.Name) > 0 && len(p.Namespace) > 0 {
o.SetDestinationPodUid(kubePrefix + p.Name + "." + p.Namespace)
}
if len(p.Spec.ServiceAccountName) > 0 {
o.SetDestinationServiceAccountName(p.Spec.ServiceAccountName)
}
if len(p.Status.PodIP) > 0 {
o.SetDestinationPodIp(net.ParseIP(p.Status.PodIP))
}
if len(p.Status.HostIP) > 0 {
o.SetDestinationHostIp(net.ParseIP(p.Status.HostIP))
}
h.RLock()
defer h.RUnlock()
for _, controller := range h.k8sCache {
if wl, found := controller.Workload(p); found {
o.SetDestinationWorkloadUid(wl.uid)
o.SetDestinationWorkloadName(wl.name)
o.SetDestinationWorkloadNamespace(wl.namespace)
if len(wl.selfLinkURL) > 0 {
o.SetDestinationOwner(wl.selfLinkURL)
}
break
}
}
if cn := findContainer(p, port); cn != "" {
o.SetDestinationContainerName(cn)
}
}
func (h *handler) fillSourceAttrs(p *v1.Pod, o *ktmpl.Output) {
if len(p.Labels) > 0 {
o.SetSourceLabels(p.Labels)
}
if len(p.Name) > 0 {
o.SetSourcePodName(p.Name)
}
if len(p.Namespace) > 0 {
o.SetSourceNamespace(p.Namespace)
}
if len(p.Name) > 0 && len(p.Namespace) > 0 {
o.SetSourcePodUid(kubePrefix + p.Name + "." + p.Namespace)
}
if len(p.Spec.ServiceAccountName) > 0 {
o.SetSourceServiceAccountName(p.Spec.ServiceAccountName)
}
if len(p.Status.PodIP) > 0 {
o.SetSourcePodIp(net.ParseIP(p.Status.PodIP))
}
if len(p.Status.HostIP) > 0 {
o.SetSourceHostIp(net.ParseIP(p.Status.HostIP))
}
h.RLock()
defer h.RUnlock()
for _, controller := range h.k8sCache {
if wl, found := controller.Workload(p); found {
o.SetSourceWorkloadUid(wl.uid)
o.SetSourceWorkloadName(wl.name)
o.SetSourceWorkloadNamespace(wl.namespace)
if len(wl.selfLinkURL) > 0 {
o.SetSourceOwner(wl.selfLinkURL)
}
break
}
}
}
func newKubernetesClient(kubeconfigPath string, env adapter.Env) (k8s.Interface, error) {
env.Logger().Infof("getting kubeconfig from: %#v", kubeconfigPath)
config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
if err != nil || config == nil {
return nil, fmt.Errorf("could not retrieve kubeconfig: %v", err)
}
return k8s.NewForConfig(config)
}
func (b *builder) createCacheController(k8sInterface k8s.Interface, clusterID string) error {
controller, err := runNewController(b, k8sInterface, b.kubeHandler.env)
if err == nil {
b.Lock()
b.controllers[clusterID] = controller
b.Unlock()
b.kubeHandler.Lock()
b.kubeHandler.k8sCache[clusterID] = controller
b.kubeHandler.Unlock()
b.kubeHandler.env.Logger().Infof("created remote controller %s", clusterID)
} else {
b.kubeHandler.env.Logger().Errorf("error on creating remote controller %s err = %v", clusterID, err)
}
return err
}
func (b *builder) deleteCacheController(clusterID string) error {
b.Lock()
delete(b.controllers, clusterID)
b.Unlock()
b.kubeHandler.Lock()
defer b.kubeHandler.Unlock()
b.kubeHandler.k8sCache[clusterID].StopControlChannel()
delete(b.kubeHandler.k8sCache, clusterID)
b.kubeHandler.env.Logger().Infof("deleted remote controller %s", clusterID)
return nil
}
func initMultiClusterSecretController(b *builder, kubeconfig string, env adapter.Env) (err error) {
var clusterNs string
paramsProto := b.adapterConfig
if clusterNs = paramsProto.ClusterRegistriesNamespace; clusterNs == "" {
if clusterNs = os.Getenv("POD_NAMESPACE"); clusterNs == "" {
clusterNs = defaultClusterRegistriesNamespace
}
}
kubeClient, err := b.newClientFn(kubeconfig, env)
if err != nil {
return fmt.Errorf("could not create K8s client: %v", err)
}
err = secretcontroller.StartSecretController(kubeClient, b.createCacheController, b.deleteCacheController, clusterNs)
if err != nil {
return fmt.Errorf("could not start secret controller: %v", err)
}
return nil
}
|
[
"\"POD_NAMESPACE\""
] |
[] |
[
"POD_NAMESPACE"
] |
[]
|
["POD_NAMESPACE"]
|
go
| 1 | 0 | |
scan.py
|
# -*- coding: utf-8 -*-
# Upside Travel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
from urllib.parse import unquote_plus
from distutils.util import strtobool
import boto3
import clamav
import metrics
from common import AV_DEFINITION_S3_BUCKET
from common import AV_DEFINITION_S3_PREFIX
from common import AV_DELETE_INFECTED_FILES
from common import AV_PROCESS_ORIGINAL_VERSION_ONLY
from common import AV_SCAN_START_METADATA
from common import AV_SCAN_START_SNS_ARN
from common import AV_SIGNATURE_METADATA
from common import AV_STATUS_CLEAN
from common import AV_STATUS_INFECTED
from common import AV_STATUS_FAILED
from common import AV_STATUS_METADATA
from common import AV_STATUS_SNS_ARN
from common import AV_STATUS_SNS_PUBLISH_CLEAN
from common import AV_STATUS_SNS_PUBLISH_INFECTED
from common import AV_TIMESTAMP_METADATA
from common import create_dir
from common import get_timestamp
def event_object(event, event_source="s3"):
# SNS events are slightly different
if event_source.upper() == "SNS":
event = json.loads(event["Records"][0]["Sns"]["Message"])
# Break down the record
records = event["Records"]
if len(records) == 0:
raise Exception("No records found in event!")
record = records[0]
s3_obj = record["s3"]
# Get the bucket name
if "bucket" not in s3_obj:
raise Exception("No bucket found in event!")
bucket_name = s3_obj["bucket"].get("name", None)
# Get the key name
if "object" not in s3_obj:
raise Exception("No key found in event!")
key_name = s3_obj["object"].get("key", None)
if key_name:
key_name = unquote_plus(key_name)
# Ensure both bucket and key exist
if (not bucket_name) or (not key_name):
raise Exception("Unable to retrieve object from event.\n{}".format(event))
# Create and return the object
s3 = boto3.resource("s3")
return s3.Object(bucket_name, key_name)
def verify_s3_object_version(s3, s3_object):
# validate that we only process the original version of a file, if asked to do so
# security check to disallow processing of a new (possibly infected) object version
# while a clean initial version is getting processed
# downstream services may consume latest version by mistake and get the infected version instead
bucket_versioning = s3.BucketVersioning(s3_object.bucket_name)
if bucket_versioning.status == "Enabled":
bucket = s3.Bucket(s3_object.bucket_name)
versions = list(bucket.object_versions.filter(Prefix=s3_object.key))
if len(versions) > 1:
raise Exception(
"Detected multiple object versions in %s.%s, aborting processing"
% (s3_object.bucket_name, s3_object.key)
)
else:
# misconfigured bucket, left with no or suspended versioning
raise Exception(
"Object versioning is not enabled in bucket %s" % s3_object.bucket_name
)
def get_local_path(s3_object, local_prefix):
return os.path.join(local_prefix, s3_object.bucket_name, s3_object.key)
def delete_s3_object(s3_object):
try:
s3_object.delete()
except Exception:
raise Exception(
"Failed to delete infected file: %s.%s"
% (s3_object.bucket_name, s3_object.key)
)
else:
print("Infected file deleted: %s.%s" % (s3_object.bucket_name, s3_object.key))
def set_av_metadata(s3_object, scan_result, scan_signature, timestamp):
content_type = s3_object.content_type
metadata = s3_object.metadata
metadata[AV_SIGNATURE_METADATA] = scan_signature
metadata[AV_STATUS_METADATA] = scan_result
metadata[AV_TIMESTAMP_METADATA] = timestamp
s3_object.copy(
{"Bucket": s3_object.bucket_name, "Key": s3_object.key},
ExtraArgs={
"ContentType": content_type,
"Metadata": metadata,
"MetadataDirective": "REPLACE",
},
)
def set_av_tags(s3_client, s3_object, scan_result, scan_signature, timestamp):
curr_tags = s3_client.get_object_tagging(
Bucket=s3_object.bucket_name, Key=s3_object.key
)["TagSet"]
new_tags = copy.copy(curr_tags)
for tag in curr_tags:
if tag["Key"] in [
AV_SIGNATURE_METADATA,
AV_STATUS_METADATA,
AV_TIMESTAMP_METADATA,
]:
new_tags.remove(tag)
new_tags.append({"Key": AV_SIGNATURE_METADATA, "Value": scan_signature})
new_tags.append({"Key": AV_STATUS_METADATA, "Value": scan_result})
new_tags.append({"Key": AV_TIMESTAMP_METADATA, "Value": timestamp})
s3_client.put_object_tagging(
Bucket=s3_object.bucket_name, Key=s3_object.key, Tagging={"TagSet": new_tags}
)
def sns_start_scan(sns_client, s3_object, scan_start_sns_arn, timestamp):
message = {
"bucket": s3_object.bucket_name,
"key": s3_object.key,
"version": s3_object.version_id,
AV_SCAN_START_METADATA: True,
AV_TIMESTAMP_METADATA: timestamp,
}
sns_client.publish(
TargetArn=scan_start_sns_arn,
Message=json.dumps({"default": json.dumps(message)}),
MessageStructure="json",
)
def sns_scan_results(
sns_client, s3_object, sns_arn, scan_result, scan_signature, timestamp
):
# Don't publish if scan_result is CLEAN and CLEAN results should not be published
if scan_result == AV_STATUS_CLEAN and not str_to_bool(AV_STATUS_SNS_PUBLISH_CLEAN):
return
# Don't publish if scan_result is INFECTED and INFECTED results should not be published
if scan_result == AV_STATUS_INFECTED and not str_to_bool(
AV_STATUS_SNS_PUBLISH_INFECTED
):
return
message = {
"bucket": s3_object.bucket_name,
"key": s3_object.key,
"version": s3_object.version_id,
AV_SIGNATURE_METADATA: scan_signature,
AV_STATUS_METADATA: scan_result,
AV_TIMESTAMP_METADATA: get_timestamp(),
}
sns_client.publish(
TargetArn=sns_arn,
Message=json.dumps({"default": json.dumps(message)}),
MessageStructure="json",
MessageAttributes={
AV_STATUS_METADATA: {"DataType": "String", "StringValue": scan_result},
AV_SIGNATURE_METADATA: {
"DataType": "String",
"StringValue": scan_signature,
},
},
)
def download_clamav_databases():
s3_client = boto3.client("s3")
s3 = boto3.resource("s3")
to_download = clamav.update_defs_from_s3(
s3_client, AV_DEFINITION_S3_BUCKET, AV_DEFINITION_S3_PREFIX
)
for download in to_download.values():
s3_path = download["s3_path"]
local_path = download["local_path"]
print("Downloading definition file %s from s3://%s" % (local_path, s3_path))
s3.Bucket(AV_DEFINITION_S3_BUCKET).download_file(s3_path, local_path)
print("Downloading definition file %s complete!" % (local_path))
def remove_file(file_path):
try:
os.remove(file_path)
except OSError:
pass
def publish_results(s3_object, scan_result, scan_signature):
result_time = get_timestamp()
sns_client = boto3.client("sns")
s3_client = boto3.client("s3")
ENV = os.getenv("ENV", "")
# Set the properties on the object with the scan results
if "AV_UPDATE_METADATA" in os.environ:
set_av_metadata(s3_object, scan_result, scan_signature, result_time)
set_av_tags(s3_client, s3_object, scan_result, scan_signature, result_time)
# Publish the scan results
if AV_STATUS_SNS_ARN not in [None, ""]:
sns_scan_results(
sns_client,
s3_object,
AV_STATUS_SNS_ARN,
scan_result,
scan_signature,
result_time,
)
metrics.send(
env=ENV, bucket=s3_object.bucket_name, key=s3_object.key, status=scan_result
)
def lambda_handler(event, context):
s3 = boto3.resource("s3")
sns_client = boto3.client("sns")
# Get some environment variables
EVENT_SOURCE = os.getenv("EVENT_SOURCE", "S3")
start_time = get_timestamp()
print("Script starting at %s\n" % (start_time))
s3_object = event_object(event, event_source=EVENT_SOURCE)
if str_to_bool(AV_PROCESS_ORIGINAL_VERSION_ONLY):
verify_s3_object_version(s3, s3_object)
# Publish the start time of the scan
if AV_SCAN_START_SNS_ARN not in [None, ""]:
start_scan_time = get_timestamp()
sns_start_scan(sns_client, s3_object, AV_SCAN_START_SNS_ARN, start_scan_time)
file_path = get_local_path(s3_object, "/tmp")
create_dir(os.path.dirname(file_path))
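# errno 28 (ENOSPC) below indicates the function's /tmp storage filled up mid-download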
try:
s3_object.download_file(file_path)
except OSError as e:
remove_file(file_path)
if e.errno == 28:
print("Ran out of disk space. Scan failed")
publish_results(s3_object, AV_STATUS_FAILED, "File too large to scan")
return
else:
raise
download_clamav_databases()
scan_result, scan_signature = clamav.scan_file(file_path)
print(
"Scan of s3://%s resulted in %s\n"
% (os.path.join(s3_object.bucket_name, s3_object.key), scan_result)
)
publish_results(s3_object, scan_result, scan_signature)
# Delete downloaded file to free up room on re-usable lambda function container
remove_file(file_path)
if str_to_bool(AV_DELETE_INFECTED_FILES) and scan_result == AV_STATUS_INFECTED:
delete_s3_object(s3_object)
stop_scan_time = get_timestamp()
print("Script finished at %s\n" % stop_scan_time)
def str_to_bool(s):
return bool(strtobool(str(s)))
|
[] |
[] |
[
"ENV",
"EVENT_SOURCE"
] |
[]
|
["ENV", "EVENT_SOURCE"]
|
python
| 2 | 0 | |
tests/utils/itutils.go
|
package utils
import (
"bytes"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"os"
"os/exec"
"strings"
"testing"
"text/template"
"time"
"github.com/ThomasRooney/gexpect"
)
// Deis points to the CLI used to run tests.
var Deis = "deis "
// DeisTestConfig allows tests to be repeated against different
// targets, with different example apps, using specific credentials, and so on.
type DeisTestConfig struct {
AuthKey string
Hosts string
Domain string
SSHKey string
ClusterName string
UserName string
Password string
Email string
ExampleApp string
AppName string
ProcessNum string
ImageID string
Version string
AppUser string
}
// randomApp is used for the test run if DEIS_TEST_APP isn't set
var randomApp = GetRandomApp()
// GetGlobalConfig returns a test configuration object.
func GetGlobalConfig() *DeisTestConfig {
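// every setting falls back to a local default when its DEIS_TEST_* environment variable is unset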
authKey := os.Getenv("DEIS_TEST_AUTH_KEY")
if authKey == "" {
authKey = "deis"
}
hosts := os.Getenv("DEIS_TEST_HOSTS")
if hosts == "" {
hosts = "172.17.8.100"
}
domain := os.Getenv("DEIS_TEST_DOMAIN")
if domain == "" {
domain = "local.deisapp.com"
}
sshKey := os.Getenv("DEIS_TEST_SSH_KEY")
if sshKey == "" {
sshKey = "~/.vagrant.d/insecure_private_key"
}
exampleApp := os.Getenv("DEIS_TEST_APP")
if exampleApp == "" {
exampleApp = randomApp
}
var envCfg = DeisTestConfig{
AuthKey: authKey,
Hosts: hosts,
Domain: domain,
SSHKey: sshKey,
ClusterName: "dev",
UserName: "test",
Password: "asdf1234",
Email: "[email protected]",
ExampleApp: exampleApp,
AppName: "sample",
ProcessNum: "2",
ImageID: "buildtest",
Version: "2",
AppUser: "test1",
}
return &envCfg
}
func doCurl(url string) ([]byte, error) {
response, err := http.Get(url)
if err != nil {
return nil, err
}
// defer the close only after the error check: on a failed request, response may be nil
defer response.Body.Close()
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, err
}
if !strings.Contains(string(body), "Powered by Deis") {
return nil, fmt.Errorf("App not started (%d)", response.StatusCode)
}
return body, nil
}
// Curl connects to a Deis endpoint to see if the example app is running.
func Curl(t *testing.T, params *DeisTestConfig) {
url := "http://" + params.AppName + "." + params.Domain
// FIXME: try the curl a few times
for i := 0; i < 20; i++ {
body, err := doCurl(url)
if err == nil {
fmt.Println(string(body))
return
}
time.Sleep(1 * time.Second)
}
// once more to fail with an error
body, err := doCurl(url)
if err != nil {
t.Fatal(err)
}
fmt.Println(string(body))
}
// AuthCancel tests whether `deis auth:cancel` destroys a user's account.
func AuthCancel(t *testing.T, params *DeisTestConfig) {
fmt.Println("deis auth:cancel")
child, err := gexpect.Spawn(Deis + " auth:cancel")
if err != nil {
t.Fatalf("command not started\n%v", err)
}
fmt.Println("username:")
err = child.Expect("username:")
if err != nil {
t.Fatalf("expect username failed\n%v", err)
}
child.SendLine(params.UserName)
fmt.Print("password:")
err = child.Expect("password:")
if err != nil {
t.Fatalf("expect password failed\n%v", err)
}
child.SendLine(params.Password)
err = child.ExpectRegex("(y/n)")
if err != nil {
t.Fatalf("expect cancel \n%v", err)
}
child.SendLine("y")
err = child.Expect("Account cancelled")
if err != nil {
t.Fatalf("command executiuon failed\n%v", err)
}
child.Close()
}
// AuthPasswd tests whether `deis auth:passwd` updates a user's password.
func AuthPasswd(t *testing.T, params *DeisTestConfig, password string) {
fmt.Println("deis auth:passwd")
child, err := gexpect.Spawn(Deis + " auth:passwd")
if err != nil {
t.Fatalf("command not started\n%v", err)
}
fmt.Println("current password:")
err = child.Expect("current password: ")
if err != nil {
t.Fatalf("expect password failed\n%v", err)
}
child.SendLine(params.Password)
fmt.Println("new password:")
err = child.Expect("new password: ")
if err != nil {
t.Fatalf("expect password failed\n%v", err)
}
child.SendLine(password)
fmt.Println("new password (confirm):")
err = child.Expect("new password (confirm): ")
if err != nil {
t.Fatalf("expect password failed\n%v", err)
}
child.SendLine(password)
err = child.Expect("Password change succeeded")
if err != nil {
t.Fatalf("command executiuon failed\n%v", err)
}
child.Close()
}
// CheckList executes a command and optionally tests whether its output does
// or does not contain a given string.
func CheckList(
t *testing.T, cmd string, params interface{}, contain string, notflag bool) {
var cmdBuf bytes.Buffer
tmpl := template.Must(template.New("cmd").Parse(cmd))
if err := tmpl.Execute(&cmdBuf, params); err != nil {
t.Fatal(err)
}
cmdString := cmdBuf.String()
fmt.Println(cmdString)
var cmdl *exec.Cmd
if strings.Contains(cmd, "cat") {
cmdl = exec.Command("sh", "-c", cmdString)
} else {
cmdl = exec.Command("sh", "-c", Deis+cmdString)
}
stdout, _, err := RunCommandWithStdoutStderr(cmdl)
if err != nil {
t.Fatal(err)
}
if strings.Contains(stdout.String(), contain) == notflag {
if notflag {
t.Fatalf(
"Didn't expect '%s' in command output:\n%v", contain, stdout)
}
t.Fatalf("Expected '%s' in command output:\n%v", contain, stdout)
}
}
// Execute takes command string and parameters required to execute the command,
// a failflag to check whether the command is expected to fail, and an expect
// string to check whether the command has failed according to failflag.
//
// If failflag is true and the command failed, check the stdout and stderr for
// the expect string.
func Execute(t *testing.T, cmd string, params interface{}, failFlag bool, expect string) {
var cmdBuf bytes.Buffer
tmpl := template.Must(template.New("cmd").Parse(cmd))
if err := tmpl.Execute(&cmdBuf, params); err != nil {
t.Fatal(err)
}
cmdString := cmdBuf.String()
fmt.Println(cmdString)
var cmdl *exec.Cmd
if strings.Contains(cmd, "git") {
cmdl = exec.Command("sh", "-c", cmdString)
} else {
cmdl = exec.Command("sh", "-c", Deis+cmdString)
}
switch failFlag {
case true:
if stdout, stderr, err := RunCommandWithStdoutStderr(cmdl); err != nil {
if strings.Contains(stdout.String(), expect) || strings.Contains(stderr.String(), expect) {
fmt.Println("(Error expected...ok)")
} else {
t.Fatal(err)
}
} else {
if strings.Contains(stdout.String(), expect) || strings.Contains(stderr.String(), expect) {
fmt.Println("(Error expected...ok)" + expect)
} else {
t.Fatal(err)
}
}
case false:
if _, _, err := RunCommandWithStdoutStderr(cmdl); err != nil {
t.Fatal(err)
} else {
fmt.Println("ok")
}
}
}
// AppsDestroyTest destroys a Deis app and checks that it was successful.
func AppsDestroyTest(t *testing.T, params *DeisTestConfig) {
cmd := "apps:destroy --app={{.AppName}} --confirm={{.AppName}}"
if err := Chdir(params.ExampleApp); err != nil {
t.Fatal(err)
}
Execute(t, cmd, params, false, "")
if err := Chdir(".."); err != nil {
t.Fatal(err)
}
if err := Rmdir(params.ExampleApp); err != nil {
t.Fatal(err)
}
}
// GetRandomApp returns a known working example app at random for testing.
func GetRandomApp() string {
rand.Seed(int64(time.Now().Unix()))
apps := []string{
"example-clojure-ring",
// "example-dart",
"example-dockerfile-python",
"example-go",
"example-java-jetty",
"example-nodejs-express",
// "example-php",
"example-play",
"example-python-django",
"example-python-flask",
"example-ruby-sinatra",
"example-scala",
}
return apps[rand.Intn(len(apps))]
}
|
[
"\"DEIS_TEST_AUTH_KEY\"",
"\"DEIS_TEST_HOSTS\"",
"\"DEIS_TEST_DOMAIN\"",
"\"DEIS_TEST_SSH_KEY\"",
"\"DEIS_TEST_APP\""
] |
[] |
[
"DEIS_TEST_HOSTS",
"DEIS_TEST_DOMAIN",
"DEIS_TEST_AUTH_KEY",
"DEIS_TEST_APP",
"DEIS_TEST_SSH_KEY"
] |
[]
|
["DEIS_TEST_HOSTS", "DEIS_TEST_DOMAIN", "DEIS_TEST_AUTH_KEY", "DEIS_TEST_APP", "DEIS_TEST_SSH_KEY"]
|
go
| 5 | 0 | |
backend/bleacherstime_33873/settings.py
|
"""
Django settings for bleacherstime_33873 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
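# outside GCP (or without Secret Manager access) fall back to the local .env values read above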
pass
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bleacherstime_33873.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bleacherstime_33873.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
|
[] |
[] |
[
"SETTINGS_NAME"
] |
[]
|
["SETTINGS_NAME"]
|
python
| 1 | 0 | |
examples/pwr_run/checkpointing/debug/ovhd_profile/job28.py
|
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
load_start = time.time()
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.004
args_model = 'densenet169'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_test/' + job_name + '*'
total_epochs = 46
starting_epoch = 0
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '121' in args_model:
base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '169' in args_model:
base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '201' in args_model:
base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
model.add(base_model)
#model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
# signal handlers receive (signum, frame); default them so the function can also be called directly
def terminateProcess(signum=None, frame=None):
save_start = time.time()
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_test/' + job_name + '_' + str(current_epoch) + '.h5')
print('(SIGTERM) terminating the process')
save_time = int(time.time() - save_start)
message = job_name + ' save ' + str(save_time)
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
load_time = int(time.time() - load_start)
if args.resume:
message = job_name + ' load ' + str(load_time)
send_signal.send(args.node, 10002, message)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
sys.exit()
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=1,
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
if not args.resume:
terminateProcess()
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
fw_django/wsgi.py
|
"""
WSGI config for fw_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fw_django.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
test/integration/crcsuite/crcsuite.go
|
// +build integration
/*
Copyright (C) 2018 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package crcsuite
import (
"crypto/tls"
"fmt"
"github.com/DATA-DOG/godog"
"github.com/DATA-DOG/godog/gherkin"
"io/ioutil"
"net/http"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"time"
clicumber "github.com/code-ready/clicumber/testsuite"
"github.com/code-ready/crc/pkg/crc/oc"
)
var (
CRCHome string
CRCBinary string
bundleEmbedded bool
bundleName string
bundleURL string
bundleVersion string
pullSecretFile string
goPath string
)
// FeatureContext defines godog.Suite steps for the test suite.
func FeatureContext(s *godog.Suite) {
// CRC related steps
s.Step(`^removing CRC home directory succeeds$`,
RemoveCRCHome)
s.Step(`^starting CRC with default bundle and default hypervisor (succeeds|fails)$`,
StartCRCWithDefaultBundleAndDefaultHypervisorSucceedsOrFails)
s.Step(`^starting CRC with default bundle and hypervisor "(.*)" (succeeds|fails)$`,
StartCRCWithDefaultBundleAndHypervisorSucceedsOrFails)
s.Step(`^starting CRC with default bundle and nameserver "(.*)" (succeeds|fails)$`,
StartCRCWithDefaultBundleAndNameServerSucceedsOrFails)
s.Step(`^setting config property "(.*)" to value "(.*)" (succeeds|fails)$`,
SetConfigPropertyToValueSucceedsOrFails)
s.Step(`^unsetting config property "(.*)" (succeeds|fails)$`,
UnsetConfigPropertySucceedsOrFails)
s.Step(`^login to the oc cluster (succeeds|fails)$`,
LoginToOcClusterSucceedsOrFails)
s.Step(`^with up to "(\d+)" retries with wait period of "(\d*(?:ms|s|m))" all cluster operators are running$`,
CheckClusterOperatorsWithRetry)
s.Step(`^with up to "(\d+)" retries with wait period of "(\d*(?:ms|s|m))" http response from "(.*)" has status code "(\d+)"$`,
CheckHTTPResponseWithRetry)
s.Step(`^with up to "(\d+)" retries with wait period of "(\d*(?:ms|s|m))" command "(.*)" output (?:should match|matches) "(.*)"$`,
CheckOutputMatchWithRetry)
s.Step(`stdout (?:should contain|contains) "(.*)" if bundle (is|is not) embedded$`,
StdoutContainsIfBundleEmbeddedOrNot)
// CRC file operations
s.Step(`^file "([^"]*)" exists in CRC home folder$`,
FileExistsInCRCHome)
s.Step(`"(JSON|YAML)" config file "(.*)" in CRC home folder (contains|does not contain) key "(.*)" with value matching "(.*)"$`,
ConfigFileInCRCHomeContainsKeyMatchingValue)
s.Step(`"(JSON|YAML)" config file "(.*)" in CRC home folder (contains|does not contain) key "(.*)"$`,
ConfigFileInCRCHomeContainsKey)
s.Step(`removing file "(.*)" from CRC home folder succeeds$`,
DeleteFileFromCRCHome)
s.BeforeSuite(func() {
usr, _ := user.Current()
CRCHome = filepath.Join(usr.HomeDir, ".crc")
// init CRCBinary if no location provided by user
if CRCBinary == "" {
fmt.Println("Expecting the CRC binary to be in $HOME/go/bin.")
usr, _ := user.Current()
CRCBinary = filepath.Join(usr.HomeDir, "go", "bin")
}
// put CRC binary location on top of PATH
path := os.Getenv("PATH")
newPath := fmt.Sprintf("%s%c%s", CRCBinary, os.PathListSeparator, path)
err := os.Setenv("PATH", newPath)
if err != nil {
fmt.Println("Could not put CRC location on top of PATH")
os.Exit(1)
}
if bundleURL == "embedded" {
fmt.Println("Expecting the bundle to be embedded in the CRC binary.")
bundleEmbedded = true
if bundleVersion == "" {
fmt.Println("User must specify --bundle-version if bundle is embedded")
os.Exit(1)
}
// assume default hypervisor
var hypervisor string
switch platform := runtime.GOOS; platform {
case "darwin":
hypervisor = "hyperkit"
case "linux":
hypervisor = "libvirt"
case "windows":
hypervisor = "hyperv"
default:
fmt.Printf("Unsupported OS: %s", platform)
os.Exit(1)
}
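// embedded bundles are named following the crc_<hypervisor>_<version>.crcbundle convention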
bundleName = fmt.Sprintf("crc_%s_%s.crcbundle", hypervisor, bundleVersion)
} else {
bundleEmbedded = false
_, bundleName = filepath.Split(bundleURL)
}
if pullSecretFile == "" {
fmt.Println("User must specify the pull secret file via --pull-secret-file flag.")
os.Exit(1)
}
// remove $HOME/.crc
err = RemoveCRCHome()
if err != nil {
fmt.Println(err)
}
})
s.AfterSuite(func() {
err := DeleteCRC()
if err != nil {
fmt.Printf("Could not delete CRC VM: %s.", err)
}
})
s.BeforeFeature(func(this *gherkin.Feature) {
if !bundleEmbedded {
if _, err := os.Stat(bundleName); os.IsNotExist(err) {
// Obtain the bundle to current dir
fmt.Println("Obtaining bundle...")
bundle, err := DownloadBundle(bundleURL, ".")
if err != nil {
fmt.Printf("Failed to obtain CRC bundle, %v\n", err)
os.Exit(1)
}
fmt.Println("Using bundle:", bundle)
} else if err != nil {
fmt.Printf("Unexpected error obtaining the bundle %v.\n", bundleName)
os.Exit(1)
} else {
fmt.Println("Using existing bundle:", bundleName)
}
}
})
}
func CheckClusterOperatorsWithRetry(retryCount int, retryWait string) error {
retryDuration, err := time.ParseDuration(retryWait)
if err != nil {
return err
}
ocConfig := oc.UseOCWithConfig("crc")
for i := 0; i < retryCount; i++ {
s, err := oc.GetClusterOperatorStatus(ocConfig)
if err != nil {
return err
}
if s {
return nil
}
time.Sleep(retryDuration)
}
return fmt.Errorf("Some cluster operators are still not running.\n")
}
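// CheckHTTPResponseWithRetry repeatedly issues a GET request to address (skipping TLS
// verification) until the expected status code is returned or retryCount attempts are exhausted.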
func CheckHTTPResponseWithRetry(retryCount int, retryWait string, address string, expectedStatusCode int) error {
retryDuration, err := time.ParseDuration(retryWait)
if err != nil {
return err
}
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
var resp *http.Response
for i := 0; i < retryCount; i++ {
resp, err = client.Get(address)
if err != nil {
return err
}
if resp.StatusCode == expectedStatusCode {
return nil
}
time.Sleep(retryDuration)
}
return fmt.Errorf("Got %d as Status Code instead of expected %d.", resp.StatusCode, expectedStatusCode)
}
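// CheckOutputMatchWithRetry re-runs command until its stdout matches the expected pattern
// or retryCount attempts are exhausted, sleeping retryTime between attempts.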
func CheckOutputMatchWithRetry(retryCount int, retryTime string, command string, expected string) error {
retryDuration, err := time.ParseDuration(retryTime)
if err != nil {
return err
}
var matchErr error
for i := 0; i < retryCount; i++ {
execErr := clicumber.ExecuteCommand(command)
if execErr == nil {
matchErr = clicumber.CommandReturnShouldMatch("stdout", expected)
if matchErr == nil {
return nil
}
}
time.Sleep(retryDuration)
}
return matchErr
}
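// DeleteFileFromCRCHome removes fileName from the CRC home folder; a missing file is not an error.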
func DeleteFileFromCRCHome(fileName string) error {
theFile := filepath.Join(CRCHome, fileName)
if _, err := os.Stat(theFile); os.IsNotExist(err) {
return nil
}
err := clicumber.DeleteFile(theFile)
if err != nil {
return fmt.Errorf("error deleting file %v: %v", theFile, err)
}
return nil
}
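// FileExistsInCRCHome asserts that fileName is present in the CRC home folder.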
func FileExistsInCRCHome(fileName string) error {
theFile := filepath.Join(CRCHome, fileName)
_, err := os.Stat(theFile)
if os.IsNotExist(err) {
return fmt.Errorf("file %s does not exists, error: %v ", theFile, err)
}
return err
}
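// ConfigFileInCRCHomeContainsKeyMatchingValue checks whether the value stored under keyPath in a
// JSON/YAML config file matches (or, per condition, does not match) the expected regular expression.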
func ConfigFileInCRCHomeContainsKeyMatchingValue(format string, configFile string, condition string, keyPath string, expectedValue string) error {
if expectedValue == "current bundle" {
expectedValue = bundleName
}
configPath := filepath.Join(CRCHome, configFile)
config, err := clicumber.GetFileContent(configPath)
if err != nil {
return err
}
keyValue, err := clicumber.GetConfigKeyValue([]byte(config), format, keyPath)
if err != nil {
return err
}
matches, err := clicumber.PerformRegexMatch(expectedValue, keyValue)
if err != nil {
return err
} else if (condition == "contains") && !matches {
return fmt.Errorf("For key '%s' config contains unexpected value '%s'", keyPath, keyValue)
} else if (condition == "does not contain") && matches {
return fmt.Errorf("For key '%s' config contains value '%s', which it should not contain", keyPath, keyValue)
}
return nil
}
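// ConfigFileInCRCHomeContainsKey checks whether keyPath is present (or, per condition, absent)
// in a JSON/YAML config file in the CRC home folder.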
func ConfigFileInCRCHomeContainsKey(format string, configFile string, condition string, keyPath string) error {
configPath := filepath.Join(CRCHome, configFile)
config, err := clicumber.GetFileContent(configPath)
if err != nil {
return err
}
keyValue, err := clicumber.GetConfigKeyValue([]byte(config), format, keyPath)
if err != nil {
return err
}
if (condition == "contains") && (keyValue == "<nil>") {
return fmt.Errorf("Config does not contain any value for key %s", keyPath)
} else if (condition == "does not contain") && (keyValue != "<nil>") {
return fmt.Errorf("Config contains key %s with assigned value: %s", keyPath, keyValue)
}
return nil
}
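// LoginToOcClusterSucceedsOrFails logs in to the cluster as kubeadmin using the password
// cached for the current bundle.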
func LoginToOcClusterSucceedsOrFails(expected string) error {
bundle := strings.Split(bundleName, ".crcbundle")[0]
pswdLocation := filepath.Join(CRCHome, "cache", bundle, "kubeadmin-password")
pswd, err := ioutil.ReadFile(pswdLocation)
if err != nil {
return err
}
cmd := fmt.Sprintf("oc login --insecure-skip-tls-verify -u kubeadmin -p %s https://api.crc.testing:6443", pswd)
err = clicumber.ExecuteCommandSucceedsOrFails(cmd, expected)
return err
}
func StartCRCWithDefaultBundleAndDefaultHypervisorSucceedsOrFails(expected string) error {
var extraBundleArgs string
if !bundleEmbedded {
extraBundleArgs = fmt.Sprintf("-b %s", bundleName)
}
cmd := fmt.Sprintf("crc start -p '%s' %s --log-level debug", pullSecretFile, extraBundleArgs)
err := clicumber.ExecuteCommandSucceedsOrFails(cmd, expected)
return err
}
func StartCRCWithDefaultBundleAndHypervisorSucceedsOrFails(hypervisor string, expected string) error {
var extraBundleArgs string
if !bundleEmbedded {
extraBundleArgs = fmt.Sprintf("-b %s", bundleName)
}
cmd := fmt.Sprintf("crc start -d %s -p '%s' %s --log-level debug", hypervisor, pullSecretFile, extraBundleArgs)
err := clicumber.ExecuteCommandSucceedsOrFails(cmd, expected)
return err
}
func StartCRCWithDefaultBundleAndNameServerSucceedsOrFails(nameserver string, expected string) error {
var extraBundleArgs string
if !bundleEmbedded {
extraBundleArgs = fmt.Sprintf("-b %s", bundleName)
}
cmd := fmt.Sprintf("crc start -n %s -p '%s' %s --log-level debug", nameserver, pullSecretFile, extraBundleArgs)
err := clicumber.ExecuteCommandSucceedsOrFails(cmd, expected)
return err
}
func StdoutContainsIfBundleEmbeddedOrNot(value string, expected string) error {
if expected == "is" { // expect embedded
if bundleEmbedded { // really embedded
return clicumber.CommandReturnShouldContain("stdout", value)
} else {
return clicumber.CommandReturnShouldNotContain("stdout", value)
}
} else { // expect not embedded
if !bundleEmbedded { // really not embedded
return clicumber.CommandReturnShouldContain("stdout", value)
} else {
return clicumber.CommandReturnShouldNotContain("stdout", value)
}
}
}
func SetConfigPropertyToValueSucceedsOrFails(property string, value string, expected string) error {
if value == "current bundle" {
if bundleEmbedded {
value = filepath.Join(CRCHome, bundleName)
} else {
value = bundleName
}
}
cmd := "crc config set " + property + " " + value
err := clicumber.ExecuteCommandSucceedsOrFails(cmd, expected)
return err
}
func UnsetConfigPropertySucceedsOrFails(property string, expected string) error {
cmd := "crc config unset " + property
err := clicumber.ExecuteCommandSucceedsOrFails(cmd, expected)
return err
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
pkg/fanal/artifact/remote/git.go
|
package remote
import (
"context"
"io/ioutil"
"net/url"
"os"
git "github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing/transport/http"
"golang.org/x/xerrors"
"github.com/aquasecurity/trivy/pkg/fanal/artifact"
"github.com/aquasecurity/trivy/pkg/fanal/artifact/local"
"github.com/aquasecurity/trivy/pkg/fanal/cache"
"github.com/aquasecurity/trivy/pkg/fanal/types"
)
type Artifact struct {
url string
local artifact.Artifact
}
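// NewArtifact shallow-clones the repository at rawurl into a temporary directory and wraps it
// as a local filesystem artifact; the returned cleanup function removes the temporary clone.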
func NewArtifact(rawurl string, c cache.ArtifactCache, artifactOpt artifact.Option) (
artifact.Artifact, func(), error) {
cleanup := func() {}
u, err := newURL(rawurl)
if err != nil {
return nil, cleanup, err
}
tmpDir, err := ioutil.TempDir("", "fanal-remote")
if err != nil {
return nil, cleanup, err
}
cloneOptions := git.CloneOptions{
URL: u.String(),
Auth: gitAuth(),
Progress: os.Stdout,
Depth: 1,
InsecureSkipTLS: artifactOpt.InsecureSkipTLS,
}
// suppress clone output if noProgress
if artifactOpt.NoProgress {
cloneOptions.Progress = nil
}
_, err = git.PlainClone(tmpDir, false, &cloneOptions)
if err != nil {
return nil, cleanup, xerrors.Errorf("git error: %w", err)
}
cleanup = func() {
_ = os.RemoveAll(tmpDir)
}
art, err := local.NewArtifact(tmpDir, c, artifactOpt)
if err != nil {
return nil, cleanup, xerrors.Errorf("fs artifact: %w", err)
}
return Artifact{
url: rawurl,
local: art,
}, cleanup, nil
}
func (a Artifact) Inspect(ctx context.Context) (types.ArtifactReference, error) {
ref, err := a.local.Inspect(ctx)
if err != nil {
return types.ArtifactReference{}, xerrors.Errorf("remote repository error: %w", err)
}
ref.Name = a.url
ref.Type = types.ArtifactRemoteRepository
return ref, nil
}
func (Artifact) Clean(_ types.ArtifactReference) error {
return nil
}
func newURL(rawurl string) (*url.URL, error) {
u, err := url.Parse(rawurl)
if err != nil {
return nil, xerrors.Errorf("url parse error: %w", err)
}
// "https://" can be omitted
// e.g. github.com/aquasecurity/fanal
if u.Scheme == "" {
u.Scheme = "https"
}
return u, nil
}
// Helper function to check for a GitHub/GitLab token from env vars in order to
// make authenticated requests to access private repos
func gitAuth() *http.BasicAuth {
var auth *http.BasicAuth
// The username can be anything for HTTPS Git operations
gitUsername := "fanal-aquasecurity-scan"
// We first check if a GitHub token was provided
githubToken := os.Getenv("GITHUB_TOKEN")
if githubToken != "" {
auth = &http.BasicAuth{
Username: gitUsername,
Password: githubToken,
}
return auth
}
// Otherwise we check if a GitLab token was provided
gitlabToken := os.Getenv("GITLAB_TOKEN")
if gitlabToken != "" {
auth = &http.BasicAuth{
Username: gitUsername,
Password: gitlabToken,
}
return auth
}
// If no token was provided, we simply return a nil,
// which will make the request to be unauthenticated
return nil
}
|
[
"\"GITHUB_TOKEN\"",
"\"GITLAB_TOKEN\""
] |
[] |
[
"GITHUB_TOKEN",
"GITLAB_TOKEN"
] |
[]
|
["GITHUB_TOKEN", "GITLAB_TOKEN"]
|
go
| 2 | 0 | |
vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"strings"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/storage/value"
utilfeature "k8s.io/apiserver/pkg/util/feature"
)
// Backend describes the storage servers, the information here should be enough
// for health validations.
type Backend struct {
// the url of storage backend like: https://etcd.domain:2379
Server string
// the required tls config
TLSConfig *tls.Config
}
// StorageFactory is the interface to locate the storage for a given GroupResource
type StorageFactory interface {
// NewConfig finds the storage destination for the given group and resource. It will
// return an error if the group has no storage destination configured.
NewConfig(groupResource schema.GroupResource) (*storagebackend.Config, error)
// ResourcePrefix returns the overridden resource prefix for the GroupResource
// This allows for cohabitation of resources with different native types and provides
// centralized control over the shape of etcd directories
ResourcePrefix(groupResource schema.GroupResource) string
// Backends gets all backends for all registered storage destinations.
// Used for getting all instances for health validations.
Backends() []Backend
}
// DefaultStorageFactory takes a GroupResource and returns back its storage interface. This result includes:
// 1. Merged etcd config, including: auth, server locations, prefixes
// 2. Resource encodings for storage: group,version,kind to store as
// 3. Cohabitating default: some resources like hpa are exposed through multiple APIs. They must agree on 1 and 2
type DefaultStorageFactory struct {
// StorageConfig describes how to create a storage backend in general.
// Its authentication information will be used for every storage.Interface returned.
StorageConfig storagebackend.Config
Overrides map[schema.GroupResource]groupResourceOverrides
DefaultResourcePrefixes map[schema.GroupResource]string
// DefaultMediaType is the media type used to store resources. If it is not set, "application/json" is used.
DefaultMediaType string
// DefaultSerializer is used to create encoders and decoders for the storage.Interface.
DefaultSerializer runtime.StorageSerializer
// ResourceEncodingConfig describes how to encode a particular GroupVersionResource
ResourceEncodingConfig ResourceEncodingConfig
// APIResourceConfigSource indicates whether the *storage* is enabled, NOT the API
// This is discrete from resource enablement because those are separate concerns. How this source is configured
// is left to the caller.
APIResourceConfigSource APIResourceConfigSource
// newStorageCodecFn exists to be overwritten for unit testing.
newStorageCodecFn func(opts StorageCodecConfig) (codec runtime.Codec, err error)
}
type groupResourceOverrides struct {
// etcdLocation contains the list of "special" locations that are used for particular GroupResources
// These are merged on top of the StorageConfig when requesting the storage.Interface for a given GroupResource
etcdLocation []string
// etcdPrefix is the base location for a GroupResource.
etcdPrefix string
// etcdResourcePrefix is the location to use to store a particular type under the `etcdPrefix` location
// If empty, the default mapping is used. If the default mapping doesn't contain an entry, it will use
// the ToLowered name of the resource, not including the group.
etcdResourcePrefix string
// mediaType is the desired serializer to choose. If empty, the default is chosen.
mediaType string
// serializer contains the list of "special" serializers for a GroupResource. Resource=* means for the entire group
serializer runtime.StorageSerializer
// cohabitatingResources keeps track of which resources must be stored together. This happens when we have multiple ways
// of exposing one set of concepts. autoscaling.HPA and extensions.HPA as a for instance
// The order of the slice matters! It is the priority order of lookup for finding a storage location
cohabitatingResources []schema.GroupResource
// encoderDecoratorFn is optional and may wrap the provided encoder prior to being serialized.
encoderDecoratorFn func(runtime.Encoder) runtime.Encoder
// decoderDecoratorFn is optional and may wrap the provided decoders (can add new decoders). The order of
// returned decoders will be priority for attempt to decode.
decoderDecoratorFn func([]runtime.Decoder) []runtime.Decoder
// transformer is optional and shall encrypt that resource at rest.
transformer value.Transformer
// disablePaging will prevent paging on the provided resource.
disablePaging bool
}
// Apply overrides the provided config and options if the override has a value in that position
func (o groupResourceOverrides) Apply(config *storagebackend.Config, options *StorageCodecConfig) {
if len(o.etcdLocation) > 0 {
config.ServerList = o.etcdLocation
}
if len(o.etcdPrefix) > 0 {
config.Prefix = o.etcdPrefix
}
if len(o.mediaType) > 0 {
options.StorageMediaType = o.mediaType
}
if o.serializer != nil {
options.StorageSerializer = o.serializer
}
if o.encoderDecoratorFn != nil {
options.EncoderDecoratorFn = o.encoderDecoratorFn
}
if o.decoderDecoratorFn != nil {
options.DecoderDecoratorFn = o.decoderDecoratorFn
}
if o.transformer != nil {
config.Transformer = o.transformer
}
if o.disablePaging {
config.Paging = false
}
}
var _ StorageFactory = &DefaultStorageFactory{}
const AllResources = "*"
func NewDefaultStorageFactory(config storagebackend.Config, defaultMediaType string, defaultSerializer runtime.StorageSerializer, resourceEncodingConfig ResourceEncodingConfig, resourceConfig APIResourceConfigSource, specialDefaultResourcePrefixes map[schema.GroupResource]string) *DefaultStorageFactory {
config.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
if len(defaultMediaType) == 0 {
defaultMediaType = runtime.ContentTypeJSON
}
return &DefaultStorageFactory{
StorageConfig: config,
Overrides: map[schema.GroupResource]groupResourceOverrides{},
DefaultMediaType: defaultMediaType,
DefaultSerializer: defaultSerializer,
ResourceEncodingConfig: resourceEncodingConfig,
APIResourceConfigSource: resourceConfig,
DefaultResourcePrefixes: specialDefaultResourcePrefixes,
newStorageCodecFn: NewStorageCodec,
}
}
func (s *DefaultStorageFactory) SetEtcdLocation(groupResource schema.GroupResource, location []string) {
overrides := s.Overrides[groupResource]
overrides.etcdLocation = location
s.Overrides[groupResource] = overrides
}
func (s *DefaultStorageFactory) SetEtcdPrefix(groupResource schema.GroupResource, prefix string) {
overrides := s.Overrides[groupResource]
overrides.etcdPrefix = prefix
s.Overrides[groupResource] = overrides
}
// SetDisableAPIListChunking allows a specific resource to disable paging at the storage layer, to prevent
// exposure of key names in continuations. This may be overridden by feature gates.
func (s *DefaultStorageFactory) SetDisableAPIListChunking(groupResource schema.GroupResource) {
overrides := s.Overrides[groupResource]
overrides.disablePaging = true
s.Overrides[groupResource] = overrides
}
// SetResourceEtcdPrefix sets the prefix for a resource, but not the base-dir. You'll end up in `etcdPrefix/resourceEtcdPrefix`.
func (s *DefaultStorageFactory) SetResourceEtcdPrefix(groupResource schema.GroupResource, prefix string) {
overrides := s.Overrides[groupResource]
overrides.etcdResourcePrefix = prefix
s.Overrides[groupResource] = overrides
}
func (s *DefaultStorageFactory) SetSerializer(groupResource schema.GroupResource, mediaType string, serializer runtime.StorageSerializer) {
overrides := s.Overrides[groupResource]
overrides.mediaType = mediaType
overrides.serializer = serializer
s.Overrides[groupResource] = overrides
}
func (s *DefaultStorageFactory) SetTransformer(groupResource schema.GroupResource, transformer value.Transformer) {
overrides := s.Overrides[groupResource]
overrides.transformer = transformer
s.Overrides[groupResource] = overrides
}
// AddCohabitatingResources links resources together. The order of the slice matters! It is the priority order of lookup for finding a storage location.
func (s *DefaultStorageFactory) AddCohabitatingResources(groupResources ...schema.GroupResource) {
for _, groupResource := range groupResources {
overrides := s.Overrides[groupResource]
overrides.cohabitatingResources = groupResources
s.Overrides[groupResource] = overrides
}
}
func (s *DefaultStorageFactory) AddSerializationChains(encoderDecoratorFn func(runtime.Encoder) runtime.Encoder, decoderDecoratorFn func([]runtime.Decoder) []runtime.Decoder, groupResources ...schema.GroupResource) {
for _, groupResource := range groupResources {
overrides := s.Overrides[groupResource]
overrides.encoderDecoratorFn = encoderDecoratorFn
overrides.decoderDecoratorFn = decoderDecoratorFn
s.Overrides[groupResource] = overrides
}
}
func getAllResourcesAlias(resource schema.GroupResource) schema.GroupResource {
return schema.GroupResource{Group: resource.Group, Resource: AllResources}
}
func (s *DefaultStorageFactory) getStorageGroupResource(groupResource schema.GroupResource) schema.GroupResource {
for _, potentialStorageResource := range s.Overrides[groupResource].cohabitatingResources {
if s.APIResourceConfigSource.AnyVersionOfResourceEnabled(potentialStorageResource) {
return potentialStorageResource
}
}
return groupResource
}
// NewConfig finds the storage destination for the given group and resource. It will
// return an error if the group has no storage destination configured.
func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource) (*storagebackend.Config, error) {
chosenStorageResource := s.getStorageGroupResource(groupResource)
// operate on copy
storageConfig := s.StorageConfig
codecConfig := StorageCodecConfig{
StorageMediaType: s.DefaultMediaType,
StorageSerializer: s.DefaultSerializer,
}
if override, ok := s.Overrides[getAllResourcesAlias(chosenStorageResource)]; ok {
override.Apply(&storageConfig, &codecConfig)
}
if override, ok := s.Overrides[chosenStorageResource]; ok {
override.Apply(&storageConfig, &codecConfig)
}
var err error
codecConfig.StorageVersion, err = s.ResourceEncodingConfig.StorageEncodingFor(chosenStorageResource)
if err != nil {
return nil, err
}
codecConfig.MemoryVersion, err = s.ResourceEncodingConfig.InMemoryEncodingFor(groupResource)
if err != nil {
return nil, err
}
codecConfig.Config = storageConfig
storageConfig.Codec, err = s.newStorageCodecFn(codecConfig)
if err != nil {
return nil, err
}
glog.V(3).Infof("storing %v in %v, reading as %v from %#v", groupResource, codecConfig.StorageVersion, codecConfig.MemoryVersion, codecConfig.Config)
return &storageConfig, nil
}
// Backends returns all backends for all registered storage destinations.
// Used for getting all instances for health validations.
func (s *DefaultStorageFactory) Backends() []Backend {
servers := sets.NewString(s.StorageConfig.ServerList...)
for _, overrides := range s.Overrides {
servers.Insert(overrides.etcdLocation...)
}
tlsConfig := &tls.Config{
InsecureSkipVerify: true,
}
if len(s.StorageConfig.CertFile) > 0 && len(s.StorageConfig.KeyFile) > 0 {
cert, err := tls.LoadX509KeyPair(s.StorageConfig.CertFile, s.StorageConfig.KeyFile)
if err != nil {
glog.Errorf("failed to load key pair while getting backends: %s", err)
} else {
tlsConfig.Certificates = []tls.Certificate{cert}
}
}
if len(s.StorageConfig.CAFile) > 0 {
if caCert, err := ioutil.ReadFile(s.StorageConfig.CAFile); err != nil {
glog.Errorf("failed to read ca file while getting backends: %s", err)
} else {
caPool := x509.NewCertPool()
caPool.AppendCertsFromPEM(caCert)
tlsConfig.RootCAs = caPool
tlsConfig.InsecureSkipVerify = false
}
}
backends := []Backend{}
for server := range servers {
backends = append(backends, Backend{
Server: server,
// We can't share TLSConfig across different backends to avoid races.
// For more details see: http://pr.k8s.io/59338
TLSConfig: tlsConfig.Clone(),
})
}
return backends
}
func (s *DefaultStorageFactory) ResourcePrefix(groupResource schema.GroupResource) string {
chosenStorageResource := s.getStorageGroupResource(groupResource)
groupOverride := s.Overrides[getAllResourcesAlias(chosenStorageResource)]
exactResourceOverride := s.Overrides[chosenStorageResource]
etcdResourcePrefix := s.DefaultResourcePrefixes[chosenStorageResource]
if len(groupOverride.etcdResourcePrefix) > 0 {
etcdResourcePrefix = groupOverride.etcdResourcePrefix
}
if len(exactResourceOverride.etcdResourcePrefix) > 0 {
etcdResourcePrefix = exactResourceOverride.etcdResourcePrefix
}
if len(etcdResourcePrefix) == 0 {
etcdResourcePrefix = strings.ToLower(chosenStorageResource.Resource)
}
return etcdResourcePrefix
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jimongit.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
startup/gui/shaderPresets.py
|
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import IECore
import Gaffer
import GafferScene
def __registerShaderPresets( presets ) :
for name, value in presets :
Gaffer.Metadata.registerValue( GafferScene.ShaderTweaks, "shader", "preset:" + name, value )
Gaffer.Metadata.registerValue( GafferScene.ShaderQuery, "shader", "preset:" + name, value )
with IECore.IgnoredExceptions( ImportError ) :
import GafferArnold
__registerShaderPresets( [
( "Arnold Surface", "ai:surface" ),
( "Arnold Displacement", "ai:disp_map" ),
( "Arnold Light", "ai:light" ),
( "Arnold Gobo", "ai:lightFilter:gobo" ),
( "Arnold Decay", "ai:lightFilter:light_decay" ),
( "Arnold Barndoor", "ai:lightFilter:barndoor" ),
( "Arnold Blocker", "ai:lightFilter:filter" )
] )
if os.environ.get( "GAFFERAPPLESEED_HIDE_UI", "" ) != "1" :
with IECore.IgnoredExceptions( ImportError ) :
import GafferAppleseed
__registerShaderPresets( [
( "Appleseed Light", "as:light" ),
] )
with IECore.IgnoredExceptions( ImportError ) :
import GafferOSL
__registerShaderPresets( [
( "OSL Surface", "osl:surface" ),
( "OSL Light", "osl:light" ),
] )
__registerShaderPresets( [ ( "OpenGL Surface", "gl:surface" ) ] )
|
[] |
[] |
[
"GAFFERAPPLESEED_HIDE_UI"
] |
[]
|
["GAFFERAPPLESEED_HIDE_UI"]
|
python
| 1 | 0 | |
src/testcases/CWE789_Uncontrolled_Mem_Alloc/s01/CWE789_Uncontrolled_Mem_Alloc__Environment_ArrayList_41.java
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE789_Uncontrolled_Mem_Alloc__Environment_ArrayList_41.java
Label Definition File: CWE789_Uncontrolled_Mem_Alloc.int.label.xml
Template File: sources-sink-41.tmpl.java
*/
/*
* @description
* CWE: 789 Uncontrolled Memory Allocation
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* BadSink: ArrayList Create an ArrayList using data as the initial size
* Flow Variant: 41 Data flow: data passed as an argument from one method to another in the same class
*
* */
package testcases.CWE789_Uncontrolled_Mem_Alloc.s01;
import testcasesupport.*;
import javax.servlet.http.*;
import java.util.logging.Level;
import java.util.ArrayList;
public class CWE789_Uncontrolled_Mem_Alloc__Environment_ArrayList_41 extends AbstractTestCase
{
private void badSink(int data ) throws Throwable
{
/* POTENTIAL FLAW: Create an ArrayList using data as the initial size. data may be very large, creating memory issues */
ArrayList intArrayList = new ArrayList(data);
}
public void bad() throws Throwable
{
int data;
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
badSink(data );
}
public void good() throws Throwable
{
goodG2B();
}
private void goodG2BSink(int data ) throws Throwable
{
/* POTENTIAL FLAW: Create an ArrayList using data as the initial size. data may be very large, creating memory issues */
ArrayList intArrayList = new ArrayList(data);
}
/* goodG2B() - use goodsource and badsink */
private void goodG2B() throws Throwable
{
int data;
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
goodG2BSink(data );
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
|
[
"\"ADD\""
] |
[] |
[
"ADD"
] |
[]
|
["ADD"]
|
java
| 1 | 0 | |
example.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example client using IBM Event Streams SDK for Python.
"""
# Code Setup
from typing import Set
from ibm_cloud_sdk_core.authenticators import BasicAuthenticator
from eventstreams_sdk.adminrest_v1 import *
import os
from http import HTTPStatus
SERVICE_NAME = 'adminrest_v1'
KAFKA_ADMIN_URL = os.getenv('KAFKA_ADMIN_URL')
BEARER_TOKEN = os.getenv('BEARER_TOKEN')
API_KEY = os.getenv('API_KEY')
# End Code Setup
# Create Authenticator
if not KAFKA_ADMIN_URL:
print("Please set env KAFKA_ADMIN_URL")
exit(1)
if not API_KEY and not BEARER_TOKEN:
print("Please set either an API_KEY or a BEARER_TOKEN")
exit(1)
if API_KEY and BEARER_TOKEN:
print("Please set either an API_KEY or a BEARER_TOKEN not both")
exit(1)
if API_KEY:
# Create a Basic IAM authenticator.
authenticator = BasicAuthenticator('token', API_KEY)
else:
# Create an IAM Bearer Token authenticator.
authenticator = BasicAuthenticator('token', BEARER_TOKEN)
service = AdminrestV1(
authenticator = authenticator
)
# End Authenticator
# Create Service
base_url = KAFKA_ADMIN_URL
service.set_service_url(base_url)
# End Create Service
def list_topics(service):
# Set up parameter values
topic_filter = ''
# Invoke list method.
try:
response = service.list_topics(
topic_filter=topic_filter,
)
if response.status_code == HTTPStatus.OK:
if not response.result:
print("\tnothing to list")
return
for topic in response.result:
print("\t" + topic["name"])
except Exception:
print("\tError Listing Topics")
# func.end
def create_topic(service,topic_name):
# Set up parameter values
partition_count = 1
configs = []
# Invoke create method.
try:
response = service.create_topic(
name=topic_name,
partition_count=partition_count,
configs=configs,
)
if response.status_code == HTTPStatus.ACCEPTED:
print("\ttopic created: " + topic_name)
except Exception:
print("\tError Creating Topic: " + topic_name)
# func.End
def delete_topic(service,topic_name):
# Lets try to delete it.
try:
response = service.delete_topic(
topic_name,
)
if response.status_code == HTTPStatus.ACCEPTED:
print("\ttopic deleted: "+topic_name)
except:
print("\tError Deleting Topic: " + topic_name)
# func.End
def topic_details(service,topic_name):
# Invoke get method.
try:
response = service.get_topic(
topic_name,
)
if response.status_code == HTTPStatus.OK:
for key, value in response.result.items():
print("\t" +key + ":" + str(value) )
except Exception:
print("\tError Getting Topic Details: " + topic_name)
# func.End
def update_topic(service,topic_name):
# Set up parameter values.
new_total_partition_count = 6
configs = []
# Invoke update method.
try:
response = service.update_topic(
topic_name,
new_total_partition_count=new_total_partition_count,
configs=configs,
)
if response.status_code == HTTPStatus.ACCEPTED:
print("\ttopic updated: " + topic_name)
except Exception:
print("\tError Updating Topic Details: " + topic_name)
# func.End
def get_mirroring_topic_selection(service):
# Invoke get selection method.
try:
response = service.get_mirroring_topic_selection()
if response.status_code == HTTPStatus.OK:
for topic in response.result:
print("\t" + topic["name"])
except Exception:
print("\tError Listing Mirroring Topics:")
# func.End
def get_list_mirroring_active_topics(service):
# Invoke active method.
try:
response = service.get_list_mirroring_active_topics()
if response.status_code == HTTPStatus.OK:
for topic in response.result:
print("\t" + topic["name"])
print("\tactive mirroring topics updated:")
except Exception:
print("\tError Listing Active Mirroring Topics:")
# func.End
def replace_mirroring_topic_selection(service,topic_name):
# Set up parameter values
includes = [topic_name]
# Invoke replace method.
try:
response = service.replace_mirroring_topic_selection(
includes=[topic_name],
)
if response.status_code == HTTPStatus.OK:
print("\tmirroring topic selection updated: " + ", ".join(includes))
except Exception:
print("\tError Replacing Mirroring Topics:")
# func.End
# Start examples.
print("List Topics")
list_topics(service)
print("Create Topic")
create_topic(service,"test-topic")
print("Print Topic Details")
topic_details(service,"test-topic")
print("List Topics")
list_topics(service)
print("Update Topic Details")
update_topic(service,"test-topic")
print("Print Topic Details")
topic_details(service,"test-topic")
# Uncomment these examples if you are running against a Event Streams Mirrored Target Cluster.
# print("List Active Mirroring Topics\n")
# get_list_mirroring_active_topics(service)
# print("Replace Mirroring Topics\n")
# replace_mirroring_topic_selection(service,"test-topic")
# print("List Mirroring Topic Selection\n")
# get_mirroring_topic_selection(service)
print("Delete Topic")
delete_topic(service,"test-topic")
print("List Topics")
list_topics(service)
|
[] |
[] |
[
"API_KEY",
"KAFKA_ADMIN_URL",
"BEARER_TOKEN"
] |
[]
|
["API_KEY", "KAFKA_ADMIN_URL", "BEARER_TOKEN"]
|
python
| 3 | 0 | |
pkg/box/verbs.go
|
package box
// This file implements the business logic related to a black box.
// These functions are usually called from cmd/blackbox/drive.go or
// external sytems that use box as a module.
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"github.com/StackExchange/blackbox/v2/pkg/bbutil"
"github.com/StackExchange/blackbox/v2/pkg/makesafe"
"github.com/olekukonko/tablewriter"
)
// AdminAdd adds admins.
func (bx *Box) AdminAdd(nom string, sdir string) error {
err := bx.getAdmins()
if err != nil {
return err
}
//fmt.Printf("ADMINS=%q\n", bx.Admins)
// Check for duplicates.
if i := sort.SearchStrings(bx.Admins, nom); i < len(bx.Admins) && bx.Admins[i] == nom {
return fmt.Errorf("Admin %v already an admin", nom)
}
bx.logDebug.Printf("ADMIN ADD rbd=%q\n", bx.RepoBaseDir)
changedFiles, err := bx.Crypter.AddNewKey(nom, bx.RepoBaseDir, sdir, bx.ConfigPath)
if err != nil {
return fmt.Errorf("AdminAdd failed AddNewKey: %v", err)
}
// TODO(tlim): Try the json file.
// Try the legacy file:
fn := filepath.Join(bx.ConfigPath, "blackbox-admins.txt")
bx.logDebug.Printf("Admins file: %q", fn)
err = bbutil.AddLinesToSortedFile(fn, nom)
if err != nil {
return fmt.Errorf("could not update file (%q,%q): %v", fn, nom, err)
}
changedFiles = append([]string{fn}, changedFiles...)
bx.Vcs.NeedsCommit("NEW ADMIN: "+nom, bx.RepoBaseDir, changedFiles)
return nil
}
// AdminList lists the admin id's.
func (bx *Box) AdminList() error {
err := bx.getAdmins()
if err != nil {
return err
}
for _, v := range bx.Admins {
fmt.Println(v)
}
return nil
}
// AdminRemove removes an id from the admin list.
func (bx *Box) AdminRemove([]string) error {
return fmt.Errorf("NOT IMPLEMENTED: AdminRemove")
}
// Cat outputs a file, unencrypting if needed.
func (bx *Box) Cat(names []string) error {
if err := anyGpg(names); err != nil {
return fmt.Errorf("cat: %w", err)
}
err := bx.getFiles()
if err != nil {
return err
}
for _, name := range names {
var out []byte
var err error
if _, ok := bx.FilesSet[name]; ok {
out, err = bx.Crypter.Cat(name)
} else {
out, err = ioutil.ReadFile(name)
}
if err != nil {
bx.logErr.Printf("BX_CRY3\n")
return fmt.Errorf("cat: %w", err)
}
fmt.Print(string(out))
}
return nil
}
// Decrypt decrypts a file.
func (bx *Box) Decrypt(names []string, overwrite bool, bulkpause bool, setgroup string) error {
var err error
if err := anyGpg(names); err != nil {
return err
}
err = bx.getFiles()
if err != nil {
return err
}
if bulkpause {
gpgAgentNotice()
}
groupchange := false
gid := -1
if setgroup != "" {
gid, err = parseGroup(setgroup)
if err != nil {
return fmt.Errorf("Invalid group name or gid: %w", err)
}
groupchange = true
}
bx.logDebug.Printf("DECRYPT GROUP %q %v,%v\n", setgroup, groupchange, gid)
if len(names) == 0 {
names = bx.Files
}
return decryptMany(bx, names, overwrite, groupchange, gid)
}
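// decryptMany decrypts each registered file in names, honoring the overwrite flag and
// optionally changing the group ownership of the resulting plaintext.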
func decryptMany(bx *Box, names []string, overwrite bool, groupchange bool, gid int) error {
// TODO(tlim): If we want to decrypt them in parallel, go has a helper function
// called "sync.WaitGroup()"" which would be useful here. We would probably
// want to add a flag on the command line (stored in a field such as bx.ParallelMax)
// that limits the amount of parallelism. The default for the flag should
// probably be runtime.NumCPU().
for _, name := range names {
fmt.Printf("========== DECRYPTING %q\n", name)
if !bx.FilesSet[name] {
bx.logErr.Printf("Skipping %q: File not registered with Blackbox", name)
continue
}
if (!overwrite) && bbutil.FileExistsOrProblem(name) {
bx.logErr.Printf("Skipping %q: Will not overwrite existing file", name)
continue
}
// TODO(tlim) v1 detects zero-length files and removes them, even
// if overwrite is disabled. I don't think anyone has ever used that
// feature. That said, if we want to do that, we would implement it here.
// TODO(tlim) v1 takes the md5 hash of the plaintext before it decrypts,
// then compares the new plaintext's md5. It prints "EXTRACTED" if
// there is a change.
err := bx.Crypter.Decrypt(name, bx.Umask, overwrite)
if err != nil {
bx.logErr.Printf("%q: %v", name, err)
continue
}
// FIXME(tlim): Clone the file perms from the .gpg file to the plaintext file.
if groupchange {
// FIXME(tlim): Also "chmod g+r" the file.
os.Chown(name, -1, gid)
}
}
return nil
}
// Diff ...
func (bx *Box) Diff([]string) error {
return fmt.Errorf("NOT IMPLEMENTED: Diff")
}
// Edit unencrypts, calls editor, calls encrypt.
func (bx *Box) Edit(names []string) error {
if err := anyGpg(names); err != nil {
return err
}
err := bx.getFiles()
if err != nil {
return err
}
for _, name := range names {
if _, ok := bx.FilesSet[name]; ok {
if !bbutil.FileExistsOrProblem(name) {
err := bx.Crypter.Decrypt(name, bx.Umask, false)
if err != nil {
return fmt.Errorf("edit failed %q: %w", name, err)
}
}
}
err := bbutil.RunBash(bx.Editor, name)
if err != nil {
return err
}
}
return nil
}
// Encrypt encrypts a file.
func (bx *Box) Encrypt(names []string, shred bool) error {
var err error
if err = anyGpg(names); err != nil {
return err
}
err = bx.getAdmins()
if err != nil {
return err
}
err = bx.getFiles()
if err != nil {
return err
}
if len(names) == 0 {
names = bx.Files
}
enames, err := encryptMany(bx, names, shred)
bx.Vcs.NeedsCommit(
PrettyCommitMessage("ENCRYPTED", names),
bx.RepoBaseDir,
enames,
)
return err
}
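// encryptMany encrypts each registered file in names and returns the paths of the
// generated ciphertext files, optionally shredding the plaintext afterwards.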
func encryptMany(bx *Box, names []string, shred bool) ([]string, error) {
var enames []string
for _, name := range names {
fmt.Printf("========== ENCRYPTING %q\n", name)
if !bx.FilesSet[name] {
bx.logErr.Printf("Skipping %q: File not registered with Blackbox", name)
continue
}
if !bbutil.FileExistsOrProblem(name) {
bx.logErr.Printf("Skipping. Plaintext does not exist: %q", name)
continue
}
ename, err := bx.Crypter.Encrypt(name, bx.Umask, bx.Admins)
if err != nil {
bx.logErr.Printf("Failed to encrypt %q: %v", name, err)
continue
}
enames = append(enames, ename)
if shred {
bx.Shred([]string{name})
}
}
return enames, nil
}
// FileAdd enrolls files.
func (bx *Box) FileAdd(names []string, shred bool) error {
bx.logDebug.Printf("FileAdd(shred=%v, %v)", shred, names)
// Check for dups.
// Encrypt them all.
// If that succeeds, add to the blackbox-files.txt file.
// (optionally) shred the plaintext.
// FIXME(tlim): Check if the plaintext is in GIT. If it is,
// remove it from Git and print a warning that they should
// eliminate the history or rotate any secrets.
if err := anyGpg(names); err != nil {
return err
}
err := bx.getAdmins()
if err != nil {
return err
}
err = bx.getFiles()
if err != nil {
return err
}
if err := anyGpg(names); err != nil {
return err
}
// Check for newlines
for _, n := range names {
if strings.ContainsAny(n, "\n") {
return fmt.Errorf("file %q contains a newlineregistered", n)
}
}
// Check for duplicates.
for _, n := range names {
if i := sort.SearchStrings(bx.Files, n); i < len(bx.Files) && bx.Files[i] == n {
return fmt.Errorf("file %q already registered", n)
}
}
// Encrypt
var needsCommit []string
for _, name := range names {
s, err := bx.Crypter.Encrypt(name, bx.Umask, bx.Admins)
if err != nil {
return fmt.Errorf("AdminAdd failed AddNewKey: %v", err)
}
needsCommit = append(needsCommit, s)
}
// TODO(tlim): Try the json file.
// Try the legacy file:
fn := filepath.Join(bx.ConfigPath, "blackbox-files.txt")
bx.logDebug.Printf("Files file: %q", fn)
err = bbutil.AddLinesToSortedFile(fn, names...)
if err != nil {
return fmt.Errorf("could not update file (%q,%q): %v", fn, names, err)
}
err = bx.Shred(names)
if err != nil {
bx.logErr.Printf("Error while shredding: %v", err)
}
bx.Vcs.CommitTitle("BLACKBOX ADD FILE: " + makesafe.FirstFew(makesafe.ShellMany(names)))
bx.Vcs.IgnoreFiles(bx.RepoBaseDir, names)
bx.Vcs.NeedsCommit(
PrettyCommitMessage("blackbox-files.txt add", names),
bx.RepoBaseDir,
append([]string{filepath.Join(bx.ConfigPath, "blackbox-files.txt")}, needsCommit...),
)
return nil
}
// FileList lists the files.
func (bx *Box) FileList() error {
err := bx.getFiles()
if err != nil {
return err
}
for _, v := range bx.Files {
fmt.Println(v)
}
return nil
}
// FileRemove de-enrolls files.
func (bx *Box) FileRemove(names []string) error {
return fmt.Errorf("NOT IMPLEMENTED: FileRemove")
}
// Info prints debugging info.
func (bx *Box) Info() error {
err := bx.getFiles()
if err != nil {
bx.logErr.Printf("Info getFiles: %v", err)
}
err = bx.getAdmins()
if err != nil {
bx.logErr.Printf("Info getAdmins: %v", err)
}
fmt.Println("BLACKBOX:")
fmt.Printf(" Debug: %v\n", bx.Debug)
fmt.Printf(" Team: %q\n", bx.Team)
fmt.Printf(" RepoBaseDir: %q\n", bx.RepoBaseDir)
fmt.Printf(" ConfigPath: %q\n", bx.ConfigPath)
fmt.Printf(" Umask: %04o\n", bx.Umask)
fmt.Printf(" Editor: %v\n", bx.Editor)
fmt.Printf(" Shredder: %v\n", bbutil.ShredInfo())
fmt.Printf(" Admins: count=%v\n", len(bx.Admins))
fmt.Printf(" Files: count=%v\n", len(bx.Files))
fmt.Printf(" FilesSet: count=%v\n", len(bx.FilesSet))
fmt.Printf(" Vcs: %v\n", bx.Vcs)
fmt.Printf(" VcsName: %q\n", bx.Vcs.Name())
fmt.Printf(" Crypter: %v\n", bx.Crypter)
fmt.Printf(" CrypterName: %q\n", bx.Crypter.Name())
return nil
}
// Init initializes a repo.
func (bx *Box) Init(yes, vcsname string) error {
fmt.Printf("VCS root is: %q\n", bx.RepoBaseDir)
fmt.Printf("team is: %q\n", bx.Team)
fmt.Printf("configdir will be: %q\n", bx.ConfigPath)
if yes != "yes" {
fmt.Printf("Enable blackbox for this %v repo? (yes/no)? ", bx.Vcs.Name())
input := bufio.NewScanner(os.Stdin)
input.Scan()
ans := input.Text()
b, err := strconv.ParseBool(ans)
if err != nil {
b = false
if len(ans) > 0 {
if ans[0] == 'y' || ans[0] == 'Y' {
b = true
}
}
}
if !b {
fmt.Println("Ok. Maybe some other time.")
return nil
}
}
err := os.Mkdir(bx.ConfigPath, 0o750)
if err != nil {
return err
}
ba := filepath.Join(bx.ConfigPath, "blackbox-admins.txt")
bf := filepath.Join(bx.ConfigPath, "blackbox-files.txt")
bbutil.Touch(ba)
bbutil.Touch(bf)
bx.Vcs.SetFileTypeUnix(bx.RepoBaseDir, ba, bf)
bx.Vcs.IgnoreAnywhere(bx.RepoBaseDir, []string{
"pubring.gpg~",
"pubring.kbx~",
"secring.gpg",
})
fs := []string{ba, bf}
bx.Vcs.NeedsCommit(
"NEW: "+strings.Join(makesafe.RedactMany(fs), " "),
bx.RepoBaseDir,
fs,
)
bx.Vcs.CommitTitle("INITIALIZE BLACKBOX")
return nil
}
// Reencrypt decrypts and reencrypts files.
func (bx *Box) Reencrypt(names []string, overwrite bool, bulkpause bool) error {
allFiles := false
if err := anyGpg(names); err != nil {
return err
}
if err := bx.getAdmins(); err != nil {
return err
}
if err := bx.getFiles(); err != nil {
return err
}
if len(names) == 0 {
names = bx.Files
allFiles = true
}
if bulkpause {
gpgAgentNotice()
}
fmt.Println("========== blackbox administrators are:")
bx.AdminList()
fmt.Println("========== (the above people will be able to access the file)")
if overwrite {
bbutil.ShredFiles(names)
} else {
warned := false
for _, n := range names {
if bbutil.FileExistsOrProblem(n) {
if !warned {
fmt.Printf("========== Shred these files?\n")
warned = true
}
fmt.Println("SHRED?", n)
}
}
if warned {
shouldWeOverwrite()
}
}
// Decrypt
if err := decryptMany(bx, names, overwrite, false, 0); err != nil {
return fmt.Errorf("reencrypt failed decrypt: %w", err)
}
enames, err := encryptMany(bx, names, false)
if err != nil {
return fmt.Errorf("reencrypt failed encrypt: %w", err)
}
if err := bbutil.ShredFiles(names); err != nil {
return fmt.Errorf("reencrypt failed shred: %w", err)
}
if allFiles {
// If the "--all" flag was used, don't try to list all the files.
bx.Vcs.NeedsCommit(
"REENCRYPT all files",
bx.RepoBaseDir,
enames,
)
} else {
bx.Vcs.NeedsCommit(
PrettyCommitMessage("REENCRYPT", names),
bx.RepoBaseDir,
enames,
)
}
return nil
}
// Shred shreds files.
func (bx *Box) Shred(names []string) error {
if err := anyGpg(names); err != nil {
return err
}
err := bx.getFiles()
// Calling getFiles() has the benefit of making sure we are in a repo.
if err != nil {
return err
}
if len(names) == 0 {
names = bx.Files
}
return bbutil.ShredFiles(names)
}
// Status prints the status of files.
func (bx *Box) Status(names []string, nameOnly bool, match string) error {
err := bx.getFiles()
if err != nil {
return err
}
var flist []string
if len(names) == 0 {
flist = bx.Files
} else {
flist = names
}
var data [][]string
var onlylist []string
thirdColumn := false
for _, name := range flist {
var stat string
var err error
if _, ok := bx.FilesSet[name]; ok {
stat, err = FileStatus(name)
} else {
stat, err = "NOTREG", nil
}
if (match == "") || (stat == match) {
if err == nil {
data = append(data, []string{stat, name})
onlylist = append(onlylist, name)
} else {
thirdColumn = true
data = append(data, []string{stat, name, fmt.Sprintf("%v", err)})
onlylist = append(onlylist, fmt.Sprintf("%v: %v", name, err))
}
}
}
if nameOnly {
fmt.Println(strings.Join(onlylist, "\n"))
return nil
}
table := tablewriter.NewWriter(os.Stdout)
table.SetAutoWrapText(false)
if thirdColumn {
table.SetHeader([]string{"Status", "Name", "Error"})
} else {
table.SetHeader([]string{"Status", "Name"})
}
for _, v := range data {
table.Append(v)
}
table.Render() // Send output
return nil
}
// TestingInitRepo initializes a repo.
// Uses bx.Vcs to create ".git" or whatever.
// Uses bx.Vcs to discover what was created, testing its work.
func (bx *Box) TestingInitRepo() error {
if bx.Vcs == nil {
fmt.Println("bx.Vcs is nil")
fmt.Printf("BLACKBOX_VCS=%q\n", os.Getenv("BLACKBOX_VCS"))
os.Exit(1)
}
fmt.Printf("ABOUT TO CALL TestingInitRepo\n")
fmt.Printf("vcs = %v\n", bx.Vcs.Name())
err := bx.Vcs.TestingInitRepo()
fmt.Printf("RETURNED from TestingInitRepo: %v\n", err)
fmt.Println(os.Getwd())
if err != nil {
return fmt.Errorf("TestingInitRepo returned: %w", err)
}
if b, _ := bx.Vcs.Discover(); !b {
return fmt.Errorf("TestingInitRepo failed Discovery")
}
return nil
}
|
[
"\"BLACKBOX_VCS\""
] |
[] |
[
"BLACKBOX_VCS"
] |
[]
|
["BLACKBOX_VCS"]
|
go
| 1 | 0 | |
app/conf/development.py
|
from .common import *
## Database
## Imported from dbsettings file now
## EVE Proxy
import raven
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'krabacus3',
'USER': 'root',
'PASSWORD': 'getfuckedlol',
'HOST': 'localhost', # Or an IP Address that your DB is hosted on
'PORT': '3306',
#'CONN_MAX_AGE': 60
}
}
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://localhost:6379/0',
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
'CONNECTION_POOL_CLASS_KWARGS': {
'max_connections': 50,
'timeout': 20,
},
},
"KEY_PREFIX": "krabacus"
}
}
DEMO_FILE_LOCATION = "/home/bsamuels/krabacus3/app/"
LOGGING = {}
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
sentry_sdk.init(
dsn="put sentry dsn here",
integrations=[DjangoIntegration()]
)
EVE_API_URL = "https://api.eveonline.com"
EVE_PROXY_KEEP_LOGS = 30
## SSO
DISABLE_SERVICES = False
GENERATE_SERVICE_PASSWORD = False
IGNORE_CORP_GROUPS = [29]
## Server Mail
SERVER_EMAIL = ''
DEFAULT_FROM_EMAIL = ""
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
## Django
DEBUG = True
SECRET_KEY = ''
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '[::1]', '*']
ADMINS = ()
MANAGERS = ADMINS
TEMPLATE_DEBUG = True
# Debug Toolbar
INTERNAL_IPS = ['127.0.0.1']
LAMBDA_URL_ROOT = ""
EVEOAUTH["CONSUMER_KEY"] = "eve api consumer key debug"
EVEOAUTH["CONSUMER_SECRET"] = "eve api consumer secret debug"
logging.config.dictConfig(logging_config)
if DEBUG:
MIDDLEWARE.insert(0,'debug_toolbar.middleware.DebugToolbarMiddleware')
INSTALLED_APPS.insert(0,'debug_toolbar')
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
|
[] |
[] |
[
"OAUTHLIB_INSECURE_TRANSPORT"
] |
[]
|
["OAUTHLIB_INSECURE_TRANSPORT"]
|
python
| 1 | 0 | |
cmd/main.go
|
package main
import (
"log"
"os"
jobcontrol "github.com/ifosch/synthetic/pkg/job_control"
"github.com/ifosch/synthetic/pkg/k8s"
"github.com/ifosch/synthetic/pkg/slack"
"github.com/ifosch/synthetic/pkg/synthetic"
)
func replyHello(msg synthetic.Message) {
msg.Reply("hello", msg.Thread())
}
func reactHello(msg synthetic.Message) {
msg.React("wave")
}
func main() {
slackToken, ok := os.LookupEnv("SLACK_TOKEN")
if !ok {
log.Fatalf("No SLACK_TOKEN environment variable defined")
}
debug := false
client := slack.NewChat(slackToken, true, debug)
client.RegisterMessageProcessor(
slack.NewMessageProcessor(
"github.com/ifosch/synthetic/main.replyHello",
slack.Mentioned(slack.Contains(replyHello, "hello")),
),
)
client.RegisterMessageProcessor(
slack.NewMessageProcessor(
"github.com/ifosch/synthetic/main.reactHello",
slack.NotMentioned(slack.Contains(reactHello, "hello")),
),
)
jenkins := jobcontrol.NewJenkins(
os.Getenv("JENKINS_URL"),
os.Getenv("JENKINS_USER"),
os.Getenv("JENKINS_PASSWORD"),
)
if err := jenkins.Connect(); err != nil {
log.Fatalf("error connecting to jenkins: %s", err.Error())
}
registerJenkinsCommands(client, jenkins)
registerK8sCommands(client)
client.Start()
}
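// registerJenkinsCommands wires the Jenkins list, describe, build, and reload commands
// into the chat client.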
func registerJenkinsCommands(client *slack.Chat, jenkins *jobcontrol.Jenkins) {
client.RegisterMessageProcessor(
slack.NewMessageProcessor(
"github.com/ifosch/synthetic/pkg/jenkins.List",
slack.Exactly(slack.Mentioned(jenkins.List), "list"),
),
)
client.RegisterMessageProcessor(
slack.NewMessageProcessor(
"github.com/ifosch/synthetic/pkg/jenkins.Describe",
slack.Mentioned(slack.Contains(jenkins.Describe, "describe")),
),
)
client.RegisterMessageProcessor(
slack.NewMessageProcessor(
"github.com/ifosch/synthetic/pkg/jenkins.Build",
slack.Mentioned(slack.Contains(jenkins.Build, "build")),
),
)
client.RegisterMessageProcessor(
slack.NewMessageProcessor(
"github.com/ifosch/synthetic/pkg/jenkins.Reload",
slack.Mentioned(slack.Contains(jenkins.Reload, "reload")),
),
)
}
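// registerK8sCommands wires the Kubernetes cluster and pod listing commands into the chat client.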
func registerK8sCommands(client *slack.Chat) {
client.RegisterMessageProcessor(
slack.NewMessageProcessor(
"github.com/ifosch/synthetic/pkg/k8s.listClusters",
slack.Exactly(slack.Mentioned(k8s.ListClusters), "list clusters"),
),
)
client.RegisterMessageProcessor(
slack.NewMessageProcessor(
"github.com/ifosch/synthetic/pkg/k8s.listPods",
slack.Contains(slack.Mentioned(k8s.ListPods), "list pods"),
),
)
}
|
[
"\"JENKINS_URL\"",
"\"JENKINS_USER\"",
"\"JENKINS_PASSWORD\""
] |
[] |
[
"JENKINS_PASSWORD",
"JENKINS_USER",
"JENKINS_URL"
] |
[]
|
["JENKINS_PASSWORD", "JENKINS_USER", "JENKINS_URL"]
|
go
| 3 | 0 | |
web/web.go
|
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package web
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
stdlog "log"
"math"
"net"
"net/http"
"net/http/pprof"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"sync"
"sync/atomic"
template_text "text/template"
"time"
"github.com/alecthomas/units"
"github.com/blastbao/prometheus/tsdb"
"github.com/blastbao/prometheus/tsdb/index"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
conntrack "github.com/mwitkow/go-conntrack"
"github.com/opentracing-contrib/go-stdlib/nethttp"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
io_prometheus_client "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"github.com/prometheus/common/route"
"github.com/prometheus/common/server"
"github.com/soheilhy/cmux"
"golang.org/x/net/netutil"
"google.golang.org/grpc"
"github.com/blastbao/prometheus/config"
"github.com/blastbao/prometheus/notifier"
"github.com/blastbao/prometheus/promql"
"github.com/blastbao/prometheus/rules"
"github.com/blastbao/prometheus/scrape"
"github.com/blastbao/prometheus/storage"
"github.com/blastbao/prometheus/template"
"github.com/blastbao/prometheus/util/httputil"
api_v1 "github.com/blastbao/prometheus/web/api/v1"
api_v2 "github.com/blastbao/prometheus/web/api/v2"
"github.com/blastbao/prometheus/web/ui"
)
// Paths that are handled by the React / Reach router that should all be served the main React app's index.html.
var reactRouterPaths = []string{
"/",
"/alerts",
"/config",
"/flags",
"/graph",
"/rules",
"/service-discovery",
"/status",
"/targets",
"/tsdb-status",
"/version",
}
// withStackTracer logs the stack trace in case the request panics. The function
// will re-raise the error which will then be handled by the net/http package.
// It is needed because the go-kit log package doesn't manage properly the
// panics from net/http (see https://github.com/go-kit/kit/issues/233).
func withStackTracer(h http.Handler, l log.Logger) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
level.Error(l).Log("msg", "panic while serving request", "client", r.RemoteAddr, "url", r.URL, "err", err, "stack", buf)
panic(err)
}
}()
h.ServeHTTP(w, r)
})
}
type metrics struct {
requestCounter *prometheus.CounterVec
requestDuration *prometheus.HistogramVec
responseSize *prometheus.HistogramVec
}
func newMetrics(r prometheus.Registerer) *metrics {
m := &metrics{
requestCounter: prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "prometheus_http_requests_total",
Help: "Counter of HTTP requests.",
},
[]string{"handler", "code"},
),
requestDuration: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "prometheus_http_request_duration_seconds",
Help: "Histogram of latencies for HTTP requests.",
Buckets: []float64{.1, .2, .4, 1, 3, 8, 20, 60, 120},
},
[]string{"handler"},
),
responseSize: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "prometheus_http_response_size_bytes",
Help: "Histogram of response size for HTTP requests.",
Buckets: prometheus.ExponentialBuckets(100, 10, 8),
},
[]string{"handler"},
),
}
if r != nil {
r.MustRegister(m.requestCounter, m.requestDuration, m.responseSize)
registerFederationMetrics(r)
}
return m
}
func (m *metrics) instrumentHandlerWithPrefix(prefix string) func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return m.instrumentHandler(prefix+handlerName, handler)
}
}
func (m *metrics) instrumentHandler(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return promhttp.InstrumentHandlerCounter(
m.requestCounter.MustCurryWith(prometheus.Labels{"handler": handlerName}),
promhttp.InstrumentHandlerDuration(
m.requestDuration.MustCurryWith(prometheus.Labels{"handler": handlerName}),
promhttp.InstrumentHandlerResponseSize(
m.responseSize.MustCurryWith(prometheus.Labels{"handler": handlerName}),
handler,
),
),
)
}
// PrometheusVersion contains build information about Prometheus.
type PrometheusVersion = api_v1.PrometheusVersion
// Handler serves various HTTP endpoints of the Prometheus server
type Handler struct {
logger log.Logger
gatherer prometheus.Gatherer
metrics *metrics
scrapeManager *scrape.Manager
ruleManager *rules.Manager
queryEngine *promql.Engine
lookbackDelta time.Duration
context context.Context
tsdb func() *tsdb.DB
storage storage.Storage
localStorage storage.Storage
notifier *notifier.Manager
apiV1 *api_v1.API
router *route.Router
quitCh chan struct{}
reloadCh chan chan error
options *Options
config *config.Config
versionInfo *PrometheusVersion
birth time.Time
cwd string
flagsMap map[string]string
mtx sync.RWMutex
now func() model.Time
ready uint32 // ready is uint32 rather than boolean to be able to use atomic functions.
}
// ApplyConfig updates the config field of the Handler struct
func (h *Handler) ApplyConfig(conf *config.Config) error {
h.mtx.Lock()
defer h.mtx.Unlock()
h.config = conf
return nil
}
// Options for the web Handler.
type Options struct {
Context context.Context
TSDB func() *tsdb.DB
TSDBRetentionDuration model.Duration
TSDBMaxBytes units.Base2Bytes
Storage storage.Storage
QueryEngine *promql.Engine
LookbackDelta time.Duration
ScrapeManager *scrape.Manager
RuleManager *rules.Manager
Notifier *notifier.Manager
Version *PrometheusVersion
Flags map[string]string
ListenAddress string
CORSOrigin *regexp.Regexp
ReadTimeout time.Duration
MaxConnections int
ExternalURL *url.URL
RoutePrefix string
UseLocalAssets bool
UserAssetsPath string
ConsoleTemplatesPath string
ConsoleLibrariesPath string
EnableLifecycle bool
EnableAdminAPI bool
PageTitle string
RemoteReadSampleLimit int
RemoteReadConcurrencyLimit int
RemoteReadBytesInFrame int
Gatherer prometheus.Gatherer
Registerer prometheus.Registerer
}
// New initializes a new web Handler.
func New(logger log.Logger, o *Options) *Handler {
if logger == nil {
logger = log.NewNopLogger()
}
m := newMetrics(o.Registerer)
router := route.New().
WithInstrumentation(m.instrumentHandler).
WithInstrumentation(setPathWithPrefix(""))
cwd, err := os.Getwd()
if err != nil {
cwd = "<error retrieving current working directory>"
}
h := &Handler{
logger: logger,
gatherer: o.Gatherer,
metrics: m,
router: router,
quitCh: make(chan struct{}),
reloadCh: make(chan chan error),
options: o,
versionInfo: o.Version,
birth: time.Now().UTC(),
cwd: cwd,
flagsMap: o.Flags,
context: o.Context,
scrapeManager: o.ScrapeManager,
ruleManager: o.RuleManager,
queryEngine: o.QueryEngine,
lookbackDelta: o.LookbackDelta,
tsdb: o.TSDB,
storage: o.Storage,
localStorage: o.TSDB(),
notifier: o.Notifier,
now: model.Now,
ready: 0,
}
h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, h.scrapeManager, h.notifier,
func() config.Config {
h.mtx.RLock()
defer h.mtx.RUnlock()
return *h.config
},
o.Flags,
api_v1.GlobalURLOptions{
ListenAddress: o.ListenAddress,
Host: o.ExternalURL.Host,
Scheme: o.ExternalURL.Scheme,
},
h.testReady,
func() api_v1.TSDBAdmin {
return h.options.TSDB()
},
h.options.EnableAdminAPI,
logger,
h.ruleManager,
h.options.RemoteReadSampleLimit,
h.options.RemoteReadConcurrencyLimit,
h.options.RemoteReadBytesInFrame,
h.options.CORSOrigin,
h.runtimeInfo,
h.versionInfo,
)
if o.RoutePrefix != "/" {
// Redirect the bare root path to the configured route prefix.
router.Get("/", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, o.RoutePrefix, http.StatusFound)
})
router = router.WithPrefix(o.RoutePrefix)
}
readyf := h.testReady
router.Get("/", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/graph"), http.StatusFound)
})
router.Get("/alerts", readyf(h.alerts))
router.Get("/graph", readyf(h.graph))
router.Get("/status", readyf(h.status))
router.Get("/flags", readyf(h.flags))
router.Get("/config", readyf(h.serveConfig))
router.Get("/rules", readyf(h.rules))
router.Get("/targets", readyf(h.targets))
router.Get("/version", readyf(h.version))
router.Get("/service-discovery", readyf(h.serviceDiscovery))
router.Get("/metrics", promhttp.Handler().ServeHTTP)
router.Get("/federate", readyf(httputil.CompressionHandler{
Handler: http.HandlerFunc(h.federation),
}.ServeHTTP))
router.Get("/consoles/*filepath", readyf(h.consoles))
router.Get("/static/*filepath", func(w http.ResponseWriter, r *http.Request) {
r.URL.Path = path.Join("/static", route.Param(r.Context(), "filepath"))
fs := server.StaticFileServer(ui.Assets)
fs.ServeHTTP(w, r)
})
// Make sure that "<path-prefix>/new" is redirected to "<path-prefix>/new/" and
// not just the naked "/new/", which would be the default behavior of the router
// with the "RedirectTrailingSlash" option (https://godoc.org/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash),
// and which breaks users with a --web.route-prefix that deviates from the path derived
// from the external URL.
router.Get("/new", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, path.Join(o.ExternalURL.Path, "new")+"/", http.StatusFound)
})
router.Get("/new/*filepath", func(w http.ResponseWriter, r *http.Request) {
p := route.Param(r.Context(), "filepath")
// For paths that the React/Reach router handles, we want to serve the
// index.html, but with replaced path prefix placeholder.
for _, rp := range reactRouterPaths {
if p != rp {
continue
}
f, err := ui.Assets.Open("/static/react/index.html")
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Error opening React index.html: %v", err)
return
}
idx, err := ioutil.ReadAll(f)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Error reading React index.html: %v", err)
return
}
prefixedIdx := bytes.ReplaceAll(idx, []byte("PATH_PREFIX_PLACEHOLDER"), []byte(o.ExternalURL.Path))
prefixedIdx = bytes.ReplaceAll(prefixedIdx, []byte("CONSOLES_LINK_PLACEHOLDER"), []byte(h.consolesPath()))
w.Write(prefixedIdx)
return
}
// For all other paths, serve auxiliary assets.
r.URL.Path = path.Join("/static/react/", p)
fs := server.StaticFileServer(ui.Assets)
fs.ServeHTTP(w, r)
})
if o.UserAssetsPath != "" {
router.Get("/user/*filepath", route.FileServe(o.UserAssetsPath))
}
if o.EnableLifecycle {
router.Post("/-/quit", h.quit)
router.Put("/-/quit", h.quit)
router.Post("/-/reload", h.reload)
router.Put("/-/reload", h.reload)
} else {
forbiddenAPINotEnabled := func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusForbidden)
w.Write([]byte("Lifecycle API is not enabled."))
}
router.Post("/-/quit", forbiddenAPINotEnabled)
router.Put("/-/quit", forbiddenAPINotEnabled)
router.Post("/-/reload", forbiddenAPINotEnabled)
router.Put("/-/reload", forbiddenAPINotEnabled)
}
router.Get("/-/quit", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusMethodNotAllowed)
w.Write([]byte("Only POST or PUT requests allowed"))
})
router.Get("/-/reload", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusMethodNotAllowed)
w.Write([]byte("Only POST or PUT requests allowed"))
})
router.Get("/debug/*subpath", serveDebug)
router.Post("/debug/*subpath", serveDebug)
router.Get("/-/healthy", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Prometheus is Healthy.\n")
})
router.Get("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Prometheus is Ready.\n")
}))
return h
}
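// serveDebug dispatches /debug/* requests to the corresponding net/http/pprof
// handlers, redirecting a bare /pprof to /pprof/ first.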
func serveDebug(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
subpath := route.Param(ctx, "subpath")
if subpath == "/pprof" {
http.Redirect(w, req, req.URL.Path+"/", http.StatusMovedPermanently)
return
}
if !strings.HasPrefix(subpath, "/pprof/") {
http.NotFound(w, req)
return
}
subpath = strings.TrimPrefix(subpath, "/pprof/")
switch subpath {
case "cmdline":
pprof.Cmdline(w, req)
case "profile":
pprof.Profile(w, req)
case "symbol":
pprof.Symbol(w, req)
case "trace":
pprof.Trace(w, req)
default:
req.URL.Path = "/debug/pprof/" + subpath
pprof.Index(w, req)
}
}
// Ready sets Handler to be ready.
func (h *Handler) Ready() {
atomic.StoreUint32(&h.ready, 1)
}
// isReady reports whether the server is ready.
func (h *Handler) isReady() bool {
ready := atomic.LoadUint32(&h.ready)
return ready > 0
}
// testReady checks whether the server is ready, calls f if it is, and returns a 503 if it is not.
func (h *Handler) testReady(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if h.isReady() {
f(w, r)
} else {
w.WriteHeader(http.StatusServiceUnavailable)
fmt.Fprintf(w, "Service Unavailable")
}
}
}
// testReadyHandler is like testReady but accepts an http.Handler.
func (h *Handler) testReadyHandler(f http.Handler) http.HandlerFunc {
return h.testReady(f.ServeHTTP)
}
// Quit returns the receive-only quit channel.
func (h *Handler) Quit() <-chan struct{} {
return h.quitCh
}
// Reload returns the receive-only channel that signals configuration reload requests.
func (h *Handler) Reload() <-chan chan error {
return h.reloadCh
}
// Run serves the HTTP endpoints.
func (h *Handler) Run(ctx context.Context) error {
level.Info(h.logger).Log("msg", "Start listening for connections", "address", h.options.ListenAddress)
listener, err := net.Listen("tcp", h.options.ListenAddress)
if err != nil {
return err
}
listener = netutil.LimitListener(listener, h.options.MaxConnections)
// Monitor incoming connections with conntrack.
listener = conntrack.NewListener(listener,
conntrack.TrackWithName("http"),
conntrack.TrackWithTracing())
var (
m = cmux.New(listener)
// See https://github.com/grpc/grpc-go/issues/2636 for why we need to use MatchWithWriters().
grpcl = m.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
httpl = m.Match(cmux.HTTP1Fast())
grpcSrv = grpc.NewServer()
)
av2 := api_v2.New(
h.options.TSDB,
h.options.EnableAdminAPI,
)
av2.RegisterGRPC(grpcSrv)
hh, err := av2.HTTPHandler(ctx, h.options.ListenAddress)
if err != nil {
return err
}
hhFunc := h.testReadyHandler(hh)
operationName := nethttp.OperationNameFunc(func(r *http.Request) string {
return fmt.Sprintf("%s %s", r.Method, r.URL.Path)
})
mux := http.NewServeMux()
mux.Handle("/", h.router)
apiPath := "/api"
if h.options.RoutePrefix != "/" {
apiPath = h.options.RoutePrefix + apiPath
level.Info(h.logger).Log("msg", "Router prefix", "prefix", h.options.RoutePrefix)
}
av1 := route.New().
WithInstrumentation(h.metrics.instrumentHandlerWithPrefix("/api/v1")).
WithInstrumentation(setPathWithPrefix(apiPath + "/v1"))
h.apiV1.Register(av1)
mux.Handle(apiPath+"/v1/", http.StripPrefix(apiPath+"/v1", av1))
mux.Handle(apiPath+"/", http.StripPrefix(apiPath,
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
httputil.SetCORS(w, h.options.CORSOrigin, r)
hhFunc(w, r)
}),
))
errlog := stdlog.New(log.NewStdlibAdapter(level.Error(h.logger)), "", 0)
httpSrv := &http.Server{
Handler: withStackTracer(nethttp.Middleware(opentracing.GlobalTracer(), mux, operationName), h.logger),
ErrorLog: errlog,
ReadTimeout: h.options.ReadTimeout,
}
errCh := make(chan error)
go func() {
errCh <- httpSrv.Serve(httpl)
}()
go func() {
errCh <- grpcSrv.Serve(grpcl)
}()
go func() {
errCh <- m.Serve()
}()
select {
case e := <-errCh:
return e
case <-ctx.Done():
httpSrv.Shutdown(ctx)
grpcSrv.GracefulStop()
return nil
}
}
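// alerts renders the alerts page for all rule groups that contain alerting rules.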
func (h *Handler) alerts(w http.ResponseWriter, r *http.Request) {
var groups []*rules.Group
for _, group := range h.ruleManager.RuleGroups() {
if group.HasAlertingRules() {
groups = append(groups, group)
}
}
alertStatus := AlertStatus{
Groups: groups,
AlertStateToRowClass: map[rules.AlertState]string{
rules.StateInactive: "success",
rules.StatePending: "warning",
rules.StateFiring: "danger",
},
Counts: alertCounts(groups),
}
h.executeTemplate(w, "alerts.html", alertStatus)
}
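// alertCounts tallies alerting rules by state (inactive, pending, firing) across the given groups.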
func alertCounts(groups []*rules.Group) AlertByStateCount {
result := AlertByStateCount{}
for _, group := range groups {
for _, alert := range group.AlertingRules() {
switch alert.State() {
case rules.StateInactive:
result.Inactive++
case rules.StatePending:
result.Pending++
case rules.StateFiring:
result.Firing++
}
}
}
return result
}
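// consoles renders a console template from ConsoleTemplatesPath, exposing URL
// parameters and the configured external labels as template variables.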
func (h *Handler) consoles(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
name := route.Param(ctx, "filepath")
file, err := http.Dir(h.options.ConsoleTemplatesPath).Open(name)
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
defer file.Close()
text, err := ioutil.ReadAll(file)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
ctx = httputil.ContextFromRequest(ctx, r)
// Provide URL parameters as a map for easy use. Advanced users may need all
// values of a repeated parameter, so RawParams is provided as well.
rawParams, err := url.ParseQuery(r.URL.RawQuery)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
params := map[string]string{}
for k, v := range rawParams {
params[k] = v[0]
}
externalLabels := map[string]string{}
h.mtx.RLock()
els := h.config.GlobalConfig.ExternalLabels
h.mtx.RUnlock()
for _, el := range els {
externalLabels[el.Name] = el.Value
}
// Inject some convenience variables that are easier to remember for users
// who are not used to Go's templating system.
defs := []string{
"{{$rawParams := .RawParams }}",
"{{$params := .Params}}",
"{{$path := .Path}}",
"{{$externalLabels := .ExternalLabels}}",
}
data := struct {
RawParams url.Values
Params map[string]string
Path string
ExternalLabels map[string]string
}{
RawParams: rawParams,
Params: params,
Path: strings.TrimLeft(name, "/"),
ExternalLabels: externalLabels,
}
tmpl := template.NewTemplateExpander(
ctx,
strings.Join(append(defs, string(text)), ""),
"__console_"+name,
data,
h.now(),
template.QueryFunc(rules.EngineQueryFunc(h.queryEngine, h.storage)),
h.options.ExternalURL,
)
filenames, err := filepath.Glob(h.options.ConsoleLibrariesPath + "/*.lib")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
result, err := tmpl.ExpandHTML(filenames)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
io.WriteString(w, result)
}
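// graph renders the expression browser page.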
func (h *Handler) graph(w http.ResponseWriter, r *http.Request) {
h.executeTemplate(w, "graph.html", nil)
}
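// status renders the runtime and build information page, combining build info,
// storage retention settings, TSDB head statistics, and values gathered from
// the metrics registry.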
func (h *Handler) status(w http.ResponseWriter, r *http.Request) {
status := struct {
Birth time.Time
CWD string
Version *PrometheusVersion
Alertmanagers []*url.URL
GoroutineCount int
GOMAXPROCS int
GOGC string
GODEBUG string
CorruptionCount int64
ChunkCount int64
TimeSeriesCount int64
LastConfigTime time.Time
ReloadConfigSuccess bool
StorageRetention string
NumSeries uint64
MaxTime int64
MinTime int64
Stats *index.PostingsStats
Duration string
}{
Birth: h.birth,
CWD: h.cwd,
Version: h.versionInfo,
Alertmanagers: h.notifier.Alertmanagers(),
GoroutineCount: runtime.NumGoroutine(),
GOMAXPROCS: runtime.GOMAXPROCS(0),
GOGC: os.Getenv("GOGC"),
GODEBUG: os.Getenv("GODEBUG"),
}
if h.options.TSDBRetentionDuration != 0 {
status.StorageRetention = h.options.TSDBRetentionDuration.String()
}
if h.options.TSDBMaxBytes != 0 {
if status.StorageRetention != "" {
status.StorageRetention = status.StorageRetention + " or "
}
status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String()
}
metrics, err := h.gatherer.Gather()
if err != nil {
http.Error(w, fmt.Sprintf("error gathering runtime status: %s", err), http.StatusInternalServerError)
return
}
for _, mF := range metrics {
switch *mF.Name {
case "prometheus_tsdb_head_chunks":
status.ChunkCount = int64(toFloat64(mF))
case "prometheus_tsdb_head_series":
status.TimeSeriesCount = int64(toFloat64(mF))
case "prometheus_tsdb_wal_corruptions_total":
status.CorruptionCount = int64(toFloat64(mF))
case "prometheus_config_last_reload_successful":
status.ReloadConfigSuccess = toFloat64(mF) != 0
case "prometheus_config_last_reload_success_timestamp_seconds":
status.LastConfigTime = time.Unix(int64(toFloat64(mF)), 0).UTC()
}
}
db := h.tsdb()
startTime := time.Now().UnixNano()
status.Stats = db.Head().PostingsCardinalityStats("__name__")
status.Duration = fmt.Sprintf("%.3f", float64(time.Now().UnixNano()-startTime)/float64(1e9))
status.NumSeries = db.Head().NumSeries()
status.MaxTime = db.Head().MaxTime()
	status.MinTime = db.Head().MinTime()
h.executeTemplate(w, "status.html", status)
}
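// runtimeInfo assembles the runtime information served by the v1 API, mirroring
// much of what the status page shows.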
func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) {
status := api_v1.RuntimeInfo{
StartTime: h.birth,
CWD: h.cwd,
GoroutineCount: runtime.NumGoroutine(),
GOMAXPROCS: runtime.GOMAXPROCS(0),
GOGC: os.Getenv("GOGC"),
GODEBUG: os.Getenv("GODEBUG"),
}
if h.options.TSDBRetentionDuration != 0 {
status.StorageRetention = h.options.TSDBRetentionDuration.String()
}
if h.options.TSDBMaxBytes != 0 {
if status.StorageRetention != "" {
status.StorageRetention = status.StorageRetention + " or "
}
status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String()
}
metrics, err := h.gatherer.Gather()
if err != nil {
return status, errors.Errorf("error gathering runtime status: %s", err)
}
for _, mF := range metrics {
switch *mF.Name {
case "prometheus_tsdb_head_chunks":
status.ChunkCount = int64(toFloat64(mF))
case "prometheus_tsdb_head_series":
status.TimeSeriesCount = int64(toFloat64(mF))
case "prometheus_tsdb_wal_corruptions_total":
status.CorruptionCount = int64(toFloat64(mF))
case "prometheus_config_last_reload_successful":
status.ReloadConfigSuccess = toFloat64(mF) != 0
case "prometheus_config_last_reload_success_timestamp_seconds":
status.LastConfigTime = time.Unix(int64(toFloat64(mF)), 0).UTC()
}
}
return status, nil
}
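// toFloat64 returns the value of the first metric in a family, whether it is a
// gauge, counter, or untyped metric; it returns NaN for anything else.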
func toFloat64(f *io_prometheus_client.MetricFamily) float64 {
m := *f.Metric[0]
if m.Gauge != nil {
return m.Gauge.GetValue()
}
if m.Counter != nil {
return m.Counter.GetValue()
}
if m.Untyped != nil {
return m.Untyped.GetValue()
}
return math.NaN()
}
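// flags renders the command-line flags page.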
func (h *Handler) flags(w http.ResponseWriter, r *http.Request) {
h.executeTemplate(w, "flags.html", h.flagsMap)
}
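// serveConfig renders the currently loaded configuration.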
func (h *Handler) serveConfig(w http.ResponseWriter, r *http.Request) {
h.mtx.RLock()
defer h.mtx.RUnlock()
h.executeTemplate(w, "config.html", h.config.String())
}
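// rules renders the rules page.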
func (h *Handler) rules(w http.ResponseWriter, r *http.Request) {
h.executeTemplate(w, "rules.html", h.ruleManager)
}
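// serviceDiscovery renders the service discovery page, listing active and
// dropped targets per scrape job (showing at most 100 dropped targets per job).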
func (h *Handler) serviceDiscovery(w http.ResponseWriter, r *http.Request) {
var index []string
targets := h.scrapeManager.TargetsAll()
for job := range targets {
index = append(index, job)
}
sort.Strings(index)
scrapeConfigData := struct {
Index []string
Targets map[string][]*scrape.Target
Active []int
Dropped []int
Total []int
}{
Index: index,
Targets: make(map[string][]*scrape.Target),
Active: make([]int, len(index)),
Dropped: make([]int, len(index)),
Total: make([]int, len(index)),
}
for i, job := range scrapeConfigData.Index {
scrapeConfigData.Targets[job] = make([]*scrape.Target, 0, len(targets[job]))
scrapeConfigData.Total[i] = len(targets[job])
for _, target := range targets[job] {
// Do not display more than 100 dropped targets per job to avoid
// returning too much data to the clients.
if target.Labels().Len() == 0 {
scrapeConfigData.Dropped[i]++
if scrapeConfigData.Dropped[i] > 100 {
continue
}
} else {
scrapeConfigData.Active[i]++
}
scrapeConfigData.Targets[job] = append(scrapeConfigData.Targets[job], target)
}
}
h.executeTemplate(w, "service-discovery.html", scrapeConfigData)
}
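// targets renders the targets page with active targets sorted by job and instance labels.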
func (h *Handler) targets(w http.ResponseWriter, r *http.Request) {
tps := h.scrapeManager.TargetsActive()
for _, targets := range tps {
sort.Slice(targets, func(i, j int) bool {
iJobLabel := targets[i].Labels().Get(model.JobLabel)
jJobLabel := targets[j].Labels().Get(model.JobLabel)
if iJobLabel == jJobLabel {
return targets[i].Labels().Get(model.InstanceLabel) < targets[j].Labels().Get(model.InstanceLabel)
}
return iJobLabel < jJobLabel
})
}
h.executeTemplate(w, "targets.html", struct {
TargetPools map[string][]*scrape.Target
}{
TargetPools: tps,
})
}
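// version writes the build information as JSON.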
func (h *Handler) version(w http.ResponseWriter, r *http.Request) {
	enc := json.NewEncoder(w)
	if err := enc.Encode(h.versionInfo); err != nil {
http.Error(w, fmt.Sprintf("error encoding JSON: %s", err), http.StatusInternalServerError)
}
}
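// quit closes the quit channel, signaling the program to terminate.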
func (h *Handler) quit(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Requesting termination... Goodbye!")
close(h.quitCh)
}
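// reload forwards a reload request over the reload channel and reports any
// resulting error back to the client.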
func (h *Handler) reload(w http.ResponseWriter, r *http.Request) {
rc := make(chan error)
h.reloadCh <- rc
if err := <-rc; err != nil {
http.Error(w, fmt.Sprintf("failed to reload config: %s", err), http.StatusInternalServerError)
}
}
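// consolesPath returns the path to the consoles index page if console or user
// assets provide one, and an empty string otherwise.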
func (h *Handler) consolesPath() string {
if _, err := os.Stat(h.options.ConsoleTemplatesPath + "/index.html"); !os.IsNotExist(err) {
return h.options.ExternalURL.Path + "/consoles/index.html"
}
if h.options.UserAssetsPath != "" {
if _, err := os.Stat(h.options.UserAssetsPath + "/index.html"); !os.IsNotExist(err) {
return h.options.ExternalURL.Path + "/user/index.html"
}
}
return ""
}
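// tmplFuncs returns the template functions shared by the web UI templates.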
func tmplFuncs(consolesPath string, opts *Options) template_text.FuncMap {
return template_text.FuncMap{
"since": func(t time.Time) time.Duration {
return time.Since(t) / time.Millisecond * time.Millisecond
},
"consolesPath": func() string { return consolesPath },
"pathPrefix": func() string { return opts.ExternalURL.Path },
"pageTitle": func() string { return opts.PageTitle },
"buildVersion": func() string { return opts.Version.Revision },
"globalURL": func(u *url.URL) *url.URL {
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
return u
}
for _, lhr := range api_v1.LocalhostRepresentations {
if host == lhr {
_, ownPort, err := net.SplitHostPort(opts.ListenAddress)
if err != nil {
return u
}
if port == ownPort {
// Only in the case where the target is on localhost and its port is
// the same as the one we're listening on, we know for sure that
// we're monitoring our own process and that we need to change the
// scheme, hostname, and port to the externally reachable ones as
// well. We shouldn't need to touch the path at all, since if a
// path prefix is defined, the path under which we scrape ourselves
// should already contain the prefix.
u.Scheme = opts.ExternalURL.Scheme
u.Host = opts.ExternalURL.Host
} else {
// Otherwise, we only know that localhost is not reachable
// externally, so we replace only the hostname by the one in the
// external URL. It could be the wrong hostname for the service on
// this port, but it's still the best possible guess.
host, _, err := net.SplitHostPort(opts.ExternalURL.Host)
if err != nil {
return u
}
u.Host = host + ":" + port
}
break
}
}
return u
},
"numHealthy": func(pool []*scrape.Target) int {
alive := len(pool)
for _, p := range pool {
if p.Health() != scrape.HealthGood {
alive--
}
}
return alive
},
"targetHealthToClass": func(th scrape.TargetHealth) string {
switch th {
case scrape.HealthUnknown:
return "warning"
case scrape.HealthGood:
return "success"
default:
return "danger"
}
},
"ruleHealthToClass": func(rh rules.RuleHealth) string {
switch rh {
case rules.HealthUnknown:
return "warning"
case rules.HealthGood:
return "success"
default:
return "danger"
}
},
"alertStateToClass": func(as rules.AlertState) string {
switch as {
case rules.StateInactive:
return "success"
case rules.StatePending:
return "warning"
case rules.StateFiring:
return "danger"
default:
panic("unknown alert state")
}
},
}
}
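// getTemplate concatenates the base template with the named page template from
// the embedded UI assets.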
func (h *Handler) getTemplate(name string) (string, error) {
var tmpl string
appendf := func(name string) error {
f, err := ui.Assets.Open(path.Join("/templates", name))
if err != nil {
return err
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
return err
}
tmpl += string(b)
return nil
}
err := appendf("_base.html")
if err != nil {
return "", errors.Wrap(err, "error reading base template")
}
err = appendf(name)
if err != nil {
return "", errors.Wrapf(err, "error reading page template %s", name)
}
return tmpl, nil
}
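// executeTemplate renders the named template with the given data and writes the
// result to w.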
func (h *Handler) executeTemplate(w http.ResponseWriter, name string, data interface{}) {
	text, err := h.getTemplate(name)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
tmpl := template.NewTemplateExpander(
h.context,
text,
name,
data,
h.now(),
template.QueryFunc(rules.EngineQueryFunc(h.queryEngine, h.storage)),
h.options.ExternalURL,
)
tmpl.Funcs(tmplFuncs(h.consolesPath(), h.options))
result, err := tmpl.ExpandHTML(nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
io.WriteString(w, result)
}
// AlertStatus bundles alerting rules and the mapping of alert states to row classes.
type AlertStatus struct {
Groups []*rules.Group
AlertStateToRowClass map[rules.AlertState]string
Counts AlertByStateCount
}
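// AlertByStateCount holds the number of alerts in each state.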
type AlertByStateCount struct {
Inactive int32
Pending int32
Firing int32
}
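// setPathWithPrefix returns router instrumentation that stores the prefixed
// request path in the request context.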
func setPathWithPrefix(prefix string) func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
handler(w, r.WithContext(httputil.ContextWithPath(r.Context(), prefix+r.URL.Path)))
}
}
}
|
[
"\"GOGC\"",
"\"GODEBUG\"",
"\"GOGC\"",
"\"GODEBUG\""
] |
[] |
[
"GOGC",
"GODEBUG"
] |
[]
|
["GOGC", "GODEBUG"]
|
go
| 2 | 0 | |
sdk/identity/azure-identity/tests/test_managed_identity_async.py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import time
from unittest import mock
from azure.core.credentials import AccessToken
from azure.core.exceptions import ClientAuthenticationError
from azure.identity.aio import ManagedIdentityCredential
from azure.identity._credentials.imds import IMDS_AUTHORITY, IMDS_TOKEN_PATH
from azure.identity._constants import EnvironmentVariables
from azure.identity._internal.user_agent import USER_AGENT
import pytest
from helpers import build_aad_response, mock_response, Request
from helpers_async import async_validating_transport, AsyncMockTransport
from test_managed_identity import ALL_ENVIRONMENTS
MANAGED_IDENTITY_ENVIRON = "azure.identity.aio._credentials.managed_identity.os.environ"
@pytest.mark.asyncio
@pytest.mark.parametrize("environ", ALL_ENVIRONMENTS)
async def test_custom_hooks(environ):
"""The credential's pipeline should include azure-core's CustomHookPolicy"""
scope = "scope"
expected_token = "***"
request_hook = mock.Mock()
response_hook = mock.Mock()
now = int(time.time())
expected_response = mock_response(
json_payload={
"access_token": expected_token,
"expires_in": 3600,
"expires_on": now + 3600,
"ext_expires_in": 3600,
"not_before": now,
"resource": scope,
"token_type": "Bearer",
}
)
transport = async_validating_transport(requests=[Request()] * 2, responses=[expected_response] * 2)
with mock.patch.dict(MANAGED_IDENTITY_ENVIRON, environ, clear=True):
credential = ManagedIdentityCredential(
transport=transport, raw_request_hook=request_hook, raw_response_hook=response_hook
)
await credential.get_token(scope)
if environ:
# some environment variables are set, so we're not mocking IMDS and should expect 1 request
assert request_hook.call_count == 1
assert response_hook.call_count == 1
args, kwargs = response_hook.call_args
pipeline_response = args[0]
assert pipeline_response.http_response == expected_response
else:
# we're mocking IMDS and should expect 2 requests
assert request_hook.call_count == 2
assert response_hook.call_count == 2
responses = [args[0].http_response for args, _ in response_hook.call_args_list]
assert responses == [expected_response] * 2
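# Same scenario as test_custom_hooks, but passing a tenant_id keyword, which the
# credential should accept without changing its requests.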
@pytest.mark.asyncio
@pytest.mark.parametrize("environ", ALL_ENVIRONMENTS)
async def test_tenant_id(environ):
scope = "scope"
expected_token = "***"
request_hook = mock.Mock()
response_hook = mock.Mock()
now = int(time.time())
expected_response = mock_response(
json_payload={
"access_token": expected_token,
"expires_in": 3600,
"expires_on": now + 3600,
"ext_expires_in": 3600,
"not_before": now,
"resource": scope,
"token_type": "Bearer",
}
)
transport = async_validating_transport(requests=[Request()] * 2, responses=[expected_response] * 2)
with mock.patch.dict(MANAGED_IDENTITY_ENVIRON, environ, clear=True):
credential = ManagedIdentityCredential(
transport=transport, raw_request_hook=request_hook, raw_response_hook=response_hook
)
await credential.get_token(scope, tenant_id="tenant_id")
if environ:
# some environment variables are set, so we're not mocking IMDS and should expect 1 request
assert request_hook.call_count == 1
assert response_hook.call_count == 1
args, kwargs = response_hook.call_args
pipeline_response = args[0]
assert pipeline_response.http_response == expected_response
else:
# we're mocking IMDS and should expect 2 requests
assert request_hook.call_count == 2
assert response_hook.call_count == 2
responses = [args[0].http_response for args, _ in response_hook.call_args_list]
assert responses == [expected_response] * 2
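# close() should close the underlying transport exactly once.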
@pytest.mark.asyncio
@pytest.mark.parametrize("environ", ALL_ENVIRONMENTS)
async def test_close(environ):
transport = AsyncMockTransport()
with mock.patch.dict(MANAGED_IDENTITY_ENVIRON, environ, clear=True):
credential = ManagedIdentityCredential(transport=transport)
await credential.close()
assert transport.__aexit__.call_count == 1
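# Used as an async context manager, the credential should enter and exit its transport.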
@pytest.mark.asyncio
@pytest.mark.parametrize("environ", ALL_ENVIRONMENTS)
async def test_context_manager(environ):
transport = AsyncMockTransport()
with mock.patch.dict(MANAGED_IDENTITY_ENVIRON, environ, clear=True):
credential = ManagedIdentityCredential(transport=transport)
async with credential:
assert transport.__aenter__.call_count == 1
assert transport.__aexit__.call_count == 0
assert transport.__aenter__.call_count == 1
assert transport.__aexit__.call_count == 1
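# close() and the context manager should also work when no managed identity is configured.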
@pytest.mark.asyncio
async def test_close_incomplete_configuration():
await ManagedIdentityCredential().close()
@pytest.mark.asyncio
async def test_context_manager_incomplete_configuration():
async with ManagedIdentityCredential():
pass
@pytest.mark.asyncio
async def test_cloud_shell():
"""Cloud Shell environment: only MSI_ENDPOINT set"""
access_token = "****"
expires_on = 42
expected_token = AccessToken(access_token, expires_on)
endpoint = "http://localhost:42/token"
scope = "scope"
transport = async_validating_transport(
requests=[
Request(
base_url=endpoint,
method="POST",
required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
required_data={"resource": scope},
)
],
responses=[
mock_response(
json_payload={
"access_token": access_token,
"expires_in": 0,
"expires_on": expires_on,
"not_before": int(time.time()),
"resource": scope,
"token_type": "Bearer",
}
)
],
)
with mock.patch("os.environ", {EnvironmentVariables.MSI_ENDPOINT: endpoint}):
token = await ManagedIdentityCredential(transport=transport).get_token(scope)
assert token == expected_token
@pytest.mark.asyncio
async def test_cloud_shell_tenant_id():
"""Cloud Shell environment: only MSI_ENDPOINT set"""
access_token = "****"
expires_on = 42
expected_token = AccessToken(access_token, expires_on)
endpoint = "http://localhost:42/token"
scope = "scope"
transport = async_validating_transport(
requests=[
Request(
base_url=endpoint,
method="POST",
required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
required_data={"resource": scope},
)
],
responses=[
mock_response(
json_payload={
"access_token": access_token,
"expires_in": 0,
"expires_on": expires_on,
"not_before": int(time.time()),
"resource": scope,
"token_type": "Bearer",
}
)
],
)
with mock.patch("os.environ", {EnvironmentVariables.MSI_ENDPOINT: endpoint}):
token = await ManagedIdentityCredential(transport=transport).get_token(scope, tenant_id="tenant_id")
assert token == expected_token
@pytest.mark.asyncio
async def test_azure_ml():
"""Azure ML: MSI_ENDPOINT, MSI_SECRET set (like App Service 2017-09-01 but with a different response format)"""
expected_token = AccessToken("****", int(time.time()) + 3600)
url = "http://localhost:42/token"
secret = "expected-secret"
scope = "scope"
client_id = "client"
transport = async_validating_transport(
requests=[
Request(
url,
method="GET",
required_headers={"secret": secret, "User-Agent": USER_AGENT},
required_params={"api-version": "2017-09-01", "resource": scope},
),
Request(
url,
method="GET",
required_headers={"secret": secret, "User-Agent": USER_AGENT},
required_params={"api-version": "2017-09-01", "resource": scope, "clientid": client_id},
),
],
responses=[
mock_response(
json_payload={
"access_token": expected_token.token,
"expires_in": 3600,
"expires_on": expected_token.expires_on,
"resource": scope,
"token_type": "Bearer",
}
)
]
* 2,
)
with mock.patch.dict(
MANAGED_IDENTITY_ENVIRON,
{EnvironmentVariables.MSI_ENDPOINT: url, EnvironmentVariables.MSI_SECRET: secret},
clear=True,
):
credential = ManagedIdentityCredential(transport=transport)
token = await credential.get_token(scope)
assert token.token == expected_token.token
assert token.expires_on == expected_token.expires_on
credential = ManagedIdentityCredential(transport=transport, client_id=client_id)
token = await credential.get_token(scope)
assert token.token == expected_token.token
assert token.expires_on == expected_token.expires_on
@pytest.mark.asyncio
async def test_azure_ml_tenant_id():
"""Azure ML: MSI_ENDPOINT, MSI_SECRET set (like App Service 2017-09-01 but with a different response format)"""
expected_token = AccessToken("****", int(time.time()) + 3600)
url = "http://localhost:42/token"
secret = "expected-secret"
scope = "scope"
client_id = "client"
transport = async_validating_transport(
requests=[
Request(
url,
method="GET",
required_headers={"secret": secret, "User-Agent": USER_AGENT},
required_params={"api-version": "2017-09-01", "resource": scope},
),
Request(
url,
method="GET",
required_headers={"secret": secret, "User-Agent": USER_AGENT},
required_params={"api-version": "2017-09-01", "resource": scope, "clientid": client_id},
),
],
responses=[
mock_response(
json_payload={
"access_token": expected_token.token,
"expires_in": 3600,
"expires_on": expected_token.expires_on,
"resource": scope,
"token_type": "Bearer",
}
)
]
* 2,
)
with mock.patch.dict(
MANAGED_IDENTITY_ENVIRON,
{EnvironmentVariables.MSI_ENDPOINT: url, EnvironmentVariables.MSI_SECRET: secret},
clear=True,
):
credential = ManagedIdentityCredential(transport=transport)
token = await credential.get_token(scope, tenant_id="tenant_id")
assert token.token == expected_token.token
assert token.expires_on == expected_token.expires_on
@pytest.mark.asyncio
async def test_cloud_shell_user_assigned_identity():
"""Cloud Shell environment: only MSI_ENDPOINT set"""
expected_token = "****"
expires_on = 42
client_id = "some-guid"
endpoint = "http://localhost:42/token"
scope = "scope"
param_name, param_value = "foo", "bar"
transport = async_validating_transport(
requests=[
Request(
base_url=endpoint,
method="POST",
required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
required_data={"client_id": client_id, "resource": scope},
),
Request(
base_url=endpoint,
method="POST",
required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
required_data={"resource": scope, param_name: param_value},
),
],
responses=[
mock_response(
json_payload={
"access_token": expected_token,
"expires_in": 0,
"expires_on": expires_on,
"not_before": int(time.time()),
"resource": scope,
"token_type": "Bearer",
}
)
]
* 2,
)
with mock.patch.dict(MANAGED_IDENTITY_ENVIRON, {EnvironmentVariables.MSI_ENDPOINT: endpoint}, clear=True):
credential = ManagedIdentityCredential(client_id=client_id, transport=transport)
token = await credential.get_token(scope)
assert token.token == expected_token
assert token.expires_on == expires_on
credential = ManagedIdentityCredential(transport=transport, identity_config={param_name: param_value})
token = await credential.get_token(scope)
assert token.token == expected_token
assert token.expires_on == expires_on
@pytest.mark.asyncio
async def test_prefers_app_service_2017_09_01():
"""When the environment is configured for both App Service versions, the credential should prefer 2017-09-01
Support for 2019-08-01 was removed due to https://github.com/Azure/azure-sdk-for-python/issues/14670. This test
should be removed when that support is added back.
"""
access_token = "****"
expires_on = 42
expected_token = AccessToken(access_token, expires_on)
url = "http://localhost:42/token"
secret = "expected-secret"
scope = "scope"
transport = async_validating_transport(
requests=[
Request(
url,
method="GET",
required_headers={"secret": secret, "User-Agent": USER_AGENT},
required_params={"api-version": "2017-09-01", "resource": scope},
)
]
* 2,
responses=[
mock_response(
json_payload={
"access_token": access_token,
"expires_on": "01/01/1970 00:00:{} +00:00".format(expires_on), # linux format
"resource": scope,
"token_type": "Bearer",
}
),
mock_response(
json_payload={
"access_token": access_token,
"expires_on": "1/1/1970 12:00:{} AM +00:00".format(expires_on), # windows format
"resource": scope,
"token_type": "Bearer",
}
),
],
)
with mock.patch.dict(
MANAGED_IDENTITY_ENVIRON,
{
EnvironmentVariables.IDENTITY_ENDPOINT: url,
EnvironmentVariables.IDENTITY_HEADER: secret,
EnvironmentVariables.MSI_ENDPOINT: url,
EnvironmentVariables.MSI_SECRET: secret,
},
clear=True,
):
credential = ManagedIdentityCredential(transport=transport)
token = await credential.get_token(scope)
assert token == expected_token
assert token.expires_on == expires_on
credential = ManagedIdentityCredential(transport=transport)
token = await credential.get_token(scope)
assert token == expected_token
assert token.expires_on == expires_on
@pytest.mark.skip(
"2019-08-01 support was removed due to https://github.com/Azure/azure-sdk-for-python/issues/14670. This test should be enabled when that support is added back."
)
@pytest.mark.asyncio
async def test_app_service_2019_08_01():
"""App Service 2019-08-01: IDENTITY_ENDPOINT, IDENTITY_HEADER set"""
access_token = "****"
expires_on = 42
endpoint = "http://localhost:42/token"
secret = "expected-secret"
scope = "scope"
async def send(request, **_):
assert request.url.startswith(endpoint)
assert request.method == "GET"
assert request.headers["X-IDENTITY-HEADER"] == secret
assert request.headers["User-Agent"] == USER_AGENT
assert request.query["api-version"] == "2019-08-01"
assert request.query["resource"] == scope
return mock_response(
json_payload={
"access_token": access_token,
"expires_on": str(expires_on),
"resource": scope,
"token_type": "Bearer",
}
)
# when configuration for both API versions is present, the credential should prefer the most recent
for environment in [
{EnvironmentVariables.IDENTITY_ENDPOINT: endpoint, EnvironmentVariables.IDENTITY_HEADER: secret},
{
EnvironmentVariables.IDENTITY_ENDPOINT: endpoint,
EnvironmentVariables.IDENTITY_HEADER: secret,
EnvironmentVariables.MSI_ENDPOINT: endpoint,
EnvironmentVariables.MSI_SECRET: secret,
},
]:
with mock.patch.dict("os.environ", environment, clear=True):
token = await ManagedIdentityCredential(transport=mock.Mock(send=send)).get_token(scope)
assert token.token == access_token
assert token.expires_on == expires_on
@pytest.mark.skip(
"2019-08-01 support was removed due to https://github.com/Azure/azure-sdk-for-python/issues/14670. This test should be enabled when that support is added back."
)
@pytest.mark.asyncio
async def test_app_service_2019_08_01_tenant_id():
access_token = "****"
expires_on = 42
endpoint = "http://localhost:42/token"
secret = "expected-secret"
scope = "scope"
async def send(request, **_):
assert request.url.startswith(endpoint)
assert request.method == "GET"
assert request.headers["X-IDENTITY-HEADER"] == secret
assert request.headers["User-Agent"] == USER_AGENT
assert request.query["api-version"] == "2019-08-01"
assert request.query["resource"] == scope
return mock_response(
json_payload={
"access_token": access_token,
"expires_on": str(expires_on),
"resource": scope,
"token_type": "Bearer",
}
)
# when configuration for both API versions is present, the credential should prefer the most recent
for environment in [
{EnvironmentVariables.IDENTITY_ENDPOINT: endpoint, EnvironmentVariables.IDENTITY_HEADER: secret},
{
EnvironmentVariables.IDENTITY_ENDPOINT: endpoint,
EnvironmentVariables.IDENTITY_HEADER: secret,
EnvironmentVariables.MSI_ENDPOINT: endpoint,
EnvironmentVariables.MSI_SECRET: secret,
},
]:
with mock.patch.dict("os.environ", environment, clear=True):
token = await ManagedIdentityCredential(transport=mock.Mock(send=send)).get_token(scope, tenant_id="tenant_id")
assert token.token == access_token
assert token.expires_on == expires_on
@pytest.mark.asyncio
async def test_app_service_2017_09_01():
"""test parsing of App Service MSI 2017-09-01's eccentric platform-dependent expires_on strings"""
access_token = "****"
expires_on = 42
expected_token = AccessToken(access_token, expires_on)
url = "http://localhost:42/token"
secret = "expected-secret"
scope = "scope"
transport = async_validating_transport(
requests=[
Request(
url,
method="GET",
required_headers={"secret": secret, "User-Agent": USER_AGENT},
required_params={"api-version": "2017-09-01", "resource": scope},
)
]
* 2,
responses=[
mock_response(
json_payload={
"access_token": access_token,
"expires_on": "01/01/1970 00:00:{} +00:00".format(expires_on), # linux format
"resource": scope,
"token_type": "Bearer",
}
),
mock_response(
json_payload={
"access_token": access_token,
"expires_on": "1/1/1970 12:00:{} AM +00:00".format(expires_on), # windows format
"resource": scope,
"token_type": "Bearer",
}
),
],
)
with mock.patch.dict(
MANAGED_IDENTITY_ENVIRON,
{EnvironmentVariables.MSI_ENDPOINT: url, EnvironmentVariables.MSI_SECRET: secret},
clear=True,
):
token = await ManagedIdentityCredential(transport=transport).get_token(scope)
assert token == expected_token
assert token.expires_on == expires_on
token = await ManagedIdentityCredential(transport=transport).get_token(scope)
assert token == expected_token
assert token.expires_on == expires_on
@pytest.mark.asyncio
async def test_app_service_2017_09_01_tenant_id():
access_token = "****"
expires_on = 42
expected_token = AccessToken(access_token, expires_on)
url = "http://localhost:42/token"
secret = "expected-secret"
scope = "scope"
transport = async_validating_transport(
requests=[
Request(
url,
method="GET",
required_headers={"secret": secret, "User-Agent": USER_AGENT},
required_params={"api-version": "2017-09-01", "resource": scope},
)
]
* 2,
responses=[
mock_response(
json_payload={
"access_token": access_token,
"expires_on": "01/01/1970 00:00:{} +00:00".format(expires_on), # linux format
"resource": scope,
"token_type": "Bearer",
}
),
mock_response(
json_payload={
"access_token": access_token,
"expires_on": "1/1/1970 12:00:{} AM +00:00".format(expires_on), # windows format
"resource": scope,
"token_type": "Bearer",
}
),
],
)
with mock.patch.dict(
MANAGED_IDENTITY_ENVIRON,
{EnvironmentVariables.MSI_ENDPOINT: url, EnvironmentVariables.MSI_SECRET: secret},
clear=True,
):
token = await ManagedIdentityCredential(transport=transport).get_token(scope, tenant_id="tenant_id")
assert token == expected_token
assert token.expires_on == expires_on
@pytest.mark.asyncio
async def test_app_service_user_assigned_identity():
"""App Service 2017-09-01: MSI_ENDPOINT, MSI_SECRET set"""
expected_token = "****"
expires_on = 42
client_id = "some-guid"
endpoint = "http://localhost:42/token"
secret = "expected-secret"
scope = "scope"
param_name, param_value = "foo", "bar"
transport = async_validating_transport(
requests=[
Request(
base_url=endpoint,
method="GET",
required_headers={"secret": secret, "User-Agent": USER_AGENT},
required_params={"api-version": "2017-09-01", "clientid": client_id, "resource": scope},
),
Request(
base_url=endpoint,
method="GET",
required_headers={"secret": secret, "User-Agent": USER_AGENT},
required_params={"api-version": "2017-09-01", "resource": scope, param_name: param_value},
),
],
responses=[
mock_response(
json_payload={
"access_token": expected_token,
"expires_on": "01/01/1970 00:00:{} +00:00".format(expires_on),
"resource": scope,
"token_type": "Bearer",
}
)
]
* 2,
)
with mock.patch.dict(
MANAGED_IDENTITY_ENVIRON,
{EnvironmentVariables.MSI_ENDPOINT: endpoint, EnvironmentVariables.MSI_SECRET: secret},
clear=True,
):
credential = ManagedIdentityCredential(client_id=client_id, transport=transport)
token = await credential.get_token(scope)
assert token.token == expected_token
assert token.expires_on == expires_on
credential = ManagedIdentityCredential(
client_id=client_id, transport=transport, identity_config={param_name: param_value}
)
token = await credential.get_token(scope)
assert token.token == expected_token
assert token.expires_on == expires_on
@pytest.mark.asyncio
async def test_client_id_none():
"""the credential should ignore client_id=None"""
expected_access_token = "****"
scope = "scope"
async def send(request, **_):
assert "client_id" not in request.query # IMDS
assert "clientid" not in request.query # App Service 2017-09-01
if request.data:
assert "client_id" not in request.body # Cloud Shell
return mock_response(
json_payload=(build_aad_response(access_token=expected_access_token, expires_on="42", resource=scope))
)
with mock.patch.dict(MANAGED_IDENTITY_ENVIRON, {}, clear=True):
credential = ManagedIdentityCredential(client_id=None, transport=mock.Mock(send=send))
token = await credential.get_token(scope)
assert token.token == expected_access_token
with mock.patch.dict(
MANAGED_IDENTITY_ENVIRON, {EnvironmentVariables.MSI_ENDPOINT: "https://localhost"}, clear=True
):
credential = ManagedIdentityCredential(client_id=None, transport=mock.Mock(send=send))
token = await credential.get_token(scope)
assert token.token == expected_access_token
@pytest.mark.asyncio
async def test_client_id_none_app_service_2017_09_01():
"""The credential should ignore client_id=None.
App Service 2017-09-01 must be tested separately due to its eccentric expires_on format.
"""
expected_access_token = "****"
scope = "scope"
async def send(request, **_):
assert "client_id" not in request.query
assert "clientid" not in request.query
return mock_response(
json_payload=(
build_aad_response(
access_token=expected_access_token, expires_on="01/01/1970 00:00:42 +00:00", resource=scope
)
)
)
with mock.patch.dict(
MANAGED_IDENTITY_ENVIRON,
{EnvironmentVariables.MSI_ENDPOINT: "https://localhost", EnvironmentVariables.MSI_SECRET: "secret"},
clear=True,
):
credential = ManagedIdentityCredential(client_id=None, transport=mock.Mock(send=send))
token = await credential.get_token(scope)
assert token.token == expected_access_token
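# IMDS: with no relevant environment variables set, the credential should fall
# back to IMDS, probing the endpoint before requesting a token.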
@pytest.mark.asyncio
async def test_imds():
access_token = "****"
expires_on = 42
expected_token = AccessToken(access_token, expires_on)
scope = "scope"
transport = async_validating_transport(
requests=[
Request(base_url=IMDS_AUTHORITY + IMDS_TOKEN_PATH),
Request(
base_url=IMDS_AUTHORITY + IMDS_TOKEN_PATH,
method="GET",
required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
required_params={"api-version": "2018-02-01", "resource": scope},
),
],
responses=[
# probe receives error response
mock_response(status_code=400, json_payload={"error": "this is an error message"}),
mock_response(
json_payload={
"access_token": access_token,
"expires_in": 42,
"expires_on": expires_on,
"ext_expires_in": 42,
"not_before": int(time.time()),
"resource": scope,
"token_type": "Bearer",
}
),
],
)
# ensure e.g. $MSI_ENDPOINT isn't set, so we get ImdsCredential
with mock.patch.dict("os.environ", clear=True):
token = await ManagedIdentityCredential(transport=transport).get_token(scope)
assert token == expected_token
@pytest.mark.asyncio
async def test_imds_tenant_id():
access_token = "****"
expires_on = 42
expected_token = AccessToken(access_token, expires_on)
scope = "scope"
transport = async_validating_transport(
requests=[
Request(base_url=IMDS_AUTHORITY + IMDS_TOKEN_PATH),
Request(
base_url=IMDS_AUTHORITY + IMDS_TOKEN_PATH,
method="GET",
required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
required_params={"api-version": "2018-02-01", "resource": scope},
),
],
responses=[
# probe receives error response
mock_response(status_code=400, json_payload={"error": "this is an error message"}),
mock_response(
json_payload={
"access_token": access_token,
"expires_in": 42,
"expires_on": expires_on,
"ext_expires_in": 42,
"not_before": int(time.time()),
"resource": scope,
"token_type": "Bearer",
}
),
],
)
# ensure e.g. $MSI_ENDPOINT isn't set, so we get ImdsCredential
with mock.patch.dict("os.environ", clear=True):
token = await ManagedIdentityCredential(transport=transport).get_token(scope, tenant_id="tenant_id")
assert token == expected_token
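# IMDS with a user-assigned identity: client_id should be sent as a query parameter.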
@pytest.mark.asyncio
async def test_imds_user_assigned_identity():
access_token = "****"
expires_on = 42
expected_token = AccessToken(access_token, expires_on)
scope = "scope"
client_id = "some-guid"
transport = async_validating_transport(
requests=[
Request(base_url=IMDS_AUTHORITY + IMDS_TOKEN_PATH),
Request(
base_url=IMDS_AUTHORITY + IMDS_TOKEN_PATH,
method="GET",
required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
required_params={"api-version": "2018-02-01", "client_id": client_id, "resource": scope},
),
],
responses=[
# probe receives error response
mock_response(status_code=400, json_payload={"error": "this is an error message"}),
mock_response(
json_payload={
"access_token": access_token,
"client_id": client_id,
"expires_in": 42,
"expires_on": expires_on,
"ext_expires_in": 42,
"not_before": int(time.time()),
"resource": scope,
"token_type": "Bearer",
}
),
],
)
# ensure e.g. $MSI_ENDPOINT isn't set, so we get ImdsCredential
with mock.patch.dict("os.environ", clear=True):
token = await ManagedIdentityCredential(client_id=client_id, transport=transport).get_token(scope)
assert token == expected_token
@pytest.mark.asyncio
async def test_service_fabric():
"""Service Fabric 2019-07-01-preview"""
access_token = "****"
expires_on = 42
endpoint = "http://localhost:42/token"
secret = "expected-secret"
thumbprint = "SHA1HEX"
scope = "scope"
async def send(request, **_):
assert request.url.startswith(endpoint)
assert request.method == "GET"
assert request.headers["Secret"] == secret
assert request.query["api-version"] == "2019-07-01-preview"
assert request.query["resource"] == scope
return mock_response(
json_payload={
"access_token": access_token,
"expires_on": str(expires_on),
"resource": scope,
"token_type": "Bearer",
}
)
with mock.patch(
"os.environ",
{
EnvironmentVariables.IDENTITY_ENDPOINT: endpoint,
EnvironmentVariables.IDENTITY_HEADER: secret,
EnvironmentVariables.IDENTITY_SERVER_THUMBPRINT: thumbprint,
},
):
token = await ManagedIdentityCredential(transport=mock.Mock(send=send)).get_token(scope)
assert token.token == access_token
assert token.expires_on == expires_on
@pytest.mark.asyncio
async def test_service_fabric_tenant_id():
access_token = "****"
expires_on = 42
endpoint = "http://localhost:42/token"
secret = "expected-secret"
thumbprint = "SHA1HEX"
scope = "scope"
async def send(request, **_):
assert request.url.startswith(endpoint)
assert request.method == "GET"
assert request.headers["Secret"] == secret
assert request.query["api-version"] == "2019-07-01-preview"
assert request.query["resource"] == scope
return mock_response(
json_payload={
"access_token": access_token,
"expires_on": str(expires_on),
"resource": scope,
"token_type": "Bearer",
}
)
with mock.patch(
"os.environ",
{
EnvironmentVariables.IDENTITY_ENDPOINT: endpoint,
EnvironmentVariables.IDENTITY_HEADER: secret,
EnvironmentVariables.IDENTITY_SERVER_THUMBPRINT: thumbprint,
},
):
token = await ManagedIdentityCredential(transport=mock.Mock(send=send)).get_token(scope, tenant_id="tenant_id")
assert token.token == access_token
assert token.expires_on == expires_on
@pytest.mark.asyncio
async def test_azure_arc(tmpdir):
"""Azure Arc 2019-11-01"""
access_token = "****"
api_version = "2019-11-01"
expires_on = 42
identity_endpoint = "http://localhost:42/token"
imds_endpoint = "http://localhost:42"
scope = "scope"
secret_key = "XXXX"
key_file = tmpdir.mkdir("key").join("key_file.key")
key_file.write(secret_key)
assert key_file.read() == secret_key
key_path = os.path.join(key_file.dirname, key_file.basename)
transport = async_validating_transport(
requests=[
Request(
base_url=identity_endpoint,
method="GET",
required_headers={"Metadata": "true"},
required_params={"api-version": api_version, "resource": scope},
),
Request(
base_url=identity_endpoint,
method="GET",
required_headers={"Metadata": "true", "Authorization": "Basic {}".format(secret_key)},
required_params={"api-version": api_version, "resource": scope},
),
],
responses=[
# first response gives path to authentication key
mock_response(status_code=401, headers={"WWW-Authenticate": "Basic realm={}".format(key_path)}),
mock_response(
json_payload={
"access_token": access_token,
"expires_on": expires_on,
"resource": scope,
"token_type": "Bearer",
}
),
],
)
with mock.patch(
"os.environ",
{EnvironmentVariables.IDENTITY_ENDPOINT: identity_endpoint, EnvironmentVariables.IMDS_ENDPOINT: imds_endpoint},
):
token = await ManagedIdentityCredential(transport=transport).get_token(scope)
assert token.token == access_token
assert token.expires_on == expires_on
@pytest.mark.asyncio
async def test_azure_arc_tenant_id(tmpdir):
access_token = "****"
api_version = "2019-11-01"
expires_on = 42
identity_endpoint = "http://localhost:42/token"
imds_endpoint = "http://localhost:42"
scope = "scope"
secret_key = "XXXX"
key_file = tmpdir.mkdir("key").join("key_file.key")
key_file.write(secret_key)
assert key_file.read() == secret_key
key_path = os.path.join(key_file.dirname, key_file.basename)
transport = async_validating_transport(
requests=[
Request(
base_url=identity_endpoint,
method="GET",
required_headers={"Metadata": "true"},
required_params={"api-version": api_version, "resource": scope},
),
Request(
base_url=identity_endpoint,
method="GET",
required_headers={"Metadata": "true", "Authorization": "Basic {}".format(secret_key)},
required_params={"api-version": api_version, "resource": scope},
),
],
responses=[
# first response gives path to authentication key
mock_response(status_code=401, headers={"WWW-Authenticate": "Basic realm={}".format(key_path)}),
mock_response(
json_payload={
"access_token": access_token,
"expires_on": expires_on,
"resource": scope,
"token_type": "Bearer",
}
),
],
)
with mock.patch(
"os.environ",
{EnvironmentVariables.IDENTITY_ENDPOINT: identity_endpoint, EnvironmentVariables.IMDS_ENDPOINT: imds_endpoint},
):
token = await ManagedIdentityCredential(transport=transport).get_token(scope, tenant_id="tenant_id")
assert token.token == access_token
assert token.expires_on == expires_on
@pytest.mark.asyncio
async def test_azure_arc_client_id():
"""Azure Arc doesn't support user-assigned managed identity"""
with mock.patch(
"os.environ",
{
EnvironmentVariables.IDENTITY_ENDPOINT: "http://localhost:42/token",
EnvironmentVariables.IMDS_ENDPOINT: "http://localhost:42",
},
):
credential = ManagedIdentityCredential(client_id="some-guid")
with pytest.raises(ClientAuthenticationError):
await credential.get_token("scope")
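# Token exchange (workload identity): the credential should read a federated token
# from AZURE_FEDERATED_TOKEN_FILE and present it as a client assertion.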
@pytest.mark.asyncio
async def test_token_exchange(tmpdir):
exchange_token = "exchange-token"
token_file = tmpdir.join("token")
token_file.write(exchange_token)
access_token = "***"
authority = "https://localhost"
default_client_id = "default_client_id"
tenant = "tenant_id"
scope = "scope"
success_response = mock_response(
json_payload={
"access_token": access_token,
"expires_in": 3600,
"ext_expires_in": 3600,
"expires_on": int(time.time()) + 3600,
"not_before": int(time.time()),
"resource": scope,
"token_type": "Bearer",
}
)
transport = async_validating_transport(
requests=[
Request(
base_url=authority,
method="POST",
required_data={
"client_assertion": exchange_token,
"client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
"client_id": default_client_id,
"grant_type": "client_credentials",
"scope": scope,
},
)
],
responses=[success_response],
)
mock_environ = {
EnvironmentVariables.AZURE_AUTHORITY_HOST: authority,
EnvironmentVariables.AZURE_CLIENT_ID: default_client_id,
EnvironmentVariables.AZURE_TENANT_ID: tenant,
EnvironmentVariables.AZURE_FEDERATED_TOKEN_FILE: token_file.strpath,
}
# credential should default to AZURE_CLIENT_ID
with mock.patch.dict("os.environ", mock_environ, clear=True):
credential = ManagedIdentityCredential(transport=transport)
token = await credential.get_token(scope)
assert token.token == access_token
# client_id kwarg should override AZURE_CLIENT_ID
nondefault_client_id = "non" + default_client_id
transport = async_validating_transport(
requests=[
Request(
base_url=authority,
method="POST",
required_data={
"client_assertion": exchange_token,
"client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
"client_id": nondefault_client_id,
"grant_type": "client_credentials",
"scope": scope,
},
)
],
responses=[success_response],
)
with mock.patch.dict("os.environ", mock_environ, clear=True):
credential = ManagedIdentityCredential(client_id=nondefault_client_id, transport=transport)
token = await credential.get_token(scope)
assert token.token == access_token
# AZURE_CLIENT_ID may not have a value, in which case client_id is required
transport = async_validating_transport(
requests=[
Request(
base_url=authority,
method="POST",
required_data={
"client_assertion": exchange_token,
"client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
"client_id": nondefault_client_id,
"grant_type": "client_credentials",
"scope": scope,
},
)
],
responses=[success_response],
)
with mock.patch.dict(
"os.environ",
{
EnvironmentVariables.AZURE_AUTHORITY_HOST: authority,
EnvironmentVariables.AZURE_TENANT_ID: tenant,
EnvironmentVariables.AZURE_FEDERATED_TOKEN_FILE: token_file.strpath,
},
clear=True,
):
with pytest.raises(ValueError):
ManagedIdentityCredential()
credential = ManagedIdentityCredential(client_id=nondefault_client_id, transport=transport)
token = await credential.get_token(scope)
assert token.token == access_token
@pytest.mark.asyncio
async def test_token_exchange_tenant_id(tmpdir):
exchange_token = "exchange-token"
token_file = tmpdir.join("token")
token_file.write(exchange_token)
access_token = "***"
authority = "https://localhost"
default_client_id = "default_client_id"
tenant = "tenant_id"
scope = "scope"
success_response = mock_response(
json_payload={
"access_token": access_token,
"expires_in": 3600,
"ext_expires_in": 3600,
"expires_on": int(time.time()) + 3600,
"not_before": int(time.time()),
"resource": scope,
"token_type": "Bearer",
}
)
transport = async_validating_transport(
requests=[
Request(
base_url=authority,
method="POST",
required_data={
"client_assertion": exchange_token,
"client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
"client_id": default_client_id,
"grant_type": "client_credentials",
"scope": scope,
},
)
],
responses=[success_response],
)
mock_environ = {
EnvironmentVariables.AZURE_AUTHORITY_HOST: authority,
EnvironmentVariables.AZURE_CLIENT_ID: default_client_id,
EnvironmentVariables.AZURE_TENANT_ID: tenant,
EnvironmentVariables.AZURE_FEDERATED_TOKEN_FILE: token_file.strpath,
}
# credential should default to AZURE_CLIENT_ID
with mock.patch.dict("os.environ", mock_environ, clear=True):
credential = ManagedIdentityCredential(transport=transport)
token = await credential.get_token(scope, tenant_id="tenant_id")
assert token.token == access_token
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/test___init__.py
|
import os
import shutil
import sys
import tempfile
import unittest
import configbetter
class TestMakedirs(unittest.TestCase):
@classmethod
def setUpClass(cls):
configbetter.sys.platform = sys.platform
cls.tempdir = tempfile.mkdtemp()
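        # Redirect the platform home directory to a temp dir and scrub any
        # XDG_* overrides so Config falls back to its platform defaults.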
if sys.platform in ['linux', 'darwin']:
configbetter.os.environ['HOME'] = cls.tempdir
elif sys.platform == 'win32':
configbetter.os.environ['APPDATA'] = cls.tempdir
if 'XDG_DATA_HOME' in configbetter.os.environ:
del configbetter.os.environ['XDG_DATA_HOME']
if 'XDG_CONFIG_HOME' in configbetter.os.environ:
del configbetter.os.environ['XDG_CONFIG_HOME']
if 'XDG_CACHE_HOME' in configbetter.os.environ:
del configbetter.os.environ['XDG_CACHE_HOME']
cls.conf = configbetter.Config('notarealapp')
cls.conf.makedirs()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tempdir)
def test_data_dir(self):
self.assertTrue(os.path.exists(self.conf.data))
def test_config_dir(self):
self.assertTrue(os.path.exists(self.conf.config))
def test_cache_dir(self):
self.assertTrue(os.path.exists(self.conf.cache))
class TestRmdirs(unittest.TestCase):
@classmethod
def setUpClass(cls):
configbetter.sys.platform = 'win32'
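        # The module-level platform is pinned to win32 so rmdirs exercises the
        # Windows-specific cleanup path checked by test_windows_edgecase_rmdir.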
cls.tempdir = tempfile.mkdtemp()
if sys.platform in ['linux', 'darwin']:
configbetter.os.environ['HOME'] = cls.tempdir
elif sys.platform == 'win32':
configbetter.os.environ['APPDATA'] = cls.tempdir
if 'XDG_DATA_HOME' in configbetter.os.environ:
del configbetter.os.environ['XDG_DATA_HOME']
if 'XDG_CONFIG_HOME' in configbetter.os.environ:
del configbetter.os.environ['XDG_CONFIG_HOME']
if 'XDG_CACHE_HOME' in configbetter.os.environ:
del configbetter.os.environ['XDG_CACHE_HOME']
cls.conf = configbetter.Config('notarealapp')
cls.conf.makedirs()
cls.conf.rmdirs()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tempdir)
def test_data_rmdir(self):
self.assertFalse(os.path.exists(self.conf.data))
def test_config_rmdir(self):
self.assertFalse(os.path.exists(self.conf.config))
def test_cache_rmdir(self):
self.assertFalse(os.path.exists(self.conf.cache))
def test_windows_edgecase_rmdir(self):
self.assertFalse(os.path.exists(os.path.join(self.tempdir, 'notarealapp')))
class TestWindowsNoXDG(unittest.TestCase):
@classmethod
def setUpClass(cls):
configbetter.sys.platform = 'win32'
cls.tempdir = tempfile.mkdtemp()
configbetter.os.environ['APPDATA'] = cls.tempdir
cls.conf = configbetter.Config('fakeapp')
@classmethod
def tearDownClass(cls):
os.rmdir(cls.tempdir)
def test_noxdg_data(self):
checkdata = os.path.join(self.tempdir, 'fakeapp', 'Data')
self.assertEqual(self.conf.data, checkdata)
def test_noxdg_config(self):
checkconfig = os.path.join(self.tempdir, 'fakeapp', 'Config')
self.assertEqual(self.conf.config, checkconfig)
def test_noxdg_cache(self):
checkcache = os.path.join(self.tempdir, 'fakeapp', 'Cache')
self.assertEqual(self.conf.cache, checkcache)
class TestLinuxNoXDG(unittest.TestCase):
@classmethod
def setUpClass(cls):
configbetter.sys.platform = 'linux'
cls.tempdir = tempfile.mkdtemp()
configbetter.os.environ['HOME'] = cls.tempdir
cls.conf = configbetter.Config('fakeapp')
@classmethod
def tearDownClass(cls):
os.rmdir(cls.tempdir)
def test_noxdg_data(self):
checkdata = os.path.join(self.tempdir, '.local', 'share', 'fakeapp')
self.assertEqual(self.conf.data, checkdata)
def test_noxdg_config(self):
checkconfig = os.path.join(self.tempdir, '.config', 'fakeapp')
self.assertEqual(self.conf.config, checkconfig)
def test_noxdg_cache(self):
checkcache = os.path.join(self.tempdir, '.cache', 'fakeapp')
self.assertEqual(self.conf.cache, checkcache)
class TestMacNoXDG(unittest.TestCase):
@classmethod
def setUpClass(cls):
configbetter.sys.platform = 'darwin'
cls.tempdir = tempfile.mkdtemp()
configbetter.os.environ['HOME'] = cls.tempdir
cls.conf = configbetter.Config('fakeapp')
@classmethod
def tearDownClass(cls):
os.rmdir(cls.tempdir)
def test_noxdg_data(self):
checkdata = os.path.join(self.tempdir, 'Library', 'fakeapp')
self.assertEqual(self.conf.data, checkdata)
def test_noxdg_config(self):
checkconfig = os.path.join(self.tempdir, 'Library', 'Preferences', 'fakeapp')
self.assertEqual(self.conf.config, checkconfig)
def test_noxdg_cache(self):
checkcache = os.path.join(self.tempdir, 'Library', 'Caches', 'fakeapp')
self.assertEqual(self.conf.cache, checkcache)
class TestMacForceUnix(unittest.TestCase):
@classmethod
def setUpClass(cls):
configbetter.sys.platform = 'darwin'
cls.tempdir = tempfile.mkdtemp()
configbetter.os.environ['HOME'] = cls.tempdir
cls.conf = configbetter.Config('fakeapp', force_unix=True)
@classmethod
def tearDownClass(cls):
os.rmdir(cls.tempdir)
def test_noxdg_data(self):
checkdata = os.path.join(self.tempdir, '.local', 'share', 'fakeapp')
self.assertEqual(self.conf.data, checkdata)
def test_noxdg_config(self):
checkconfig = os.path.join(self.tempdir, '.config', 'fakeapp')
self.assertEqual(self.conf.config, checkconfig)
def test_noxdg_cache(self):
checkcache = os.path.join(self.tempdir, '.cache', 'fakeapp')
self.assertEqual(self.conf.cache, checkcache)
class TestXDG(unittest.TestCase):
@classmethod
def setUpClass(cls):
configbetter.sys.platform = 'win32'
cls.datatempdir = tempfile.mkdtemp()
cls.configtempdir = tempfile.mkdtemp()
cls.cachetempdir = tempfile.mkdtemp()
configbetter.os.environ['XDG_DATA_HOME'] = cls.datatempdir
configbetter.os.environ['XDG_CONFIG_HOME'] = cls.configtempdir
configbetter.os.environ['XDG_CACHE_HOME'] = cls.cachetempdir
cls.conf = configbetter.Config('fakeapp')
@classmethod
def tearDownClass(cls):
os.rmdir(cls.datatempdir)
os.rmdir(cls.configtempdir)
os.rmdir(cls.cachetempdir)
def test_xdg_data(self):
checkdata = os.path.join(self.datatempdir, 'fakeapp')
self.assertEqual(self.conf.data, checkdata)
def test_xdg_config(self):
checkconfig = os.path.join(self.configtempdir, 'fakeapp')
self.assertEqual(self.conf.config, checkconfig)
def test_xdg_cache(self):
checkcache = os.path.join(self.cachetempdir, 'fakeapp')
self.assertEqual(self.conf.cache, checkcache)
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"XDG_DATA_HOME",
"APPDATA",
"XDG_CACHE_HOME",
"HOME",
"XDG_CONFIG_HOME"
] |
[]
|
["XDG_DATA_HOME", "APPDATA", "XDG_CACHE_HOME", "HOME", "XDG_CONFIG_HOME"]
|
python
| 5 | 0 | |
pkg/status/status.go
|
// Copyright Contributors to the Open Cluster Management project
package status
import (
"context"
"errors"
"os"
"time"
"github.com/go-kit/kit/log"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/stolostron/metrics-collector/pkg/logger"
oav1beta1 "github.com/stolostron/multicluster-observability-operator/api/v1beta1"
)
const (
name = "observability-addon"
namespace = "open-cluster-management-addon-observability"
)
type StatusReport struct {
statusClient client.Client
logger log.Logger
}
func New(logger log.Logger) (*StatusReport, error) {
testMode := os.Getenv("UNIT_TEST") != ""
standaloneMode := os.Getenv("STANDALONE") == "true"
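	// UNIT_TEST swaps in a fake client for tests; STANDALONE leaves the client
	// nil, which turns UpdateStatus into a no-op.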
var kubeClient client.Client
if testMode {
kubeClient = fake.NewFakeClient()
} else if standaloneMode {
kubeClient = nil
} else {
config, err := clientcmd.BuildConfigFromFlags("", "")
if err != nil {
return nil, errors.New("Failed to create the kube config")
}
s := scheme.Scheme
if err := oav1beta1.AddToScheme(s); err != nil {
return nil, errors.New("Failed to add observabilityaddon into scheme")
}
kubeClient, err = client.New(config, client.Options{Scheme: s})
if err != nil {
return nil, errors.New("Failed to create the kube client")
}
}
return &StatusReport{
statusClient: kubeClient,
logger: log.With(logger, "component", "statusclient"),
}, nil
}
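// UpdateStatus ensures the ObservabilityAddon carries a single true condition
// of type t with reason r and message m, demoting other true conditions and
// writing the status back only when something actually changed.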
func (s *StatusReport) UpdateStatus(t string, r string, m string) error {
if s.statusClient == nil {
return nil
}
addon := &oav1beta1.ObservabilityAddon{}
err := s.statusClient.Get(context.TODO(), types.NamespacedName{
Name: name,
Namespace: namespace,
}, addon)
if err != nil {
logger.Log(s.logger, logger.Error, "err", err)
return err
}
update := false
found := false
conditions := []oav1beta1.StatusCondition{}
latestC := oav1beta1.StatusCondition{}
for _, c := range addon.Status.Conditions {
if c.Status == metav1.ConditionTrue {
if c.Type != t {
c.Status = metav1.ConditionFalse
} else {
found = true
if c.Reason != r || c.Message != m {
c.Reason = r
c.Message = m
c.LastTransitionTime = metav1.NewTime(time.Now())
update = true
latestC = c
continue
}
}
} else {
if c.Type == t {
found = true
c.Status = metav1.ConditionTrue
c.Reason = r
c.Message = m
c.LastTransitionTime = metav1.NewTime(time.Now())
update = true
latestC = c
continue
}
}
conditions = append(conditions, c)
}
if update {
conditions = append(conditions, latestC)
}
if !found {
conditions = append(conditions, oav1beta1.StatusCondition{
Type: t,
Status: metav1.ConditionTrue,
Reason: r,
Message: m,
LastTransitionTime: metav1.NewTime(time.Now()),
})
update = true
}
if update {
addon.Status.Conditions = conditions
err = s.statusClient.Status().Update(context.TODO(), addon)
if err != nil {
logger.Log(s.logger, logger.Error, "err", err)
}
return err
}
return nil
}
|
[
"\"UNIT_TEST\"",
"\"STANDALONE\""
] |
[] |
[
"STANDALONE",
"UNIT_TEST"
] |
[]
|
["STANDALONE", "UNIT_TEST"]
|
go
| 2 | 0 | |
ssht00ls/classes/utils/__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# imports.
from ssht00ls.classes.config import *
import os, sys, requests, ast, json, pathlib, glob, string, getpass, django
# save config file safely.
def save_config_safely(backup=True, __loader__=None, __keyboard_interrupt__=None):
if backup: save_config_backup_safely()
try:
CONFIG.save()
except KeyboardInterrupt as e:
if __loader__ == None:
__loader__ = dev0s.console.Loader("&RED&Do not interrupt!&END& Saving ssht00ls config file.")
return save_config_safely(backup=False, __loader__=__loader__, __keyboard_interrupt__=e)
if __loader__ != None: __loader__.stop()
if __keyboard_interrupt__ != None:
raise KeyboardInterrupt(__keyboard_interrupt__)
# save backup of config file safely.
def save_config_backup_safely(__loader__=None, __keyboard_interrupt__=None):
	path = DATABASE.join(".backups")
	if not Files.exists(path): Files.create(path, directory=True)
	path += "/config/"
	if not Files.exists(path): Files.create(path, directory=True)
	path += f"/{Date().date}"
	try:
		Files.save(path, CONFIG.dictionary, format="json")
	except KeyboardInterrupt as e:
		if __loader__ == None:
			__loader__ = dev0s.console.Loader("&RED&Do not interrupt!&END& Saving backup of ssht00ls config file.")
		return save_config_backup_safely(__loader__=__loader__, __keyboard_interrupt__=e)
	if __loader__ != None: __loader__.stop()
	# rotate the backups away once they grow beyond 5mb.
	fp = FilePath(gfp.base(path))
	if fp.size(format=int, mode="mb") >= 5:
		fp.delete(forced=True)
		fp.create(directory=True)
	# re-raise a captured interrupt now that the backup is written.
	if __keyboard_interrupt__ != None:
		raise KeyboardInterrupt(__keyboard_interrupt__)
# check / start the ssh agent (due to circular import keep it over here for classes: [aliases]).
def ssh_agent():
"""
SSH_AUTH_SOCK = os.environ.get("SSH_AUTH_SOCK")
SSH_AGENT_PID = os.environ.get("SSH_AGENT_PID")
"""
"""
try:
output = utils.__execute__([f"ssh-add", "-D"])
except: a=1
try:
output = utils.__execute__([f"ssh-add", "-k"])
except: a=1
"""
# version 2.
if len(dev0s.code.processes(includes="ssh-agent").processes) >= 10:
dev0s.code.execute(f"pkill -9 -f ssh-agent")
try:
output = dev0s.code.execute(f"ssh-agent")
if not output.success: output.crash()
output = str(output)
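		# ssh-agent prints "SSH_AUTH_SOCK=...; export SSH_AUTH_SOCK; SSH_AGENT_PID=...;"
		# on stdout; scrape both values out of that output and export them to os.environ.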
try:
SSH_AUTH_SOCK = output.split("SSH_AUTH_SOCK=")[1].split(";")[0]
os.environ["SSH_AUTH_SOCK"] = SSH_AUTH_SOCK
except: return None
try:
SSH_AGENT_PID = output.split("SSH_AGENT_PID=")[1].split(";")[0]
os.environ["SSH_AGENT_PID"] = SSH_AGENT_PID
except: return None
except: return None
os.environ["SSH_AUTH_SOCK"] = SSH_AUTH_SOCK
os.environ["SSH_AGENT_PID"] = SSH_AGENT_PID
# converting variables.
def __array_to_string__(array, joiner=" "):
string = ""
for i in array:
if string == "": string = str(i)
else: string += joiner+str(i)
return string
def __string_to_boolean__(string):
if string in ["true", "True", True]: return True
elif string in ["false", "False", False]: return False
else: raise ValueError(f"Could not convert string [{string}] to a boolean.")
def __string_to_bash__(string):
	# note: the quote escapes need a doubled backslash ("\\'" / '\\"'); with a single
	# backslash python collapses them to plain quotes and the replace is a no-op.
	a = string.replace('(','\(').replace(')','\)').replace("'","\\'").replace(" ","\ ").replace("$","\$").replace("!","\!").replace("?","\?").replace("@","\@").replace("%","\%").replace("^","\^").replace("&","\&").replace("*","\*").replace('"','\\"')
	return a
# generation.
def __generate_pincode__(characters=6, charset=string.digits):
return ''.join(random.choice(charset) for x in range(characters))
#
# execute a shell command.
def __execute__(
	# the command as an array.
	command=[],
	# wait till the command is finished.
	wait=False,
	# the command's timeout; [timeout] overrides parameter [wait].
	timeout=None,
	# the command's output return format: string / array.
	return_format="string",
	# the subprocess.Popen.shell argument.
	shell=False,
	# pass an input string to the process.
	input=None,
):
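	# Runs the command with piped stdio; optionally feeds [input] to stdin,
	# honours [timeout] / [wait], and returns stdout (falling back to stderr
	# when stdout is empty) in the requested [return_format].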
def __convert__(byte_array, return_format=return_format):
if return_format == "string":
lines = ""
for line in byte_array:
lines += line.decode()
return lines
elif return_format == "array":
lines = []
for line in byte_array:
lines.append(line.decode().replace("\n","").replace("\\n",""))
return lines
# create process.
if isinstance(command, str): command = command.split(' ')
p = subprocess.Popen(
command,
shell=shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,)
# send input.
if input != None:
if isinstance(input, list):
for s in input:
p.stdin.write(f'{s}\n'.encode())
elif isinstance(input, str):
p.stdin.write(f'{input}\n'.encode())
else: raise ValueError("Invalid format for parameter [input] required format: [string, array].")
p.stdin.flush()
# timeout.
if timeout != None:
time.sleep(timeout)
p.terminate()
# await.
elif wait:
p.wait()
# get output.
output = __convert__(p.stdout.readlines(), return_format=return_format)
if return_format == "string" and output == "":
output = __convert__(p.stderr.readlines(), return_format=return_format)
elif return_format == "array" and output == []:
output = __convert__(p.stderr.readlines(), return_format=return_format)
return output
# execute a shell script.
def __execute_script__(
	# the script as a string.
	script="",
	# wait till the command is finished.
	wait=False,
	# the command's timeout; [timeout] overrides parameter [wait].
	timeout=None,
	# the command's output return format: string / array.
	return_format="string",
	# the subprocess.Popen.shell argument.
	shell=False,
	# pass an input string to the process.
	input=None,
):
path = f"/tmp/shell_script.{__generate_pincode__(characters=32)}.sh"
with open(str(path), "w") as file:
file.write(str(script))
os.system(f"chmod +x {path}")
output = __execute__(
command=[f"sh", f"{path}"],
wait=wait,
timeout=timeout,
return_format=return_format,
shell=shell,
input=input,)
os.system(f"rm -fr {path}")
return output
|
[] |
[] |
[
"SSH_AGENT_PID",
"SSH_AUTH_SOCK"
] |
[]
|
["SSH_AGENT_PID", "SSH_AUTH_SOCK"]
|
python
| 2 | 0 | |
daemon/cluster/cluster.go
|
package cluster
import (
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
distreference "github.com/docker/distribution/reference"
apierrors "github.com/docker/docker/api/errors"
apitypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
"github.com/docker/docker/daemon/cluster/executor/container"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/reference"
"github.com/docker/docker/runconfig"
swarmapi "github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/manager/encryption"
swarmnode "github.com/docker/swarmkit/node"
"github.com/docker/swarmkit/protobuf/ptypes"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
const swarmDirName = "swarm"
const controlSocket = "control.sock"
const swarmConnectTimeout = 20 * time.Second
const swarmRequestTimeout = 20 * time.Second
const stateFile = "docker-state.json"
const defaultAddr = "0.0.0.0:2377"
const (
initialReconnectDelay = 100 * time.Millisecond
maxReconnectDelay = 30 * time.Second
contextPrefix = "com.docker.swarm"
)
// ErrNoSwarm is returned on leaving a cluster that was never initialized
var ErrNoSwarm = fmt.Errorf("This node is not part of a swarm")
// ErrSwarmExists is returned on initialize or join request for a cluster that has already been activated
var ErrSwarmExists = fmt.Errorf("This node is already part of a swarm. Use \"docker swarm leave\" to leave this swarm and join another one.")
// ErrPendingSwarmExists is returned on initialize or join request for a cluster that is already processing a similar request but has not succeeded yet.
var ErrPendingSwarmExists = fmt.Errorf("This node is processing an existing join request that has not succeeded yet. Use \"docker swarm leave\" to cancel the current request.")
// ErrSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached.
var ErrSwarmJoinTimeoutReached = fmt.Errorf("Timeout was reached before node was joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node.")
// ErrSwarmLocked is returned if the swarm is encrypted and needs a key to unlock it.
var ErrSwarmLocked = fmt.Errorf("Swarm is encrypted and needs to be unlocked before it can be used. Please use \"docker swarm unlock\" to unlock it.")
// ErrSwarmCertificatesExpired is returned if docker was not started for the whole validity period and they had no chance to renew automatically.
var ErrSwarmCertificatesExpired = errors.New("Swarm certificates have expired. To replace them, leave the swarm and join again.")
// NetworkSubnetsProvider exposes functions for retrieving the subnets
// of networks managed by Docker, so they can be filtered.
type NetworkSubnetsProvider interface {
V4Subnets() []net.IPNet
V6Subnets() []net.IPNet
}
// Config provides values for Cluster.
type Config struct {
Root string
Name string
Backend executorpkg.Backend
NetworkSubnetsProvider NetworkSubnetsProvider
// DefaultAdvertiseAddr is the default host/IP or network interface to use
// if no AdvertiseAddr value is specified.
DefaultAdvertiseAddr string
// path to store runtime state, such as the swarm control socket
RuntimeRoot string
}
// Cluster provides capabilities to participate in a cluster as a worker or a
// manager.
type Cluster struct {
sync.RWMutex
*node
root string
runtimeRoot string
config Config
configEvent chan struct{} // todo: make this array and goroutine safe
actualLocalAddr string // after resolution, not persisted
stop bool
err error
cancelDelay func()
attachers map[string]*attacher
locked bool
lastNodeConfig *nodeStartConfig
}
// attacher manages the in-memory attachment state of a container
// attachment to a global scope network managed by swarm manager. It
// helps in identifying the attachment ID via the taskID and the
// corresponding attachment configuration obtained from the manager.
type attacher struct {
taskID string
config *network.NetworkingConfig
attachWaitCh chan *network.NetworkingConfig
attachCompleteCh chan struct{}
detachWaitCh chan struct{}
}
type node struct {
*swarmnode.Node
done chan struct{}
ready bool
conn *grpc.ClientConn
client swarmapi.ControlClient
logs swarmapi.LogsClient
reconnectDelay time.Duration
config nodeStartConfig
}
// nodeStartConfig holds configuration needed to start a new node. Exported
// fields of this structure are saved to disk in json. Unexported fields
// contain data that shouldn't be persisted between daemon reloads.
type nodeStartConfig struct {
// LocalAddr is this machine's local IP or hostname, if specified.
LocalAddr string
// RemoteAddr is the address that was given to "swarm join". It is used
// to find LocalAddr if necessary.
RemoteAddr string
// ListenAddr is the address we bind to, including a port.
ListenAddr string
// AdvertiseAddr is the address other nodes should connect to,
// including a port.
AdvertiseAddr string
joinAddr string
forceNewCluster bool
joinToken string
lockKey []byte
autolock bool
}
// New creates a new Cluster instance using provided config.
func New(config Config) (*Cluster, error) {
root := filepath.Join(config.Root, swarmDirName)
if err := os.MkdirAll(root, 0700); err != nil {
return nil, err
}
if config.RuntimeRoot == "" {
config.RuntimeRoot = root
}
if err := os.MkdirAll(config.RuntimeRoot, 0700); err != nil {
return nil, err
}
c := &Cluster{
root: root,
config: config,
configEvent: make(chan struct{}, 10),
runtimeRoot: config.RuntimeRoot,
attachers: make(map[string]*attacher),
}
nodeConfig, err := c.loadState()
if err != nil {
if os.IsNotExist(err) {
return c, nil
}
return nil, err
}
n, err := c.startNewNode(*nodeConfig)
if err != nil {
return nil, err
}
select {
case <-time.After(swarmConnectTimeout):
logrus.Error("swarm component could not be started before timeout was reached")
case <-n.Ready():
case <-n.done:
if errors.Cause(c.err) == ErrSwarmLocked {
return c, nil
}
if err, ok := errors.Cause(c.err).(x509.CertificateInvalidError); ok && err.Reason == x509.Expired {
c.err = ErrSwarmCertificatesExpired
return c, nil
}
return nil, fmt.Errorf("swarm component could not be started: %v", c.err)
}
go c.reconnectOnFailure(n)
return c, nil
}
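// loadState reads the persisted nodeStartConfig from stateFile, clearing stale
// state when the node certificate is missing.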
func (c *Cluster) loadState() (*nodeStartConfig, error) {
dt, err := ioutil.ReadFile(filepath.Join(c.root, stateFile))
if err != nil {
return nil, err
}
// missing certificate means no actual state to restore from
if _, err := os.Stat(filepath.Join(c.root, "certificates/swarm-node.crt")); err != nil {
if os.IsNotExist(err) {
c.clearState()
}
return nil, err
}
var st nodeStartConfig
if err := json.Unmarshal(dt, &st); err != nil {
return nil, err
}
return &st, nil
}
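// saveState atomically persists the given nodeStartConfig to stateFile.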
func (c *Cluster) saveState(config nodeStartConfig) error {
dt, err := json.Marshal(config)
if err != nil {
return err
}
return ioutils.AtomicWriteFile(filepath.Join(c.root, stateFile), dt, 0600)
}
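// reconnectOnFailure restarts the swarm node with exponential backoff (capped
// at maxReconnectDelay) whenever it exits unexpectedly.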
func (c *Cluster) reconnectOnFailure(n *node) {
for {
<-n.done
c.Lock()
if c.stop || c.node != nil {
c.Unlock()
return
}
n.reconnectDelay *= 2
if n.reconnectDelay > maxReconnectDelay {
n.reconnectDelay = maxReconnectDelay
}
logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds())
delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay)
c.cancelDelay = cancel
c.Unlock()
<-delayCtx.Done()
if delayCtx.Err() != context.DeadlineExceeded {
return
}
c.Lock()
if c.node != nil {
c.Unlock()
return
}
var err error
config := n.config
config.RemoteAddr = c.getRemoteAddress()
config.joinAddr = config.RemoteAddr
n, err = c.startNewNode(config)
if err != nil {
c.err = err
close(n.done)
}
c.Unlock()
}
}
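// startNewNode resolves the local address, starts a fresh swarmkit node from
// the given config, persists that config, and wires up goroutines that track
// readiness, errors and the control socket connection.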
func (c *Cluster) startNewNode(conf nodeStartConfig) (*node, error) {
if err := c.config.Backend.IsSwarmCompatible(); err != nil {
return nil, err
}
actualLocalAddr := conf.LocalAddr
if actualLocalAddr == "" {
// If localAddr was not specified, resolve it automatically
// based on the route to joinAddr. localAddr can only be left
// empty on "join".
listenHost, _, err := net.SplitHostPort(conf.ListenAddr)
if err != nil {
return nil, fmt.Errorf("could not parse listen address: %v", err)
}
listenAddrIP := net.ParseIP(listenHost)
if listenAddrIP == nil || !listenAddrIP.IsUnspecified() {
actualLocalAddr = listenHost
} else {
if conf.RemoteAddr == "" {
// Should never happen except using swarms created by
// old versions that didn't save remoteAddr.
conf.RemoteAddr = "8.8.8.8:53"
}
conn, err := net.Dial("udp", conf.RemoteAddr)
if err != nil {
return nil, fmt.Errorf("could not find local IP address: %v", err)
}
localHostPort := conn.LocalAddr().String()
actualLocalAddr, _, _ = net.SplitHostPort(localHostPort)
conn.Close()
}
}
var control string
if runtime.GOOS == "windows" {
control = `\\.\pipe\` + controlSocket
} else {
control = filepath.Join(c.runtimeRoot, controlSocket)
}
c.node = nil
c.cancelDelay = nil
c.stop = false
n, err := swarmnode.New(&swarmnode.Config{
Hostname: c.config.Name,
ForceNewCluster: conf.forceNewCluster,
ListenControlAPI: control,
ListenRemoteAPI: conf.ListenAddr,
AdvertiseRemoteAPI: conf.AdvertiseAddr,
JoinAddr: conf.joinAddr,
StateDir: c.root,
JoinToken: conf.joinToken,
Executor: container.NewExecutor(c.config.Backend),
HeartbeatTick: 1,
ElectionTick: 3,
UnlockKey: conf.lockKey,
AutoLockManagers: conf.autolock,
PluginGetter: c.config.Backend.PluginGetter(),
})
if err != nil {
return nil, err
}
ctx := context.Background()
if err := n.Start(ctx); err != nil {
return nil, err
}
node := &node{
Node: n,
done: make(chan struct{}),
reconnectDelay: initialReconnectDelay,
config: conf,
}
c.node = node
c.actualLocalAddr = actualLocalAddr // not saved
c.saveState(conf)
c.config.Backend.DaemonJoinsCluster(c)
go func() {
err := detectLockedError(n.Err(ctx))
if err != nil {
logrus.Errorf("cluster exited with error: %v", err)
}
c.Lock()
c.node = nil
c.err = err
if errors.Cause(err) == ErrSwarmLocked {
c.locked = true
confClone := conf
c.lastNodeConfig = &confClone
}
c.Unlock()
close(node.done)
}()
go func() {
select {
case <-n.Ready():
c.Lock()
node.ready = true
c.err = nil
c.Unlock()
case <-ctx.Done():
}
c.configEvent <- struct{}{}
}()
go func() {
for conn := range n.ListenControlSocket(ctx) {
c.Lock()
if node.conn != conn {
if conn == nil {
node.client = nil
node.logs = nil
} else {
node.client = swarmapi.NewControlClient(conn)
node.logs = swarmapi.NewLogsClient(conn)
}
}
node.conn = conn
c.Unlock()
c.configEvent <- struct{}{}
}
}()
return node, nil
}
// Init initializes new cluster from user provided request.
func (c *Cluster) Init(req types.InitRequest) (string, error) {
c.Lock()
if c.swarmExists() {
if !req.ForceNewCluster {
c.Unlock()
return "", ErrSwarmExists
}
if err := c.stopNode(); err != nil {
c.Unlock()
return "", err
}
}
if err := validateAndSanitizeInitRequest(&req); err != nil {
c.Unlock()
return "", apierrors.NewBadRequestError(err)
}
listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
if err != nil {
c.Unlock()
return "", err
}
advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
if err != nil {
c.Unlock()
return "", err
}
localAddr := listenHost
// If the local address is undetermined, the advertise address
// will be used as local address, if it belongs to this system.
// If the advertise address is not local, then we try to find
// a system address to use as local address. If this fails,
// we give up and ask user to pass the listen address.
if net.ParseIP(localAddr).IsUnspecified() {
advertiseIP := net.ParseIP(advertiseHost)
found := false
for _, systemIP := range listSystemIPs() {
if systemIP.Equal(advertiseIP) {
localAddr = advertiseIP.String()
found = true
break
}
}
if !found {
ip, err := c.resolveSystemAddr()
if err != nil {
c.Unlock()
logrus.Warnf("Could not find a local address: %v", err)
return "", errMustSpecifyListenAddr
}
localAddr = ip.String()
}
}
// todo: check current state existing
n, err := c.startNewNode(nodeStartConfig{
forceNewCluster: req.ForceNewCluster,
autolock: req.AutoLockManagers,
LocalAddr: localAddr,
ListenAddr: net.JoinHostPort(listenHost, listenPort),
AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort),
})
if err != nil {
c.Unlock()
return "", err
}
c.Unlock()
select {
case <-n.Ready():
if err := initClusterSpec(n, req.Spec); err != nil {
return "", err
}
go c.reconnectOnFailure(n)
return n.NodeID(), nil
case <-n.done:
c.RLock()
defer c.RUnlock()
if !req.ForceNewCluster { // if failure on first attempt don't keep state
if err := c.clearState(); err != nil {
return "", err
}
}
return "", c.err
}
}
// Join makes current Cluster part of an existing swarm cluster.
func (c *Cluster) Join(req types.JoinRequest) error {
c.Lock()
if c.swarmExists() {
c.Unlock()
return ErrSwarmExists
}
if err := validateAndSanitizeJoinRequest(&req); err != nil {
c.Unlock()
return apierrors.NewBadRequestError(err)
}
listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
if err != nil {
c.Unlock()
return err
}
var advertiseAddr string
if req.AdvertiseAddr != "" {
advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
// For joining, we don't need to provide an advertise address,
// since the remote side can detect it.
if err == nil {
advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort)
}
}
// todo: check current state existing
n, err := c.startNewNode(nodeStartConfig{
RemoteAddr: req.RemoteAddrs[0],
ListenAddr: net.JoinHostPort(listenHost, listenPort),
AdvertiseAddr: advertiseAddr,
joinAddr: req.RemoteAddrs[0],
joinToken: req.JoinToken,
})
if err != nil {
c.Unlock()
return err
}
c.Unlock()
select {
case <-time.After(swarmConnectTimeout):
// attempt to connect will continue in background, but reconnect only if it didn't fail
go func() {
select {
case <-n.Ready():
c.reconnectOnFailure(n)
case <-n.done:
logrus.Errorf("failed to join the cluster: %+v", c.err)
}
}()
return ErrSwarmJoinTimeoutReached
case <-n.Ready():
go c.reconnectOnFailure(n)
return nil
case <-n.done:
c.RLock()
defer c.RUnlock()
return c.err
}
}
// GetUnlockKey returns the unlock key for the swarm.
func (c *Cluster) GetUnlockKey() (string, error) {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return "", c.errNoManager()
}
ctx, cancel := c.getRequestContext()
defer cancel()
client := swarmapi.NewCAClient(c.conn)
r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{})
if err != nil {
return "", err
}
if len(r.UnlockKey) == 0 {
// no key
return "", nil
}
return encryption.HumanReadableKey(r.UnlockKey), nil
}
// UnlockSwarm provides a key to decrypt data that is encrypted at rest.
func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error {
c.RLock()
if !c.isActiveManager() {
if err := c.errNoManager(); err != ErrSwarmLocked {
c.RUnlock()
return err
}
}
if c.node != nil || !c.locked {
c.RUnlock()
return errors.New("swarm is not locked")
}
c.RUnlock()
key, err := encryption.ParseHumanReadableKey(req.UnlockKey)
if err != nil {
return err
}
c.Lock()
config := *c.lastNodeConfig
config.lockKey = key
n, err := c.startNewNode(config)
if err != nil {
c.Unlock()
return err
}
c.Unlock()
select {
case <-n.Ready():
case <-n.done:
if errors.Cause(c.err) == ErrSwarmLocked {
return errors.New("swarm could not be unlocked: invalid key provided")
}
return fmt.Errorf("swarm component could not be started: %v", c.err)
}
go c.reconnectOnFailure(n)
return nil
}
// stopNode is a helper that stops the active c.node and waits until it has
// shut down. Call while keeping the cluster lock.
func (c *Cluster) stopNode() error {
if c.node == nil {
return nil
}
c.stop = true
if c.cancelDelay != nil {
c.cancelDelay()
c.cancelDelay = nil
}
node := c.node
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
// TODO: can't hold lock on stop because it calls back to network
c.Unlock()
defer c.Lock()
if err := node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
return err
}
<-node.done
return nil
}
func removingManagerCausesLossOfQuorum(reachable, unreachable int) bool {
return reachable-2 <= unreachable
}
func isLastManager(reachable, unreachable int) bool {
return reachable == 1 && unreachable == 0
}
// Leave shuts down Cluster and removes current state.
func (c *Cluster) Leave(force bool) error {
c.Lock()
node := c.node
if node == nil {
if c.locked {
c.locked = false
c.lastNodeConfig = nil
c.Unlock()
} else if c.err == ErrSwarmCertificatesExpired {
c.err = nil
c.Unlock()
} else {
c.Unlock()
return ErrNoSwarm
}
} else {
if node.Manager() != nil && !force {
msg := "You are attempting to leave the swarm on a node that is participating as a manager. "
if c.isActiveManager() {
active, reachable, unreachable, err := c.managerStats()
if err == nil {
if active && removingManagerCausesLossOfQuorum(reachable, unreachable) {
if isLastManager(reachable, unreachable) {
msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. "
c.Unlock()
return fmt.Errorf(msg)
}
msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable)
}
}
} else {
msg += "Doing so may lose the consensus of your cluster. "
}
msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message."
c.Unlock()
return fmt.Errorf(msg)
}
if err := c.stopNode(); err != nil {
logrus.Errorf("failed to shut down cluster node: %v", err)
signal.DumpStacks("")
c.Unlock()
return err
}
c.Unlock()
if nodeID := node.NodeID(); nodeID != "" {
nodeContainers, err := c.listContainerForNode(nodeID)
if err != nil {
return err
}
for _, id := range nodeContainers {
if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil {
logrus.Errorf("error removing %v: %v", id, err)
}
}
}
}
c.configEvent <- struct{}{}
// todo: cleanup optional?
if err := c.clearState(); err != nil {
return err
}
return nil
}
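// listContainerForNode returns the IDs of all containers labeled as belonging
// to the given swarm node.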
func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) {
var ids []string
filters := filters.NewArgs()
filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID))
containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{
Filters: filters,
})
if err != nil {
return []string{}, err
}
for _, c := range containers {
ids = append(ids, c.ID)
}
return ids, nil
}
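// clearState removes all persisted swarm state, recreates an empty state
// directory and notifies the backend that the daemon left the cluster.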
func (c *Cluster) clearState() error {
// todo: backup this data instead of removing?
if err := os.RemoveAll(c.root); err != nil {
return err
}
if err := os.MkdirAll(c.root, 0700); err != nil {
return err
}
c.config.Backend.DaemonLeavesCluster()
return nil
}
func (c *Cluster) getRequestContext() (context.Context, func()) { // TODO: not needed when requests don't block on quorum lost
return context.WithTimeout(context.Background(), swarmRequestTimeout)
}
// Inspect retrieves the configuration properties of a managed swarm cluster.
func (c *Cluster) Inspect() (types.Swarm, error) {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return types.Swarm{}, c.errNoManager()
}
ctx, cancel := c.getRequestContext()
defer cancel()
swarm, err := getSwarm(ctx, c.client)
if err != nil {
return types.Swarm{}, err
}
return convert.SwarmFromGRPC(*swarm), nil
}
// Update updates configuration of a managed swarm cluster.
func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return c.errNoManager()
}
ctx, cancel := c.getRequestContext()
defer cancel()
swarm, err := getSwarm(ctx, c.client)
if err != nil {
return err
}
	// In update, the client should provide the complete spec of the swarm, including
	// Name and Labels. If a field is specified as 0 or nil, then the default value
	// will be used by swarmkit.
clusterSpec, err := convert.SwarmSpecToGRPC(spec)
if err != nil {
return apierrors.NewBadRequestError(err)
}
_, err = c.client.UpdateCluster(
ctx,
&swarmapi.UpdateClusterRequest{
ClusterID: swarm.ID,
Spec: &clusterSpec,
ClusterVersion: &swarmapi.Version{
Index: version,
},
Rotation: swarmapi.KeyRotation{
WorkerJoinToken: flags.RotateWorkerToken,
ManagerJoinToken: flags.RotateManagerToken,
ManagerUnlockKey: flags.RotateManagerUnlockKey,
},
},
)
return err
}
// IsManager returns true if Cluster is participating as a manager.
func (c *Cluster) IsManager() bool {
c.RLock()
defer c.RUnlock()
return c.isActiveManager()
}
// IsAgent returns true if Cluster is participating as a worker/agent.
func (c *Cluster) IsAgent() bool {
c.RLock()
defer c.RUnlock()
return c.node != nil && c.ready
}
// GetLocalAddress returns the local address.
func (c *Cluster) GetLocalAddress() string {
c.RLock()
defer c.RUnlock()
return c.actualLocalAddr
}
// GetListenAddress returns the listen address.
func (c *Cluster) GetListenAddress() string {
c.RLock()
defer c.RUnlock()
if c.node != nil {
return c.node.config.ListenAddr
}
return ""
}
// GetAdvertiseAddress returns the remotely reachable address of this node.
func (c *Cluster) GetAdvertiseAddress() string {
c.RLock()
defer c.RUnlock()
if c.node != nil && c.node.config.AdvertiseAddr != "" {
advertiseHost, _, _ := net.SplitHostPort(c.node.config.AdvertiseAddr)
return advertiseHost
}
return c.actualLocalAddr
}
// GetRemoteAddress returns a known advertise address of a remote manager if
// available.
// todo: change to array/connect with info
func (c *Cluster) GetRemoteAddress() string {
c.RLock()
defer c.RUnlock()
return c.getRemoteAddress()
}
func (c *Cluster) getRemoteAddress() string {
if c.node == nil {
return ""
}
nodeID := c.node.NodeID()
for _, r := range c.node.Remotes() {
if r.NodeID != nodeID {
return r.Addr
}
}
return ""
}
// ListenClusterEvents returns a channel that receives messages on cluster
// participation changes.
// todo: make cancelable and accessible to multiple callers
func (c *Cluster) ListenClusterEvents() <-chan struct{} {
return c.configEvent
}
// Info returns information about the current cluster state.
func (c *Cluster) Info() types.Info {
info := types.Info{
NodeAddr: c.GetAdvertiseAddress(),
}
c.RLock()
defer c.RUnlock()
if c.node == nil {
info.LocalNodeState = types.LocalNodeStateInactive
if c.cancelDelay != nil {
info.LocalNodeState = types.LocalNodeStateError
}
if c.locked {
info.LocalNodeState = types.LocalNodeStateLocked
} else if c.err == ErrSwarmCertificatesExpired {
info.LocalNodeState = types.LocalNodeStateError
}
} else {
info.LocalNodeState = types.LocalNodeStatePending
if c.ready {
info.LocalNodeState = types.LocalNodeStateActive
} else if c.locked {
info.LocalNodeState = types.LocalNodeStateLocked
}
}
if c.err != nil {
info.Error = c.err.Error()
}
ctx, cancel := c.getRequestContext()
defer cancel()
if c.isActiveManager() {
info.ControlAvailable = true
swarm, err := c.Inspect()
if err != nil {
info.Error = err.Error()
}
// Strip JoinTokens
info.Cluster = swarm.ClusterInfo
if r, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err == nil {
info.Nodes = len(r.Nodes)
for _, n := range r.Nodes {
if n.ManagerStatus != nil {
info.Managers++
}
}
}
}
if c.node != nil {
for _, r := range c.node.Remotes() {
info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr})
}
info.NodeID = c.node.NodeID()
}
return info
}
// isActiveManager should not be called without a read lock
func (c *Cluster) isActiveManager() bool {
return c.node != nil && c.conn != nil
}
// swarmExists should not be called without a read lock
func (c *Cluster) swarmExists() bool {
return c.node != nil || c.locked || c.err == ErrSwarmCertificatesExpired
}
// errNoManager returns error describing why manager commands can't be used.
// Call with read lock.
func (c *Cluster) errNoManager() error {
if c.node == nil {
if c.locked {
return ErrSwarmLocked
}
if c.err == ErrSwarmCertificatesExpired {
return ErrSwarmCertificatesExpired
}
return fmt.Errorf("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.")
}
if c.node.Manager() != nil {
return fmt.Errorf("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster.")
}
return fmt.Errorf("This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.")
}
// GetServices returns all services of a managed swarm cluster.
func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return nil, c.errNoManager()
}
filters, err := newListServicesFilters(options.Filters)
if err != nil {
return nil, err
}
ctx, cancel := c.getRequestContext()
defer cancel()
r, err := c.client.ListServices(
ctx,
&swarmapi.ListServicesRequest{Filters: filters})
if err != nil {
return nil, err
}
services := []types.Service{}
for _, service := range r.Services {
services = append(services, convert.ServiceFromGRPC(*service))
}
return services, nil
}
// imageWithDigestString takes an image such as name or name:tag
// and returns the image pinned to a digest, such as name@sha256:34234...
// Due to the difference between the docker/docker/reference, and the
// docker/distribution/reference packages, we're parsing the image twice.
// As the two packages converge, this function should be simplified.
// TODO(nishanttotla): After the packages converge, the function must
// convert distreference.Named -> distreference.Canonical, and the logic simplified.
func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) {
if _, err := digest.ParseDigest(image); err == nil {
return "", errors.New("image reference is an image ID")
}
ref, err := distreference.ParseNamed(image)
if err != nil {
return "", err
}
// only query registry if not a canonical reference (i.e. with digest)
if _, ok := ref.(distreference.Canonical); !ok {
// create a docker/docker/reference Named object because GetRepository needs it
dockerRef, err := reference.ParseNamed(image)
if err != nil {
return "", err
}
dockerRef = reference.WithDefaultTag(dockerRef)
namedTaggedRef, ok := dockerRef.(reference.NamedTagged)
if !ok {
return "", fmt.Errorf("unable to cast image to NamedTagged reference object")
}
repo, _, err := c.config.Backend.GetRepository(ctx, namedTaggedRef, authConfig)
if err != nil {
return "", err
}
dscrptr, err := repo.Tags(ctx).Get(ctx, namedTaggedRef.Tag())
if err != nil {
return "", err
}
namedDigestedRef, err := distreference.WithDigest(distreference.EnsureTagged(ref), dscrptr.Digest)
if err != nil {
return "", err
}
return namedDigestedRef.String(), nil
}
// reference already contains a digest, so just return it
return ref.String(), nil
}
// CreateService creates a new service in a managed swarm cluster.
func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string) (*apitypes.ServiceCreateResponse, error) {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return nil, c.errNoManager()
}
ctx, cancel := c.getRequestContext()
defer cancel()
err := c.populateNetworkID(ctx, c.client, &s)
if err != nil {
return nil, err
}
serviceSpec, err := convert.ServiceSpecToGRPC(s)
if err != nil {
return nil, apierrors.NewBadRequestError(err)
}
ctnr := serviceSpec.Task.GetContainer()
if ctnr == nil {
return nil, fmt.Errorf("service does not use container tasks")
}
if encodedAuth != "" {
ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
}
// retrieve auth config from encoded auth
authConfig := &apitypes.AuthConfig{}
if encodedAuth != "" {
if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
logrus.Warnf("invalid authconfig: %v", err)
}
}
resp := &apitypes.ServiceCreateResponse{}
// pin image by digest
if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" {
digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
if err != nil {
logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()))
} else if ctnr.Image != digestImage {
logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
ctnr.Image = digestImage
} else {
logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image)
}
// Replace the context with a fresh one.
// If we timed out while communicating with the
// registry, then "ctx" will already be expired, which
// would cause UpdateService below to fail. Reusing
// "ctx" could make it impossible to create a service
// if the registry is slow or unresponsive.
var newCancel func()
ctx, newCancel = c.getRequestContext()
defer newCancel()
}
r, err := c.client.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
if err != nil {
return nil, err
}
resp.ID = r.Service.ID
return resp, nil
}
// GetService returns a service based on an ID or name.
func (c *Cluster) GetService(input string) (types.Service, error) {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return types.Service{}, c.errNoManager()
}
ctx, cancel := c.getRequestContext()
defer cancel()
service, err := getService(ctx, c.client, input)
if err != nil {
return types.Service{}, err
}
return convert.ServiceFromGRPC(*service), nil
}
// UpdateService updates existing service to match new properties.
func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, encodedAuth string, registryAuthFrom string) (*apitypes.ServiceUpdateResponse, error) {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return nil, c.errNoManager()
}
ctx, cancel := c.getRequestContext()
defer cancel()
err := c.populateNetworkID(ctx, c.client, &spec)
if err != nil {
return nil, err
}
serviceSpec, err := convert.ServiceSpecToGRPC(spec)
if err != nil {
return nil, apierrors.NewBadRequestError(err)
}
currentService, err := getService(ctx, c.client, serviceIDOrName)
if err != nil {
return nil, err
}
newCtnr := serviceSpec.Task.GetContainer()
if newCtnr == nil {
return nil, fmt.Errorf("service does not use container tasks")
}
if encodedAuth != "" {
newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
} else {
		// If encodedAuth isn't being updated, we shouldn't lose the existing
		// value; keep using the auth that was already present.
var ctnr *swarmapi.ContainerSpec
switch registryAuthFrom {
case apitypes.RegistryAuthFromSpec, "":
ctnr = currentService.Spec.Task.GetContainer()
case apitypes.RegistryAuthFromPreviousSpec:
if currentService.PreviousSpec == nil {
return nil, fmt.Errorf("service does not have a previous spec")
}
ctnr = currentService.PreviousSpec.Task.GetContainer()
default:
return nil, fmt.Errorf("unsupported registryAuthFromValue")
}
if ctnr == nil {
return nil, fmt.Errorf("service does not use container tasks")
}
newCtnr.PullOptions = ctnr.PullOptions
// update encodedAuth so it can be used to pin image by digest
if ctnr.PullOptions != nil {
encodedAuth = ctnr.PullOptions.RegistryAuth
}
}
// retrieve auth config from encoded auth
authConfig := &apitypes.AuthConfig{}
if encodedAuth != "" {
if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
logrus.Warnf("invalid authconfig: %v", err)
}
}
resp := &apitypes.ServiceUpdateResponse{}
// pin image by digest
if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" {
digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
if err != nil {
logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()))
} else if newCtnr.Image != digestImage {
logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
newCtnr.Image = digestImage
} else {
logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image)
}
// Replace the context with a fresh one.
// If we timed out while communicating with the
// registry, then "ctx" will already be expired, which
// would cause UpdateService below to fail. Reusing
// "ctx" could make it impossible to create a service
// if the registry is slow or unresponsive.
var newCancel func()
ctx, newCancel = c.getRequestContext()
defer newCancel()
}
_, err = c.client.UpdateService(
ctx,
&swarmapi.UpdateServiceRequest{
ServiceID: currentService.ID,
Spec: &serviceSpec,
ServiceVersion: &swarmapi.Version{
Index: version,
},
},
)
return resp, err
}
// RemoveService removes a service from a managed swarm cluster.
func (c *Cluster) RemoveService(input string) error {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return c.errNoManager()
}
ctx, cancel := c.getRequestContext()
defer cancel()
service, err := getService(ctx, c.client, input)
if err != nil {
return err
}
if _, err := c.client.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID}); err != nil {
return err
}
return nil
}
// ServiceLogs collects service logs and writes them back to `config.OutStream`
func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend.ContainerLogsConfig, started chan struct{}) error {
c.RLock()
if !c.isActiveManager() {
c.RUnlock()
return c.errNoManager()
}
service, err := getService(ctx, c.client, input)
if err != nil {
c.RUnlock()
return err
}
stream, err := c.logs.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
Selector: &swarmapi.LogSelector{
ServiceIDs: []string{service.ID},
},
Options: &swarmapi.LogSubscriptionOptions{
Follow: config.Follow,
},
})
if err != nil {
c.RUnlock()
return err
}
wf := ioutils.NewWriteFlusher(config.OutStream)
defer wf.Close()
close(started)
wf.Flush()
outStream := stdcopy.NewStdWriter(wf, stdcopy.Stdout)
errStream := stdcopy.NewStdWriter(wf, stdcopy.Stderr)
// Release the lock before starting the stream.
c.RUnlock()
for {
// Check the context before doing anything.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
subscribeMsg, err := stream.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
for _, msg := range subscribeMsg.Messages {
data := []byte{}
if config.Timestamps {
ts, err := ptypes.Timestamp(msg.Timestamp)
if err != nil {
return err
}
data = append(data, []byte(ts.Format(logger.TimeFormat)+" ")...)
}
data = append(data, []byte(fmt.Sprintf("%s.node.id=%s,%s.service.id=%s,%s.task.id=%s ",
contextPrefix, msg.Context.NodeID,
contextPrefix, msg.Context.ServiceID,
contextPrefix, msg.Context.TaskID,
))...)
data = append(data, msg.Data...)
switch msg.Stream {
case swarmapi.LogStreamStdout:
outStream.Write(data)
case swarmapi.LogStreamStderr:
errStream.Write(data)
}
}
}
}
// GetNodes returns a list of all nodes known to a cluster.
func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return nil, c.errNoManager()
}
filters, err := newListNodesFilters(options.Filters)
if err != nil {
return nil, err
}
ctx, cancel := c.getRequestContext()
defer cancel()
r, err := c.client.ListNodes(
ctx,
&swarmapi.ListNodesRequest{Filters: filters})
if err != nil {
return nil, err
}
nodes := []types.Node{}
for _, node := range r.Nodes {
nodes = append(nodes, convert.NodeFromGRPC(*node))
}
return nodes, nil
}
// GetNode returns a node based on an ID.
func (c *Cluster) GetNode(input string) (types.Node, error) {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return types.Node{}, c.errNoManager()
}
ctx, cancel := c.getRequestContext()
defer cancel()
node, err := getNode(ctx, c.client, input)
if err != nil {
return types.Node{}, err
}
return convert.NodeFromGRPC(*node), nil
}
// UpdateNode updates existing nodes properties.
func (c *Cluster) UpdateNode(input string, version uint64, spec types.NodeSpec) error {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return c.errNoManager()
}
nodeSpec, err := convert.NodeSpecToGRPC(spec)
if err != nil {
return apierrors.NewBadRequestError(err)
}
ctx, cancel := c.getRequestContext()
defer cancel()
currentNode, err := getNode(ctx, c.client, input)
if err != nil {
return err
}
_, err = c.client.UpdateNode(
ctx,
&swarmapi.UpdateNodeRequest{
NodeID: currentNode.ID,
Spec: &nodeSpec,
NodeVersion: &swarmapi.Version{
Index: version,
},
},
)
return err
}
// RemoveNode removes a node from a cluster
func (c *Cluster) RemoveNode(input string, force bool) error {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return c.errNoManager()
}
ctx, cancel := c.getRequestContext()
defer cancel()
node, err := getNode(ctx, c.client, input)
if err != nil {
return err
}
if _, err := c.client.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force}); err != nil {
return err
}
return nil
}
// GetTasks returns a list of tasks matching the filter options.
func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return nil, c.errNoManager()
}
byName := func(filter filters.Args) error {
if filter.Include("service") {
serviceFilters := filter.Get("service")
for _, serviceFilter := range serviceFilters {
service, err := c.GetService(serviceFilter)
if err != nil {
return err
}
filter.Del("service", serviceFilter)
filter.Add("service", service.ID)
}
}
if filter.Include("node") {
nodeFilters := filter.Get("node")
for _, nodeFilter := range nodeFilters {
node, err := c.GetNode(nodeFilter)
if err != nil {
return err
}
filter.Del("node", nodeFilter)
filter.Add("node", node.ID)
}
}
return nil
}
filters, err := newListTasksFilters(options.Filters, byName)
if err != nil {
return nil, err
}
ctx, cancel := c.getRequestContext()
defer cancel()
r, err := c.client.ListTasks(
ctx,
&swarmapi.ListTasksRequest{Filters: filters})
if err != nil {
return nil, err
}
tasks := []types.Task{}
for _, task := range r.Tasks {
if task.Spec.GetContainer() != nil {
tasks = append(tasks, convert.TaskFromGRPC(*task))
}
}
return tasks, nil
}
// GetTask returns a task by an ID.
func (c *Cluster) GetTask(input string) (types.Task, error) {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return types.Task{}, c.errNoManager()
}
ctx, cancel := c.getRequestContext()
defer cancel()
task, err := getTask(ctx, c.client, input)
if err != nil {
return types.Task{}, err
}
return convert.TaskFromGRPC(*task), nil
}
// GetNetwork returns a cluster network by an ID.
func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return apitypes.NetworkResource{}, c.errNoManager()
}
ctx, cancel := c.getRequestContext()
defer cancel()
network, err := getNetwork(ctx, c.client, input)
if err != nil {
return apitypes.NetworkResource{}, err
}
return convert.BasicNetworkFromGRPC(*network), nil
}
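// getNetworks lists cluster-managed networks, optionally restricted by filters.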
func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([]apitypes.NetworkResource, error) {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return nil, c.errNoManager()
}
ctx, cancel := c.getRequestContext()
defer cancel()
r, err := c.client.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: filters})
if err != nil {
return nil, err
}
var networks []apitypes.NetworkResource
for _, network := range r.Networks {
networks = append(networks, convert.BasicNetworkFromGRPC(*network))
}
return networks, nil
}
// GetNetworks returns all current cluster managed networks.
func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) {
return c.getNetworks(nil)
}
// GetNetworksByName returns cluster managed networks by name.
// It is ok to have multiple networks here. #18864
func (c *Cluster) GetNetworksByName(name string) ([]apitypes.NetworkResource, error) {
// Note that swarmapi.GetNetworkRequest.Name is not functional.
// So we cannot just use that with c.GetNetwork.
return c.getNetworks(&swarmapi.ListNetworksRequest_Filters{
Names: []string{name},
})
}
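// attacherKey builds the map key used to track a container's attachment to a network.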
func attacherKey(target, containerID string) string {
return containerID + ":" + target
}
// UpdateAttachment signals the attachment config to the attachment
// waiter who is trying to start or attach the container to the
// network.
func (c *Cluster) UpdateAttachment(target, containerID string, config *network.NetworkingConfig) error {
c.RLock()
attacher, ok := c.attachers[attacherKey(target, containerID)]
c.RUnlock()
if !ok || attacher == nil {
return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target)
}
attacher.attachWaitCh <- config
close(attacher.attachWaitCh)
return nil
}
// WaitForDetachment waits for the container to stop or detach from
// the network.
func (c *Cluster) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error {
c.RLock()
attacher, ok := c.attachers[attacherKey(networkName, containerID)]
if !ok {
attacher, ok = c.attachers[attacherKey(networkID, containerID)]
}
if c.node == nil || c.node.Agent() == nil {
c.RUnlock()
return fmt.Errorf("invalid cluster node while waiting for detachment")
}
agent := c.node.Agent()
c.RUnlock()
if ok && attacher != nil &&
attacher.detachWaitCh != nil &&
attacher.attachCompleteCh != nil {
// Attachment may be in progress still so wait for
// attachment to complete.
select {
case <-attacher.attachCompleteCh:
case <-ctx.Done():
return ctx.Err()
}
if attacher.taskID == taskID {
select {
case <-attacher.detachWaitCh:
case <-ctx.Done():
return ctx.Err()
}
}
}
return agent.ResourceAllocator().DetachNetwork(ctx, taskID)
}
// AttachNetwork generates an attachment request towards the manager.
func (c *Cluster) AttachNetwork(target string, containerID string, addresses []string) (*network.NetworkingConfig, error) {
aKey := attacherKey(target, containerID)
c.Lock()
if c.node == nil || c.node.Agent() == nil {
c.Unlock()
return nil, fmt.Errorf("invalid cluster node while attaching to network")
}
if attacher, ok := c.attachers[aKey]; ok {
c.Unlock()
return attacher.config, nil
}
agent := c.node.Agent()
attachWaitCh := make(chan *network.NetworkingConfig)
detachWaitCh := make(chan struct{})
attachCompleteCh := make(chan struct{})
c.attachers[aKey] = &attacher{
attachWaitCh: attachWaitCh,
attachCompleteCh: attachCompleteCh,
detachWaitCh: detachWaitCh,
}
c.Unlock()
ctx, cancel := c.getRequestContext()
defer cancel()
taskID, err := agent.ResourceAllocator().AttachNetwork(ctx, containerID, target, addresses)
if err != nil {
c.Lock()
delete(c.attachers, aKey)
c.Unlock()
return nil, fmt.Errorf("Could not attach to network %s: %v", target, err)
}
c.Lock()
c.attachers[aKey].taskID = taskID
close(attachCompleteCh)
c.Unlock()
logrus.Debugf("Successfully attached to network %s with task id %s", target, taskID)
release := func() {
ctx, cancel := c.getRequestContext()
defer cancel()
if err := agent.ResourceAllocator().DetachNetwork(ctx, taskID); err != nil {
logrus.Errorf("Failed remove network attachment %s to network %s on allocation failure: %v",
taskID, target, err)
}
}
var config *network.NetworkingConfig
select {
case config = <-attachWaitCh:
case <-ctx.Done():
release()
return nil, fmt.Errorf("attaching to network failed, make sure your network options are correct and check manager logs: %v", ctx.Err())
}
c.Lock()
c.attachers[aKey].config = config
c.Unlock()
logrus.Debugf("Successfully allocated resources on network %s for task id %s", target, taskID)
return config, nil
}
// DetachNetwork unblocks the waiters waiting on WaitForDetachment so
// that a request to detach can be generated towards the manager.
func (c *Cluster) DetachNetwork(target string, containerID string) error {
aKey := attacherKey(target, containerID)
c.Lock()
attacher, ok := c.attachers[aKey]
delete(c.attachers, aKey)
c.Unlock()
if !ok {
return fmt.Errorf("could not find network attachment for container %s to network %s", containerID, target)
}
close(attacher.detachWaitCh)
return nil
}
// CreateNetwork creates a new cluster managed network.
func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return "", c.errNoManager()
}
if runconfig.IsPreDefinedNetwork(s.Name) {
err := fmt.Errorf("%s is a pre-defined network and cannot be created", s.Name)
return "", apierrors.NewRequestForbiddenError(err)
}
ctx, cancel := c.getRequestContext()
defer cancel()
networkSpec := convert.BasicNetworkCreateToGRPC(s)
r, err := c.client.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec})
if err != nil {
return "", err
}
return r.Network.ID, nil
}
// RemoveNetwork removes a cluster network.
func (c *Cluster) RemoveNetwork(input string) error {
c.RLock()
defer c.RUnlock()
if !c.isActiveManager() {
return c.errNoManager()
}
ctx, cancel := c.getRequestContext()
defer cancel()
network, err := getNetwork(ctx, c.client, input)
if err != nil {
return err
}
if _, err := c.client.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}); err != nil {
return err
}
return nil
}
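// populateNetworkID resolves each network reference in the service spec to its swarm network ID.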
func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.ControlClient, s *types.ServiceSpec) error {
// Always prefer NetworkAttachmentConfigs from TaskTemplate,
// but fall back to the service spec for backward compatibility.
networks := s.TaskTemplate.Networks
if len(networks) == 0 {
networks = s.Networks
}
for i, n := range networks {
apiNetwork, err := getNetwork(ctx, client, n.Target)
if err != nil {
if ln, _ := c.config.Backend.FindNetwork(n.Target); ln != nil && !ln.Info().Dynamic() {
err = fmt.Errorf("The network %s cannot be used with services. Only networks scoped to the swarm can be used, such as those created with the overlay driver.", ln.Name())
return apierrors.NewRequestForbiddenError(err)
}
return err
}
networks[i].Target = apiNetwork.ID
}
return nil
}
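// getNetwork resolves a network by full ID first, then by name or ID prefix, failing when the input is ambiguous.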
func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) {
// GetNetwork to match via full ID.
rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input})
if err != nil {
// If any error (including NotFound), ListNetworks to match via ID prefix and full name.
rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{Names: []string{input}}})
if err != nil || len(rl.Networks) == 0 {
rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{IDPrefixes: []string{input}}})
}
if err != nil {
return nil, err
}
if len(rl.Networks) == 0 {
return nil, fmt.Errorf("network %s not found", input)
}
if l := len(rl.Networks); l > 1 {
return nil, fmt.Errorf("network %s is ambiguous (%d matches found)", input, l)
}
return rl.Networks[0], nil
}
return rg.Network, nil
}
// Cleanup stops active swarm node. This is run before daemon shutdown.
func (c *Cluster) Cleanup() {
c.Lock()
node := c.node
if node == nil {
c.Unlock()
return
}
defer c.Unlock()
if c.isActiveManager() {
active, reachable, unreachable, err := c.managerStats()
if err == nil {
singlenode := active && isLastManager(reachable, unreachable)
if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) {
logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable)
}
}
}
c.stopNode()
}
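// managerStats counts reachable and unreachable managers and reports whether this node is one of the reachable ones.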
func (c *Cluster) managerStats() (current bool, reachable int, unreachable int, err error) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
nodes, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{})
if err != nil {
return false, 0, 0, err
}
for _, n := range nodes.Nodes {
if n.ManagerStatus != nil {
if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_REACHABLE {
reachable++
if n.ID == c.node.NodeID() {
current = true
}
}
if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_UNREACHABLE {
unreachable++
}
}
}
return
}
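// validateAndSanitizeInitRequest normalizes the listen address and enforces the "default" swarm spec name.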
func validateAndSanitizeInitRequest(req *types.InitRequest) error {
var err error
req.ListenAddr, err = validateAddr(req.ListenAddr)
if err != nil {
return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err)
}
if req.Spec.Annotations.Name == "" {
req.Spec.Annotations.Name = "default"
} else if req.Spec.Annotations.Name != "default" {
return errors.New(`swarm spec must be named "default"`)
}
return nil
}
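// validateAndSanitizeJoinRequest normalizes the listen and remote addresses and requires at least one remote.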
func validateAndSanitizeJoinRequest(req *types.JoinRequest) error {
var err error
req.ListenAddr, err = validateAddr(req.ListenAddr)
if err != nil {
return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err)
}
if len(req.RemoteAddrs) == 0 {
return fmt.Errorf("at least 1 RemoteAddr is required to join")
}
for i := range req.RemoteAddrs {
req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i])
if err != nil {
return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err)
}
}
return nil
}
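// validateAddr normalizes addr to host:port form; on parse failure the raw address is returned unchanged rather than erroring out.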
func validateAddr(addr string) (string, error) {
if addr == "" {
return addr, fmt.Errorf("invalid empty address")
}
newaddr, err := opts.ParseTCPAddr(addr, defaultAddr)
if err != nil {
return addr, nil
}
return strings.TrimPrefix(newaddr, "tcp://"), nil
}
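// initClusterSpec waits for the control socket to come up, then merges the user-provided spec into the newly created cluster's defaults.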
func initClusterSpec(node *node, spec types.Spec) error {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
for conn := range node.ListenControlSocket(ctx) {
if ctx.Err() != nil {
return ctx.Err()
}
if conn != nil {
client := swarmapi.NewControlClient(conn)
var cluster *swarmapi.Cluster
for i := 0; ; i++ {
lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{})
if err != nil {
return fmt.Errorf("error on listing clusters: %v", err)
}
if len(lcr.Clusters) == 0 {
if i < 10 {
time.Sleep(200 * time.Millisecond)
continue
}
return fmt.Errorf("empty list of clusters was returned")
}
cluster = lcr.Clusters[0]
break
}
// In init, we take the initial default values from swarmkit and merge
// any non-nil or non-zero value from spec into the GRPC spec, leaving
// the remaining defaults alone.
// Note that this is different from Update(), where we expect the user
// to specify the complete spec of the cluster (as they already know
// the existing one and know which fields to update).
clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec)
if err != nil {
return fmt.Errorf("error updating cluster settings: %v", err)
}
_, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{
ClusterID: cluster.ID,
ClusterVersion: &cluster.Meta.Version,
Spec: &clusterSpec,
})
if err != nil {
return fmt.Errorf("error updating cluster settings: %v", err)
}
return nil
}
}
return ctx.Err()
}
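// detectLockedError maps swarmkit's invalid-unlock-key error to ErrSwarmLocked.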
func detectLockedError(err error) error {
if err == swarmnode.ErrInvalidUnlockKey {
return errors.WithStack(ErrSwarmLocked)
}
return err
}
|
[
"\"DOCKER_SERVICE_PREFER_OFFLINE_IMAGE\"",
"\"DOCKER_SERVICE_PREFER_OFFLINE_IMAGE\""
] |
[] |
[
"DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
] |
[]
|
["DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"]
|
go
| 1 | 0 | |
preload_test.go
|
package gorm_test
import (
"database/sql"
"encoding/json"
"os"
"reflect"
"testing"
"github.com/bookreport/gorm"
)
func getPreloadUser(name string) *User {
return getPreparedUser(name, "Preload")
}
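// checkUserHasPreloadData asserts that every association on user was preloaded.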
func checkUserHasPreloadData(user User, t *testing.T) {
u := getPreloadUser(user.Name)
if user.BillingAddress.Address1 != u.BillingAddress.Address1 {
t.Error("Failed to preload user's BillingAddress")
}
if user.ShippingAddress.Address1 != u.ShippingAddress.Address1 {
t.Error("Failed to preload user's ShippingAddress")
}
if user.CreditCard.Number != u.CreditCard.Number {
t.Error("Failed to preload user's CreditCard")
}
if user.Company.Name != u.Company.Name {
t.Error("Failed to preload user's Company")
}
if len(user.Emails) != len(u.Emails) {
t.Error("Failed to preload user's Emails")
} else {
var found int
for _, e1 := range u.Emails {
for _, e2 := range user.Emails {
if e1.Email == e2.Email {
found++
break
}
}
}
if found != len(u.Emails) {
t.Error("Failed to preload user's email details")
}
}
}
func TestPreload(t *testing.T) {
user1 := getPreloadUser("user1")
DB.Save(user1)
preloadDB := DB.Where("role = ?", "Preload").Preload("BillingAddress").Preload("ShippingAddress").
Preload("CreditCard").Preload("Emails").Preload("Company")
var user User
preloadDB.Find(&user)
checkUserHasPreloadData(user, t)
user2 := getPreloadUser("user2")
DB.Save(user2)
user3 := getPreloadUser("user3")
DB.Save(user3)
var users []User
preloadDB.Find(&users)
for _, user := range users {
checkUserHasPreloadData(user, t)
}
var users2 []*User
preloadDB.Find(&users2)
for _, user := range users2 {
checkUserHasPreloadData(*user, t)
}
var users3 []*User
preloadDB.Preload("Emails", "email = ?", user3.Emails[0].Email).Find(&users3)
for _, user := range users3 {
if user.Name == user3.Name {
if len(user.Emails) != 1 {
t.Errorf("should only preload one emails for user3 when with condition")
}
} else if len(user.Emails) != 0 {
t.Errorf("should not preload any emails for other users when with condition")
} else if user.Emails == nil {
t.Errorf("should return an empty slice to indicate zero results")
}
}
}
func TestAutoPreload(t *testing.T) {
user1 := getPreloadUser("auto_user1")
DB.Save(user1)
preloadDB := DB.Set("gorm:auto_preload", true).Where("role = ?", "Preload")
var user User
preloadDB.Find(&user)
checkUserHasPreloadData(user, t)
user2 := getPreloadUser("auto_user2")
DB.Save(user2)
var users []User
preloadDB.Find(&users)
for _, user := range users {
checkUserHasPreloadData(user, t)
}
var users2 []*User
preloadDB.Find(&users2)
for _, user := range users2 {
checkUserHasPreloadData(*user, t)
}
}
func TestAutoPreloadFalseDoesntPreload(t *testing.T) {
user1 := getPreloadUser("auto_user1")
DB.Save(user1)
preloadDB := DB.Set("gorm:auto_preload", false).Where("role = ?", "Preload")
var user User
preloadDB.Find(&user)
if user.BillingAddress.Address1 != "" {
t.Error("AutoPreload was set to fasle, but still fetched data")
}
user2 := getPreloadUser("auto_user2")
DB.Save(user2)
var users []User
preloadDB.Find(&users)
for _, user := range users {
if user.BillingAddress.Address1 != "" {
t.Error("AutoPreload was set to fasle, but still fetched data")
}
}
}
func TestNestedPreload1(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1 Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{Level2: Level2{Level1: Level1{Value: "value"}}}
if err := DB.Create(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got, "name = ?", "not_found").Error; err != gorm.ErrRecordNotFound {
t.Error(err)
}
}
func TestNestedPreload2(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1s []*Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2s []Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Level2s: []Level2{
{
Level1s: []*Level1{
{Value: "value1"},
{Value: "value2"},
},
},
{
Level1s: []*Level1{
{Value: "value3"},
},
},
},
}
if err := DB.Create(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload3(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1 Level1
Level3ID uint
}
Level3 struct {
Name string
ID uint
Level2s []Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Level2s: []Level2{
{Level1: Level1{Value: "value1"}},
{Level1: Level1{Value: "value2"}},
},
}
if err := DB.Create(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload4(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value1"},
{Value: "value2"},
},
},
}
if err := DB.Create(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
// Slice: []Level3
func TestNestedPreload5(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1 Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{Level2: Level2{Level1: Level1{Value: "value"}}}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{Level2: Level2{Level1: Level1{Value: "value2"}}}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload6(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2s []Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{
Level2s: []Level2{
{
Level1s: []Level1{
{Value: "value1"},
{Value: "value2"},
},
},
{
Level1s: []Level1{
{Value: "value3"},
},
},
},
}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{
Level2s: []Level2{
{
Level1s: []Level1{
{Value: "value3"},
{Value: "value4"},
},
},
{
Level1s: []Level1{
{Value: "value5"},
},
},
},
}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload7(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1 Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2s []Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{
Level2s: []Level2{
{Level1: Level1{Value: "value1"}},
{Level1: Level1{Value: "value2"}},
},
}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{
Level2s: []Level2{
{Level1: Level1{Value: "value3"}},
{Level1: Level1{Value: "value4"}},
},
}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload8(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value1"},
{Value: "value2"},
},
},
}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value3"},
{Value: "value4"},
},
},
}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload9(t *testing.T) {
type (
Level0 struct {
ID uint
Value string
Level1ID uint
}
Level1 struct {
ID uint
Value string
Level2ID uint
Level2_1ID uint
Level0s []Level0
}
Level2 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level2_1 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
Level2_1 Level2_1
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level2_1{})
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level0{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}, &Level2_1{}, &Level0{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value1"},
{Value: "value2"},
},
},
Level2_1: Level2_1{
Level1s: []Level1{
{
Value: "value1-1",
Level0s: []Level0{{Value: "Level0-1"}},
},
{
Value: "value2-2",
Level0s: []Level0{{Value: "Level0-2"}},
},
},
},
}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value3"},
{Value: "value4"},
},
},
Level2_1: Level2_1{
Level1s: []Level1{
{
Value: "value3-3",
Level0s: []Level0{},
},
{
Value: "value4-4",
Level0s: []Level0{},
},
},
},
}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2").Preload("Level2.Level1s").Preload("Level2_1").Preload("Level2_1.Level1s").Preload("Level2_1.Level1s.Level0s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
type LevelA1 struct {
ID uint
Value string
}
type LevelA2 struct {
ID uint
Value string
LevelA3s []*LevelA3
}
type LevelA3 struct {
ID uint
Value string
LevelA1ID sql.NullInt64
LevelA1 *LevelA1
LevelA2ID sql.NullInt64
LevelA2 *LevelA2
}
func TestNestedPreload10(t *testing.T) {
DB.DropTableIfExists(&LevelA3{})
DB.DropTableIfExists(&LevelA2{})
DB.DropTableIfExists(&LevelA1{})
if err := DB.AutoMigrate(&LevelA1{}, &LevelA2{}, &LevelA3{}).Error; err != nil {
t.Error(err)
}
levelA1 := &LevelA1{Value: "foo"}
if err := DB.Save(levelA1).Error; err != nil {
t.Error(err)
}
want := []*LevelA2{
{
Value: "bar",
LevelA3s: []*LevelA3{
{
Value: "qux",
LevelA1: levelA1,
},
},
},
{
Value: "bar 2",
LevelA3s: []*LevelA3{},
},
}
for _, levelA2 := range want {
if err := DB.Save(levelA2).Error; err != nil {
t.Error(err)
}
}
var got []*LevelA2
if err := DB.Preload("LevelA3s.LevelA1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
type LevelB1 struct {
ID uint
Value string
LevelB3s []*LevelB3
}
type LevelB2 struct {
ID uint
Value string
}
type LevelB3 struct {
ID uint
Value string
LevelB1ID sql.NullInt64
LevelB1 *LevelB1
LevelB2s []*LevelB2 `gorm:"many2many:levelb1_levelb3_levelb2s"`
}
func TestNestedPreload11(t *testing.T) {
DB.DropTableIfExists(&LevelB2{})
DB.DropTableIfExists(&LevelB3{})
DB.DropTableIfExists(&LevelB1{})
if err := DB.AutoMigrate(&LevelB1{}, &LevelB2{}, &LevelB3{}).Error; err != nil {
t.Error(err)
}
levelB1 := &LevelB1{Value: "foo"}
if err := DB.Create(levelB1).Error; err != nil {
t.Error(err)
}
levelB3 := &LevelB3{
Value: "bar",
LevelB1ID: sql.NullInt64{Valid: true, Int64: int64(levelB1.ID)},
LevelB2s: []*LevelB2{},
}
if err := DB.Create(levelB3).Error; err != nil {
t.Error(err)
}
levelB1.LevelB3s = []*LevelB3{levelB3}
want := []*LevelB1{levelB1}
var got []*LevelB1
if err := DB.Preload("LevelB3s.LevelB2s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
type LevelC1 struct {
ID uint
Value string
LevelC2ID uint
}
type LevelC2 struct {
ID uint
Value string
LevelC1 LevelC1
}
type LevelC3 struct {
ID uint
Value string
LevelC2ID uint
LevelC2 LevelC2
}
func TestNestedPreload12(t *testing.T) {
DB.DropTableIfExists(&LevelC2{})
DB.DropTableIfExists(&LevelC3{})
DB.DropTableIfExists(&LevelC1{})
if err := DB.AutoMigrate(&LevelC1{}, &LevelC2{}, &LevelC3{}).Error; err != nil {
t.Error(err)
}
level2 := LevelC2{
Value: "c2",
LevelC1: LevelC1{
Value: "c1",
},
}
DB.Create(&level2)
want := []LevelC3{
{
Value: "c3-1",
LevelC2: level2,
}, {
Value: "c3-2",
LevelC2: level2,
},
}
for i := range want {
if err := DB.Create(&want[i]).Error; err != nil {
t.Error(err)
}
}
var got []LevelC3
if err := DB.Preload("LevelC2").Preload("LevelC2.LevelC1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestManyToManyPreloadWithMultiPrimaryKeys(t *testing.T) {
if dialect := os.Getenv("GORM_DIALECT"); dialect == "" || dialect == "sqlite" || dialect == "mssql" {
return
}
type (
Level1 struct {
ID uint `gorm:"primary_key;"`
LanguageCode string `gorm:"primary_key"`
Value string
}
Level2 struct {
ID uint `gorm:"primary_key;"`
LanguageCode string `gorm:"primary_key"`
Value string
Level1s []Level1 `gorm:"many2many:levels;"`
}
)
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists("levels")
if err := DB.AutoMigrate(&Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level2{Value: "Bob", LanguageCode: "ru", Level1s: []Level1{
{Value: "ru", LanguageCode: "ru"},
{Value: "en", LanguageCode: "en"},
}}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
want2 := Level2{Value: "Tom", LanguageCode: "zh", Level1s: []Level1{
{Value: "zh", LanguageCode: "zh"},
{Value: "de", LanguageCode: "de"},
}}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
var got Level2
if err := DB.Preload("Level1s").Find(&got, "value = ?", "Bob").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
var got2 Level2
if err := DB.Preload("Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got2, want2) {
t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2))
}
var got3 []Level2
if err := DB.Preload("Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got3, []Level2{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level2{got, got2}))
}
var got4 []Level2
if err := DB.Preload("Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
var ruLevel1 Level1
var zhLevel1 Level1
DB.First(&ruLevel1, "value = ?", "ru")
DB.First(&zhLevel1, "value = ?", "zh")
got.Level1s = []Level1{ruLevel1}
got2.Level1s = []Level1{zhLevel1}
if !reflect.DeepEqual(got4, []Level2{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level2{got, got2}))
}
if err := DB.Preload("Level1s").Find(&got4, "value IN (?)", []string{"non-existing"}).Error; err != nil {
t.Error(err)
}
}
func TestManyToManyPreloadForNestedPointer(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:levels;"`
}
Level3 struct {
ID uint
Value string
Level2ID sql.NullInt64
Level2 *Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists("levels")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Value: "Bob",
Level2: &Level2{
Value: "Foo",
Level1s: []*Level1{
{Value: "ru"},
{Value: "en"},
},
},
}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
want2 := Level3{
Value: "Tom",
Level2: &Level2{
Value: "Bar",
Level1s: []*Level1{
{Value: "zh"},
{Value: "de"},
},
},
}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "Bob").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
var got2 Level3
if err := DB.Preload("Level2.Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got2, want2) {
t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2))
}
var got3 []Level3
if err := DB.Preload("Level2.Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got3, []Level3{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level3{got, got2}))
}
var got4 []Level3
if err := DB.Preload("Level2.Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
var got5 Level3
DB.Preload("Level2.Level1s").Find(&got5, "value = ?", "bogus")
var ruLevel1 Level1
var zhLevel1 Level1
DB.First(&ruLevel1, "value = ?", "ru")
DB.First(&zhLevel1, "value = ?", "zh")
got.Level2.Level1s = []*Level1{&ruLevel1}
got2.Level2.Level1s = []*Level1{&zhLevel1}
if !reflect.DeepEqual(got4, []Level3{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level3{got, got2}))
}
}
func TestNestedManyToManyPreload(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:level1_level2;"`
}
Level3 struct {
ID uint
Value string
Level2s []Level2 `gorm:"many2many:level2_level3;"`
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists("level1_level2")
DB.DropTableIfExists("level2_level3")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Value: "Level3",
Level2s: []Level2{
{
Value: "Bob",
Level1s: []*Level1{
{Value: "ru"},
{Value: "en"},
},
}, {
Value: "Tom",
Level1s: []*Level1{
{Value: "zh"},
{Value: "de"},
},
},
},
}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2s").Preload("Level2s.Level1s").Find(&got, "value = ?", "Level3").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
if err := DB.Preload("Level2s.Level1s").Find(&got, "value = ?", "not_found").Error; err != gorm.ErrRecordNotFound {
t.Error(err)
}
}
func TestNestedManyToManyPreload2(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:level1_level2;"`
}
Level3 struct {
ID uint
Value string
Level2ID sql.NullInt64
Level2 *Level2
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists("level1_level2")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Value: "Level3",
Level2: &Level2{
Value: "Bob",
Level1s: []*Level1{
{Value: "ru"},
{Value: "en"},
},
},
}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "Level3").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "not_found").Error; err != gorm.ErrRecordNotFound {
t.Error(err)
}
}
func TestNestedManyToManyPreload3(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:level1_level2;"`
}
Level3 struct {
ID uint
Value string
Level2ID sql.NullInt64
Level2 *Level2
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists("level1_level2")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
level1Zh := &Level1{Value: "zh"}
level1Ru := &Level1{Value: "ru"}
level1En := &Level1{Value: "en"}
level21 := &Level2{
Value: "Level2-1",
Level1s: []*Level1{level1Zh, level1Ru},
}
level22 := &Level2{
Value: "Level2-2",
Level1s: []*Level1{level1Zh, level1En},
}
wants := []*Level3{
{
Value: "Level3-1",
Level2: level21,
},
{
Value: "Level3-2",
Level2: level22,
},
{
Value: "Level3-3",
Level2: level21,
},
}
for _, want := range wants {
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
}
var gots []*Level3
if err := DB.Preload("Level2.Level1s", func(db *gorm.DB) *gorm.DB {
return db.Order("level1.id ASC")
}).Find(&gots).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(gots, wants) {
t.Errorf("got %s; want %s", toJSONString(gots), toJSONString(wants))
}
}
func TestNestedManyToManyPreload3ForStruct(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []Level1 `gorm:"many2many:level1_level2;"`
}
Level3 struct {
ID uint
Value string
Level2ID sql.NullInt64
Level2 Level2
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists("level1_level2")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
level1Zh := Level1{Value: "zh"}
level1Ru := Level1{Value: "ru"}
level1En := Level1{Value: "en"}
level21 := Level2{
Value: "Level2-1",
Level1s: []Level1{level1Zh, level1Ru},
}
level22 := Level2{
Value: "Level2-2",
Level1s: []Level1{level1Zh, level1En},
}
wants := []*Level3{
{
Value: "Level3-1",
Level2: level21,
},
{
Value: "Level3-2",
Level2: level22,
},
{
Value: "Level3-3",
Level2: level21,
},
}
for _, want := range wants {
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
}
var gots []*Level3
if err := DB.Preload("Level2.Level1s", func(db *gorm.DB) *gorm.DB {
return db.Order("level1.id ASC")
}).Find(&gots).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(gots, wants) {
t.Errorf("got %s; want %s", toJSONString(gots), toJSONString(wants))
}
}
func TestNestedManyToManyPreload4(t *testing.T) {
type (
Level4 struct {
ID uint
Value string
Level3ID uint
}
Level3 struct {
ID uint
Value string
Level4s []*Level4
}
Level2 struct {
ID uint
Value string
Level3s []*Level3 `gorm:"many2many:level2_level3;"`
}
Level1 struct {
ID uint
Value string
Level2s []*Level2 `gorm:"many2many:level1_level2;"`
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level4{})
DB.DropTableIfExists("level1_level2")
DB.DropTableIfExists("level2_level3")
dummy := Level1{
Value: "Level1",
Level2s: []*Level2{{
Value: "Level2",
Level3s: []*Level3{{
Value: "Level3",
Level4s: []*Level4{{
Value: "Level4",
}},
}},
}},
}
if err := DB.AutoMigrate(&Level4{}, &Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
if err := DB.Save(&dummy).Error; err != nil {
t.Error(err)
}
var level1 Level1
if err := DB.Preload("Level2s").Preload("Level2s.Level3s").Preload("Level2s.Level3s.Level4s").First(&level1).Error; err != nil {
t.Error(err)
}
}
func TestManyToManyPreloadForPointer(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:levels;"`
}
)
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists("levels")
if err := DB.AutoMigrate(&Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level2{Value: "Bob", Level1s: []*Level1{
{Value: "ru"},
{Value: "en"},
}}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
want2 := Level2{Value: "Tom", Level1s: []*Level1{
{Value: "zh"},
{Value: "de"},
}}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
var got Level2
if err := DB.Preload("Level1s").Find(&got, "value = ?", "Bob").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
var got2 Level2
if err := DB.Preload("Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got2, want2) {
t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2))
}
var got3 []Level2
if err := DB.Preload("Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got3, []Level2{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level2{got, got2}))
}
var got4 []Level2
if err := DB.Preload("Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
var got5 Level2
DB.Preload("Level1s").First(&got5, "value = ?", "bogus")
var ruLevel1 Level1
var zhLevel1 Level1
DB.First(&ruLevel1, "value = ?", "ru")
DB.First(&zhLevel1, "value = ?", "zh")
got.Level1s = []*Level1{&ruLevel1}
got2.Level1s = []*Level1{&zhLevel1}
if !reflect.DeepEqual(got4, []Level2{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level2{got, got2}))
}
}
func TestNilPointerSlice(t *testing.T) {
type (
Level3 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level3ID uint
Level3 *Level3
}
Level1 struct {
ID uint
Value string
Level2ID uint
Level2 *Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level1{
Value: "Bob",
Level2: &Level2{
Value: "en",
Level3: &Level3{
Value: "native",
},
},
}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
want2 := Level1{
Value: "Tom",
Level2: nil,
}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
var got []Level1
if err := DB.Preload("Level2").Preload("Level2.Level3").Find(&got).Error; err != nil {
t.Error(err)
}
if len(got) != 2 {
t.Errorf("got %v items, expected 2", len(got))
}
if !reflect.DeepEqual(got[0], want) && !reflect.DeepEqual(got[1], want) {
t.Errorf("got %s; want array containing %s", toJSONString(got), toJSONString(want))
}
if !reflect.DeepEqual(got[0], want2) && !reflect.DeepEqual(got[1], want2) {
t.Errorf("got %s; want array containing %s", toJSONString(got), toJSONString(want2))
}
}
func TestNilPointerSlice2(t *testing.T) {
type (
Level4 struct {
ID uint
}
Level3 struct {
ID uint
Level4ID sql.NullInt64 `sql:"index"`
Level4 *Level4
}
Level2 struct {
ID uint
Level3s []*Level3 `gorm:"many2many:level2_level3s"`
}
Level1 struct {
ID uint
Level2ID sql.NullInt64 `sql:"index"`
Level2 *Level2
}
)
DB.DropTableIfExists(new(Level4))
DB.DropTableIfExists(new(Level3))
DB.DropTableIfExists(new(Level2))
DB.DropTableIfExists(new(Level1))
if err := DB.AutoMigrate(new(Level4), new(Level3), new(Level2), new(Level1)).Error; err != nil {
t.Error(err)
}
want := new(Level1)
if err := DB.Save(want).Error; err != nil {
t.Error(err)
}
got := new(Level1)
err := DB.Preload("Level2.Level3s.Level4").Last(&got).Error
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestPrefixedPreloadDuplication(t *testing.T) {
type (
Level4 struct {
ID uint
Name string
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level4s []*Level4
}
Level2 struct {
ID uint
Name string
Level3ID sql.NullInt64 `sql:"index"`
Level3 *Level3
}
Level1 struct {
ID uint
Name string
Level2ID sql.NullInt64 `sql:"index"`
Level2 *Level2
}
)
DB.DropTableIfExists(new(Level3))
DB.DropTableIfExists(new(Level4))
DB.DropTableIfExists(new(Level2))
DB.DropTableIfExists(new(Level1))
if err := DB.AutoMigrate(new(Level3), new(Level4), new(Level2), new(Level1)).Error; err != nil {
t.Error(err)
}
lvl := &Level3{}
if err := DB.Save(lvl).Error; err != nil {
t.Error(err)
}
sublvl1 := &Level4{Level3ID: lvl.ID}
if err := DB.Save(sublvl1).Error; err != nil {
t.Error(err)
}
sublvl2 := &Level4{Level3ID: lvl.ID}
if err := DB.Save(sublvl2).Error; err != nil {
t.Error(err)
}
lvl.Level4s = []*Level4{sublvl1, sublvl2}
want1 := Level1{
Level2: &Level2{
Level3: lvl,
},
}
if err := DB.Save(&want1).Error; err != nil {
t.Error(err)
}
want2 := Level1{
Level2: &Level2{
Level3: lvl,
},
}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
want := []Level1{want1, want2}
var got []Level1
err := DB.Preload("Level2.Level3.Level4s").Find(&got).Error
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestPreloadManyToManyCallbacks(t *testing.T) {
type (
Level2 struct {
ID uint
Name string
}
Level1 struct {
ID uint
Name string
Level2s []Level2 `gorm:"many2many:level1_level2s;AssociationForeignKey:ID;ForeignKey:ID"`
}
)
DB.DropTableIfExists("level1_level2s")
DB.DropTableIfExists(new(Level1))
DB.DropTableIfExists(new(Level2))
if err := DB.AutoMigrate(new(Level1), new(Level2)).Error; err != nil {
t.Error(err)
}
lvl := Level1{
Name: "l1",
Level2s: []Level2{
{Name: "l2-1"}, {Name: "l2-2"},
},
}
DB.Save(&lvl)
called := 0
DB.Callback().Query().After("gorm:query").Register("TestPreloadManyToManyCallbacks", func(scope *gorm.Scope) {
called = called + 1
})
DB.Preload("Level2s").First(&Level1{}, "id = ?", lvl.ID)
if called != 3 {
t.Errorf("Wanted callback to be called 3 times but got %d", called)
}
}
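// toJSONString renders v as indented JSON for readable test failure messages.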
func toJSONString(v interface{}) []byte {
r, _ := json.MarshalIndent(v, "", " ")
return r
}
|
[
"\"GORM_DIALECT\""
] |
[] |
[
"GORM_DIALECT"
] |
[]
|
["GORM_DIALECT"]
|
go
| 1 | 0 | |
internal/util/k8s.go
|
/*
Copyright 2020 The CephCSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"os"
snapclient "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// NewK8sClient creates a Kubernetes client.
func NewK8sClient() (*kubernetes.Clientset, error) {
config, err := getRestConfig()
if err != nil {
return nil, err
}
client, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to create client: %w", err)
}
return client, nil
}
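// NewSnapClient creates a snapshot client.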
func NewSnapClient() (*snapclient.SnapshotV1Client, error) {
config, err := getRestConfig()
if err != nil {
return nil, err
}
client, err := snapclient.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to create client: %w", err)
}
return client, nil
}
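// getRestConfig builds a rest.Config from the kubeconfig at KUBERNETES_CONFIG_PATH if set, otherwise from the in-cluster service account.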
func getRestConfig() (*rest.Config, error) {
var cfg *rest.Config
var err error
cPath := os.Getenv("KUBERNETES_CONFIG_PATH")
if cPath != "" {
cfg, err = clientcmd.BuildConfigFromFlags("", cPath)
if err != nil {
return nil, fmt.Errorf("failed to get cluster config from %q: %w", cPath, err)
}
} else {
cfg, err = rest.InClusterConfig()
if err != nil {
return nil, fmt.Errorf("failed to get cluster config: %w", err)
}
}
return cfg, nil
}
|
[
"\"KUBERNETES_CONFIG_PATH\""
] |
[] |
[
"KUBERNETES_CONFIG_PATH"
] |
[]
|
["KUBERNETES_CONFIG_PATH"]
|
go
| 1 | 0 | |
autolook.py
|
import socket, os, json
from dotenv import load_dotenv
def path(filename):
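"""Return filename as an absolute path, resolving relative names against the current directory."""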
if not os.path.isabs(filename):
return os.path.join(os.getcwd(),filename)
return filename
def send(subject, content='', attachs=()):
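"""Serialize subject, content and attachment paths to JSON and send them to localhost:AUTOLOOK_PORT."""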
envpath = os.path.join(os.path.dirname(os.path.abspath(os.path.realpath(__file__))),'.env')
load_dotenv(dotenv_path=envpath)
address = ('localhost',int(os.getenv('AUTOLOOK_PORT')))
data = {'subject': subject, 'content': content}
data['attachs'] = [path(f) for f in attachs]
data = json.dumps(data)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.connect(address)
except OSError:
print(f'connection error: {address}')
return False
try:
s.sendall(bytearray(data,'utf-8'))
return True
except OSError:
print('sending error: failed')
return False
|
[] |
[] |
[
"AUTOLOOK_PORT"
] |
[]
|
["AUTOLOOK_PORT"]
|
python
| 1 | 0 | |
test_transformers.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021 jasoncheung <jasoncheung@iZwz95ffbqqbe9pkek5f3tZ>
#
# Distributed under terms of the MIT license.
"""
"""
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForPreTraining
from transformers import TFElectraForSequenceClassification, ElectraConfig
from transformers import TFTrainer, TFTrainingArguments
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
dir_path = '/home/jasoncheung/project/trans/trans_models/electra_chinese_small'
tokenizer = AutoTokenizer.from_pretrained("hfl/chinese-electra-180g-small-discriminator")
config = ElectraConfig.from_pretrained(
'hfl/chinese-electra-180g-small-discriminator', num_labels=5,)
# model = TFAutoModelForPreTraining.from_pretrained("hfl/chinese-electra-180g-small-discriminator")
# model = TFElectraForSequenceClassification.from_pretrained("hfl/chinese-electra-180g-small-discriminator")
# inputs = tokenizer("你听明白了吗", return_tensors="tf")
# outputs = model(**inputs)
# print(inputs, outputs)
# load datas
path_datas = '/home/jasoncheung/project/trans/trans_datas/weibo_senti_100k.csv'
df = pd.read_csv(path_datas)
datas = df.review.tolist()
labels = df.label.tolist()
train_datas, test_datas, train_labels, test_labels = train_test_split(datas, labels, test_size=0.1)
train_datas, val_datas, train_labels, val_labels = train_test_split(train_datas, train_labels, test_size=0.1)
train_encodings = tokenizer(train_datas, truncation=True, padding='max_length', max_length=180)
val_encodings = tokenizer(val_datas, truncation=True, padding='max_length', max_length=180)
test_encodings = tokenizer(test_datas, truncation=True, padding='max_length', max_length=180)
train_dataset = tf.data.Dataset.from_tensor_slices((dict(train_encodings), train_labels))
val_dataset = tf.data.Dataset.from_tensor_slices((dict(val_encodings), val_labels))
test_dataset = tf.data.Dataset.from_tensor_slices((dict(test_encodings), test_labels))
# training
training_args = TFTrainingArguments(
do_train=True,
do_eval=True,
output_dir='./results', # output directory
num_train_epochs=3, # total # of training epochs
per_device_train_batch_size=32, # batch size per device during training
per_device_eval_batch_size=64, # batch size for evaluation
warmup_steps=500, # number of warmup steps for learning rate scheduler
weight_decay=0.01, # strength of weight decay
logging_dir='./logs', # directory for storing logs
save_total_limit=5,
evaluation_strategy='steps',
eval_steps=250,
load_best_model_at_end=True,
disable_tqdm=False,
max_steps=1000,
)
with training_args.strategy.scope():
model = TFElectraForSequenceClassification.from_pretrained(dir_path,
num_labels=2,
)
# model.load_weights('/home/jasoncheung/project/trans/results/checkpoint/ckpt-18.index')
def compute_metrics(pred):
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='binary')
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
'f1': f1,
'precision': precision,
'recall': recall
}
trainer = TFTrainer(
model=model, # the instantiated 🤗 Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # tensorflow_datasets training dataset
eval_dataset=val_dataset, # tensorflow_datasets evaluation dataset
compute_metrics=compute_metrics,
)
# trainer.train()
'''
dir_path = '/home/jasoncheung/project/trans/trans_models/electra_chinese_small/'
model.save_pretrained(dir_path)
config.save_pretrained(dir_path)
tokenizer.save_vocabulary(dir_path)
'''
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
backend/shopping-cart-service/add_to_cart.py
|
import json
import os
import boto3
from aws_lambda_powertools import Logger, Metrics, Tracer
from shared import (
NotFoundException,
generate_ttl,
get_cart_id,
get_headers,
get_user_sub,
)
from utils import get_product_from_external_service
logger = Logger()
tracer = Tracer()
metrics = Metrics()
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(os.environ["TABLE_NAME"])
product_service_url = os.environ["PRODUCT_SERVICE_URL"]
@metrics.log_metrics(capture_cold_start_metric=True)
@logger.inject_lambda_context(log_event=True)
@tracer.capture_lambda_handler
def lambda_handler(event, context):
"""
Add the provided quantity of a product to a cart. Where an item already exists in the cart, the quantities
will be summed.
"""
try:
request_payload = json.loads(event["body"])
except KeyError:
return {
"statusCode": 400,
"headers": get_headers(""),
"body": json.dumps({"message": "No Request payload"}),
}
product_id = request_payload["productId"]
quantity = request_payload.get("quantity", 1)
cart_id, _ = get_cart_id(event["headers"])
# Because this method can be called anonymously, we need to check whether there's a logged-in user
user_sub = None
jwt_token = event["headers"].get("Authorization")
if jwt_token:
user_sub = get_user_sub(jwt_token)
try:
product = get_product_from_external_service(product_id)
except NotFoundException:
logger.info("No product found with product_id: %s", product_id)
return {
"statusCode": 404,
"headers": get_headers(cart_id=cart_id),
"body": json.dumps({"message": "product not found"}),
}
if user_sub:
logger.info("Authenticated user")
pk = f"user#{user_sub}"
ttl = generate_ttl(7)  # Set a longer ttl for logged-in users - we want to keep their cart for longer.
else:
logger.info("Unauthenticated user")
pk = f"cart#{cart_id}"
ttl = generate_ttl()
if int(quantity) < 0:
table.update_item(
Key={"pk": pk, "sk": f"product#{product_id}"},
ExpressionAttributeNames={
"#quantity": "quantity",
"#expirationTime": "expirationTime",
"#productDetail": "productDetail",
},
ExpressionAttributeValues={
":val": quantity,
":ttl": ttl,
":productDetail": product,
":limit": abs(quantity),
},
UpdateExpression="ADD #quantity :val SET #expirationTime = :ttl, #productDetail = :productDetail",
# Prevent quantity less than 0
ConditionExpression="quantity >= :limit",
)
else:
table.update_item(
Key={"pk": pk, "sk": f"product#{product_id}"},
ExpressionAttributeNames={
"#quantity": "quantity",
"#expirationTime": "expirationTime",
"#productDetail": "productDetail",
},
ExpressionAttributeValues={
":val": quantity,
":ttl": generate_ttl(),
":productDetail": product,
},
UpdateExpression="ADD #quantity :val SET #expirationTime = :ttl, #productDetail = :productDetail",
)
metrics.add_metric(name="CartUpdated", unit="Count", value=1)
return {
"statusCode": 200,
"headers": get_headers(cart_id),
"body": json.dumps(
{"productId": product_id, "message": "product added to cart"}
),
}
|
[] |
[] |
[
"TABLE_NAME",
"PRODUCT_SERVICE_URL"
] |
[]
|
["TABLE_NAME", "PRODUCT_SERVICE_URL"]
|
python
| 2 | 0 | |
run.py
|
# App / Lib imports
import os
from app import create_app
from app import Config
# Declare app
app = create_app(os.getenv('APP_ENV') or 'DEVELOPMENT')
#######################
# For Dev only #
#######################
# Run the development server when this file is executed directly.
if __name__ == '__main__':
app.run(host=Config.HOST, port=Config.PORT, debug=Config.DEBUG)
|
[] |
[] |
[
"APP_ENV"
] |
[]
|
["APP_ENV"]
|
python
| 1 | 0 | |
main.py
|
import os
import traceback
import discord
from discord.ext import commands
from modules.grouping import MakeTeam
token = os.environ['DISCORD_BOT_TOKEN']
bot = commands.Bot(command_prefix='/')
"""起動処理"""
@bot.event
async def on_ready():
print('-----Logged in info-----')
print(bot.user.name)
print(bot.user.id)
print(discord.__version__)
print('------------------------')
"""コマンド実行"""
# メンバー数が均等になるチーム分け
@bot.command()
async def team(ctx, specified_num=2):
make_team = MakeTeam()
remainder_flag = 'true'
msg = make_team.make_party_num(ctx,specified_num,remainder_flag)
await ctx.channel.send(msg)
# Team split where member counts may not come out even
@bot.command()
async def team_norem(ctx, specified_num=2):
make_team = MakeTeam()
msg = make_team.make_party_num(ctx,specified_num)
await ctx.channel.send(msg)
# Team split with a specified number of members per group
@bot.command()
async def group(ctx, specified_num=1):
make_team = MakeTeam()
msg = make_team.make_specified_len(ctx,specified_num)
await ctx.channel.send(msg)
"""botの接続と起動"""
bot.run(token)
|
[] |
[] |
[
"DISCORD_BOT_TOKEN"
] |
[]
|
["DISCORD_BOT_TOKEN"]
|
python
| 1 | 0 | |
configure.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""configure script to get build parameters from user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
_DEFAULT_CUDA_VERSION = '10'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_TENSORRT_VERSION = '6'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
_TF_OPENCL_VERSION = '1.2'
_DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
_DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15, 16, 17, 18]
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10
_TF_BAZELRC_FILENAME = '.tf_configure.bazelrc'
_TF_WORKSPACE_ROOT = ''
_TF_BAZELRC = ''
_TF_CURRENT_BAZEL_VERSION = None
_TF_MIN_BAZEL_VERSION = '0.27.1'
_TF_MAX_BAZEL_VERSION = '0.29.1'
NCCL_LIB_PATHS = [
'lib64/', 'lib/powerpc64le-linux-gnu/', 'lib/x86_64-linux-gnu/', ''
]
# List of files to configure when building Bazel on Apple platforms.
APPLE_BAZEL_FILES = [
'tensorflow/lite/experimental/ios/BUILD',
'tensorflow/lite/experimental/objc/BUILD',
'tensorflow/lite/experimental/swift/BUILD'
]
# List of files to move when building for iOS.
IOS_FILES = [
'tensorflow/lite/experimental/objc/TensorFlowLiteObjC.podspec',
'tensorflow/lite/experimental/swift/TensorFlowLiteSwift.podspec',
]
class UserInputError(Exception):
pass
def is_windows():
return platform.system() == 'Windows'
def is_linux():
return platform.system() == 'Linux'
def is_macos():
return platform.system() == 'Darwin'
def is_ppc64le():
return platform.machine() == 'ppc64le'
def is_cygwin():
return platform.system().startswith('CYGWIN_NT')
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question) # pylint: disable=bad-builtin
except EOFError:
answer = ''
return answer
def symlink_force(target, link_name):
"""Force symlink, equivalent of 'ln -sf'.
Args:
target: items to link to.
link_name: name of the link.
"""
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
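# Usage sketch (hypothetical paths): replaces an existing link just like
# 'ln -sf' would, instead of failing with EEXIST:
#   symlink_force('/opt/toolchains/gcc-9', '/usr/local/bin/default-gcc')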
def sed_in_place(filename, old, new):
"""Replace old string with new string in file.
Args:
filename: string for filename.
old: string to replace.
new: new string to replace to.
"""
with open(filename, 'r') as f:
filedata = f.read()
newdata = filedata.replace(old, new)
with open(filename, 'w') as f:
f.write(newdata)
def write_to_bazelrc(line):
with open(_TF_BAZELRC, 'a') as f:
f.write(line + '\n')
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env %s="%s"' % (var_name, str(var)))
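# Illustrative example: calling
#   write_action_env_to_bazelrc('PYTHON_BIN_PATH', '/usr/bin/python3')
# appends the following line to the generated bazelrc file:
#   build --action_env PYTHON_BIN_PATH="/usr/bin/python3"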
def run_shell(cmd, allow_non_zero=False):
if allow_non_zero:
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd)
return output.decode('UTF-8').strip()
def cygpath(path):
"""Convert path from posix to windows."""
return os.path.abspath(path).replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
library_paths = run_shell([
python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))'
]).split('\n')
except subprocess.CalledProcessError:
library_paths = [
run_shell([
python_bin_path, '-c',
'from distutils.sysconfig import get_python_lib;'
'print(get_python_lib())'
])
]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
return paths
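# Hedged example of a possible return value on a Linux system (the actual
# paths depend on the interpreter passed in and on PYTHONPATH):
#   ['/usr/lib/python3/dist-packages',
#    '/usr/local/lib/python3.6/site-packages']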
def get_python_major_version(python_bin_path):
"""Get the python major version."""
return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = ('Please specify the location of python. [Default is '
'%s]: ') % default_python_bin_path
while True:
python_bin_path = get_from_env_or_user_or_default(environ_cp,
'PYTHON_BIN_PATH',
ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: %s cannot be found.' % python_bin_path)
else:
print('%s is not executable. Is it the python binary?' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = ''
# Convert python path to Windows style before checking lib and version
if is_windows() or is_cygwin():
python_bin_path = cygpath(python_bin_path)
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
python_lib_path = python_lib_paths[0]
else:
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [%s]\n' % python_lib_paths[0])
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
python_major_version = get_python_major_version(python_bin_path)
if python_major_version == '2':
write_to_bazelrc('build --host_force_python=PY2')
# Convert python path to Windows style before writing into bazel.rc
if is_windows() or is_cygwin():
python_lib_path = cygpath(python_lib_path)
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --python_path=\"%s"' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
  # If the chosen python_lib_path comes from a path specified in the
  # PYTHONPATH variable, we need to tell bazel to include PYTHONPATH
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
if python_lib_path in python_paths:
write_action_env_to_bazelrc('PYTHONPATH', environ_cp.get('PYTHONPATH'))
# Write tools/python_bin_path.sh
with open(
os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
'w') as f:
f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def reset_tf_configure_bazelrc():
"""Reset file that contains customized config settings."""
open(_TF_BAZELRC, 'w').close()
def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
These files could interfere with Bazel parsing.
"""
makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow',
'contrib', 'makefile', 'downloads')
if os.path.isdir(makefile_download_dir):
for root, _, filenames in os.walk(makefile_download_dir):
for f in filenames:
if f.endswith('BUILD'):
os.remove(os.path.join(root, f))
def get_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Get boolean input from user.
If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
Returns:
boolean value of the variable.
Raises:
UserInputError: if an environment variable is set, but it cannot be
interpreted as a boolean indicator, assume that the user has made a
scripting error, and will continue to provide invalid input.
Raise the error to avoid infinitely looping.
"""
if not question:
question = 'Do you wish to build TensorFlow with %s support?' % query_item
if not yes_reply:
yes_reply = '%s support will be enabled for TensorFlow.' % query_item
if not no_reply:
no_reply = 'No %s' % yes_reply
yes_reply += '\n'
no_reply += '\n'
if enabled_by_default:
question += ' [Y/n]: '
else:
question += ' [y/N]: '
var = environ_cp.get(var_name)
if var is not None:
var_content = var.strip().lower()
true_strings = ('1', 't', 'true', 'y', 'yes')
false_strings = ('0', 'f', 'false', 'n', 'no')
if var_content in true_strings:
var = True
elif var_content in false_strings:
var = False
else:
raise UserInputError(
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
'Current value is %s.' %
(var_name, ', '.join(true_strings), ', '.join(false_strings), var))
while var is None:
user_input_origin = get_input(question)
user_input = user_input_origin.strip().lower()
if user_input == 'y':
print(yes_reply)
var = True
elif user_input == 'n':
print(no_reply)
var = False
elif not user_input:
if enabled_by_default:
print(yes_reply)
var = True
else:
print(no_reply)
var = False
else:
print('Invalid selection: %s' % user_input_origin)
return var
def set_build_var(environ_cp,
var_name,
query_item,
option_name,
enabled_by_default,
bazel_config_name=None):
"""Set if query_item will be enabled for the build.
Ask user if query_item will be enabled. Default is used if no input is given.
Set subprocess environment variable and write to .bazelrc if enabled.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
option_name: string for option to define in .bazelrc.
enabled_by_default: boolean for default behavior.
bazel_config_name: Name for Bazel --config argument to enable build feature.
"""
var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
environ_cp[var_name] = var
if var == '1':
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
write_to_bazelrc('build --config=%s' % bazel_config_name)
elif bazel_config_name is not None:
# TODO(mikecase): Migrate all users of configure.py to use --config Bazel
# options and not to set build configs through environment variables.
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
def set_action_env_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None,
bazel_config_name=None):
"""Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
Set environment variable and write to .bazelrc.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
bazel_config_name: adding config to .bazelrc instead of action_env.
"""
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
if not bazel_config_name:
write_action_env_to_bazelrc(var_name, var)
elif var:
write_to_bazelrc('build --config=%s' % bazel_config_name)
environ_cp[var_name] = str(var)
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
"""
version = version.split('-')[0]
version_segments = version.split('.')
# Treat "0.24" as "0.24.0"
if len(version_segments) == 2:
version_segments.append('0')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
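# Worked examples: each segment is zero-padded to three digits and the
# concatenation is parsed as a single integer, so numeric comparison
# matches version ordering:
#   convert_version_to_int('0.27.1')  -> 27001  # '000'+'027'+'001'
#   convert_version_to_int('0.24')    -> 24000  # treated as '0.24.0'
#   convert_version_to_int('rolling') -> None   # non-numeric segment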
def check_bazel_version(min_version, max_version):
"""Check installed bazel version is between min_version and max_version.
Args:
min_version: string for minimum bazel version (must exist!).
max_version: string for maximum bazel version (must exist!).
Returns:
The bazel version detected.
"""
if which('bazel') is None:
print('Cannot find bazel. Please install bazel.')
sys.exit(0)
curr_version = run_shell(
['bazel', '--batch', '--bazelrc=/dev/null', 'version'])
for line in curr_version.split('\n'):
if 'Build label: ' in line:
curr_version = line.split('Build label: ')[1]
break
min_version_int = convert_version_to_int(min_version)
curr_version_int = convert_version_to_int(curr_version)
max_version_int = convert_version_to_int(max_version)
# Check if current bazel version can be detected properly.
if not curr_version_int:
print('WARNING: current bazel installation is not a release version.')
print('Make sure you are running at least bazel %s' % min_version)
return curr_version
print('You have bazel %s installed.' % curr_version)
if curr_version_int < min_version_int:
print('Please upgrade your bazel installation to version %s or higher to '
'build TensorFlow!' % min_version)
sys.exit(1)
if (curr_version_int > max_version_int and
'TF_IGNORE_MAX_BAZEL_VERSION' not in os.environ):
print('Please downgrade your bazel installation to version %s or lower to '
'build TensorFlow! To downgrade: download the installer for the old '
'version (from https://github.com/bazelbuild/bazel/releases) then '
'run the installer.' % max_version)
sys.exit(1)
return curr_version
def set_cc_opt_flags(environ_cp):
"""Set up architecture-dependent optimization flags.
  Also appends CC optimization flags to the generated bazelrc.
Args:
environ_cp: copy of the os.environ.
"""
if is_ppc64le():
# gcc on ppc64le does not support -march, use mcpu instead
default_cc_opt_flags = '-mcpu=native'
elif is_windows():
default_cc_opt_flags = '/arch:AVX'
else:
default_cc_opt_flags = '-march=native -Wno-sign-compare'
question = ('Please specify optimization flags to use during compilation when'
' bazel option "--config=opt" is specified [Default is %s]: '
) % default_cc_opt_flags
cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
write_to_bazelrc('build:opt --copt=%s' % opt)
  # -march=native for the host compiler is safe when building on the same host.
if not is_ppc64le() and not is_windows():
write_to_bazelrc('build:opt --host_copt=-march=native')
write_to_bazelrc('build:opt --define with_default_optimizations=true')
def set_tf_cuda_clang(environ_cp):
"""set TF_CUDA_CLANG action_env.
Args:
environ_cp: copy of the os.environ.
"""
question = 'Do you want to use clang as CUDA compiler?'
yes_reply = 'Clang will be used as CUDA compiler.'
no_reply = 'nvcc will be used as CUDA compiler.'
set_action_env_var(
environ_cp,
'TF_CUDA_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='cuda_clang')
def set_tf_download_clang(environ_cp):
"""Set TF_DOWNLOAD_CLANG action_env."""
question = 'Do you wish to download a fresh release of clang? (Experimental)'
yes_reply = 'Clang will be downloaded and used to compile tensorflow.'
no_reply = 'Clang will not be downloaded.'
set_action_env_var(
environ_cp,
'TF_DOWNLOAD_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='download_clang')
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
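# Resolution order example (hypothetical values): the environment variable
# wins, then interactive input, then the default:
#   environ_cp = {'TF_CUDA_VERSION': '10.1'}
#   get_from_env_or_user_or_default(environ_cp, 'TF_CUDA_VERSION',
#                                   'CUDA version? ', '10')  # -> '10.1'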
def set_clang_cuda_compiler_path(environ_cp):
"""Set CLANG_CUDA_COMPILER_PATH."""
default_clang_path = which('clang') or ''
ask_clang_path = ('Please specify which clang should be used as device and '
'host compiler. [Default is %s]: ') % default_clang_path
while True:
clang_cuda_compiler_path = get_from_env_or_user_or_default(
environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
default_clang_path)
if os.path.exists(clang_cuda_compiler_path):
break
# Reset and retry
print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
# Set CLANG_CUDA_COMPILER_PATH
environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
var_name,
var_default,
ask_for_var,
check_success,
error_msg,
suppress_default_error=False,
resolve_symlinks=False,
n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
"""Loop over user prompts for an ENV param until receiving a valid response.
For the env param var_name, read from the environment or verify user input
until receiving valid input. When done, set var_name in the environ_cp to its
new value.
Args:
environ_cp: (Dict) copy of the os.environ.
var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
var_default: (String) default value string.
ask_for_var: (String) string for how to ask for user input.
check_success: (Function) function that takes one argument and returns a
boolean. Should return True if the value provided is considered valid. May
contain a complex error message if error_msg does not provide enough
information. In that case, set suppress_default_error to True.
error_msg: (String) String with one and only one '%s'. Formatted with each
invalid response upon check_success(input) failure.
suppress_default_error: (Bool) Suppress the above error message in favor of
one from the check_success function.
resolve_symlinks: (Bool) Translate symbolic links into the real filepath.
n_ask_attempts: (Integer) Number of times to query for valid input before
raising an error and quitting.
Returns:
[String] The value of var_name after querying for input.
Raises:
UserInputError: if a query has been attempted n_ask_attempts times without
success, assume that the user has made a scripting error, and will
continue to provide invalid input. Raise the error to avoid infinitely
looping.
"""
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
default,
)
for _ in range(n_ask_attempts):
val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
default)
if check_success(val):
break
if not suppress_default_error:
print(error_msg % val)
environ_cp[var_name] = ''
else:
raise UserInputError('Invalid %s setting was provided %d times in a row. '
'Assuming to be a scripting mistake.' %
(var_name, n_ask_attempts))
if resolve_symlinks and os.path.islink(val):
val = os.path.realpath(val)
environ_cp[var_name] = val
return val
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' %
environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
def valid_ndk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'source.properties')))
android_ndk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_HOME',
var_default=default_ndk_path,
ask_for_var='Please specify the home path of the Android NDK to use.',
check_success=valid_ndk_path,
error_msg=('The path %s or its child file "source.properties" '
'does not exist.'))
write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
write_action_env_to_bazelrc(
'ANDROID_NDK_API_LEVEL',
get_ndk_api_level(environ_cp, android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
"""Set Android variables and write Android SDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
elif is_macos():
default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
else:
default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
def valid_sdk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'platforms')) and
os.path.exists(os.path.join(path, 'build-tools')))
android_sdk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_SDK_HOME',
var_default=default_sdk_path,
ask_for_var='Please specify the home path of the Android SDK to use.',
check_success=valid_sdk_path,
error_msg=('Either %s does not exist, or it does not contain the '
'subdirectories "platforms" and "build-tools".'))
platforms = os.path.join(android_sdk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [x.replace('android-', '') for x in api_levels]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_sdk_home_path, 'platforms',
'android-' + api_level))
android_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_API_LEVEL',
var_default=api_levels[-1],
ask_for_var=('Please specify the Android SDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the SDK path.')
build_tools = os.path.join(android_sdk_home_path, 'build-tools')
versions = sorted(os.listdir(build_tools))
def valid_build_tools(version):
return os.path.exists(
os.path.join(android_sdk_home_path, 'build-tools', version))
android_build_tools_version = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_BUILD_TOOLS_VERSION',
var_default=versions[-1],
ask_for_var=('Please specify an Android build tools version to use. '
'[Available versions: %s]') % versions,
check_success=valid_build_tools,
error_msg=('The selected SDK does not have build-tools version %s '
'available.'))
write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
android_build_tools_version)
write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def get_ndk_api_level(environ_cp, android_ndk_home_path):
"""Gets the appropriate NDK API level to use for the provided Android NDK path."""
# First check to see if we're using a blessed version of the NDK.
properties_path = '%s/source.properties' % android_ndk_home_path
if is_windows() or is_cygwin():
properties_path = cygpath(properties_path)
with open(properties_path, 'r') as f:
filedata = f.read()
revision = re.search(r'Pkg.Revision = (\d+)', filedata)
if revision:
ndk_version = revision.group(1)
else:
raise Exception('Unable to parse NDK revision.')
if int(ndk_version) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
print('WARNING: The NDK version in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
'errors.\n' %
(android_ndk_home_path, ndk_version, _SUPPORTED_ANDROID_NDK_VERSIONS))
# Now grab the NDK API level to use. Note that this is different from the
# SDK API level, as the NDK API level is effectively the *min* target SDK
# version.
platforms = os.path.join(android_ndk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [
x.replace('android-', '') for x in api_levels if 'android-' in x
]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_ndk_home_path, 'platforms',
'android-' + api_level))
android_ndk_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_API_LEVEL',
var_default='18', # 18 is required for GPU acceleration.
ask_for_var=('Please specify the (min) Android NDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the NDK path.')
return android_ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
"""Set GCC_HOST_COMPILER_PATH."""
default_gcc_host_compiler_path = which('gcc') or ''
cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
if os.path.islink(cuda_bin_symlink):
# os.readlink is only available in linux
default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink)
gcc_host_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
ask_for_var='Please specify which gcc should be used by nvcc as the host compiler.',
check_success=os.path.exists,
resolve_symlinks=True,
error_msg='Invalid gcc path. %s cannot be found.',
)
write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
"""Reformat the version string to have the given number of sequences.
For example:
Given (7, 2) -> 7.0
(7.0.1, 2) -> 7.0
(5, 1) -> 5
(5.0.3.2, 1) -> 5
Args:
version_str: String, the version string.
    sequence_count: int, the number of version segments to keep.
Returns:
string, reformatted version string.
"""
v = version_str.split('.')
if len(v) < sequence_count:
v = v + (['0'] * (sequence_count - len(v)))
return '.'.join(v[:sequence_count])
def set_tf_cuda_paths(environ_cp):
"""Set TF_CUDA_PATHS."""
ask_cuda_paths = (
'Please specify the comma-separated list of base paths to look for CUDA '
'libraries and headers. [Leave empty to use the default]: ')
tf_cuda_paths = get_from_env_or_user_or_default(environ_cp, 'TF_CUDA_PATHS',
ask_cuda_paths, '')
if tf_cuda_paths:
environ_cp['TF_CUDA_PATHS'] = tf_cuda_paths
def set_tf_cuda_version(environ_cp):
"""Set TF_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the CUDA SDK version you want to use. '
'[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
tf_cuda_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDA_VERSION',
ask_cuda_version,
_DEFAULT_CUDA_VERSION)
environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
def set_tf_cudnn_version(environ_cp):
"""Set TF_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the cuDNN version you want to use. '
'[Leave empty to default to cuDNN %s]: ') % _DEFAULT_CUDNN_VERSION
tf_cudnn_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDNN_VERSION',
ask_cudnn_version,
_DEFAULT_CUDNN_VERSION)
environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
"""Check compatibility between given library and cudnn/cudart libraries."""
ldd_bin = which('ldd') or '/usr/bin/ldd'
ldd_out = run_shell([ldd_bin, lib], True)
ldd_out = ldd_out.split(os.linesep)
cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
cudnn = None
cudart = None
cudnn_ok = True # assume no cudnn dependency by default
cuda_ok = True # assume no cuda dependency by default
for line in ldd_out:
if 'libcudnn.so' in line:
cudnn = cudnn_pattern.search(line)
cudnn_ok = False
elif 'libcudart.so' in line:
cudart = cuda_pattern.search(line)
cuda_ok = False
if cudnn and len(cudnn.group(1)):
cudnn = convert_version_to_int(cudnn.group(1))
if cudart and len(cudart.group(1)):
cudart = convert_version_to_int(cudart.group(1))
if cudnn is not None:
cudnn_ok = (cudnn == cudnn_ver)
if cudart is not None:
cuda_ok = (cudart == cuda_ver)
return cudnn_ok and cuda_ok
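# Sketch of the ldd output lines the patterns above match (paths and
# versions are illustrative only):
#   libcudnn.so.7 => /usr/lib/x86_64-linux-gnu/libcudnn.so.7
#   libcudart.so.10.0 => /usr/local/cuda/lib64/libcudart.so.10.0
# A library that links against neither cudnn nor cudart is compatible.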
def set_tf_tensorrt_version(environ_cp):
"""Set TF_TENSORRT_VERSION."""
if not is_linux():
raise ValueError('Currently TensorRT is only supported on Linux platform.')
if not int(environ_cp.get('TF_NEED_TENSORRT', False)):
return
ask_tensorrt_version = (
'Please specify the TensorRT version you want to use. '
'[Leave empty to default to TensorRT %s]: ') % _DEFAULT_TENSORRT_VERSION
tf_tensorrt_version = get_from_env_or_user_or_default(
environ_cp, 'TF_TENSORRT_VERSION', ask_tensorrt_version,
_DEFAULT_TENSORRT_VERSION)
environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
def set_tf_nccl_version(environ_cp):
"""Set TF_NCCL_VERSION."""
if not is_linux():
raise ValueError('Currently NCCL is only supported on Linux platform.')
if 'TF_NCCL_VERSION' in environ_cp:
return
ask_nccl_version = (
'Please specify the locally installed NCCL version you want to use. '
'[Leave empty to use http://github.com/nvidia/nccl]: ')
tf_nccl_version = get_from_env_or_user_or_default(environ_cp,
'TF_NCCL_VERSION',
ask_nccl_version, '')
environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
Args:
environ_cp: copy of the os.environ.
Returns:
string of native cuda compute capabilities, separated by comma.
"""
device_query_bin = os.path.join(
environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
try:
output = run_shell(device_query_bin).split('\n')
pattern = re.compile('[0-9]*\\.[0-9]*')
output = [pattern.search(x) for x in output if 'Capability' in x]
output = ','.join(x.group() for x in output if x is not None)
except subprocess.CalledProcessError:
output = ''
else:
output = ''
return output
def set_tf_cuda_compute_capabilities(environ_cp):
"""Set TF_CUDA_COMPUTE_CAPABILITIES."""
while True:
native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
environ_cp)
if not native_cuda_compute_capabilities:
default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
else:
default_cuda_compute_capabilities = native_cuda_compute_capabilities
ask_cuda_compute_capabilities = (
'Please specify a list of comma-separated '
'CUDA compute capabilities you want to '
'build with.\nYou can find the compute '
'capability of your device at: '
'https://developer.nvidia.com/cuda-gpus.\nPlease'
' note that each additional compute '
'capability significantly increases your '
'build time and binary size, and that '
'TensorFlow only supports compute '
'capabilities >= 3.5 [Default is: %s]: ' %
default_cuda_compute_capabilities)
tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
    # Check whether all capabilities from the input are valid
all_valid = True
    # Remove any whitespace characters that users may have inserted by
    # accident before splitting the string, since stray whitespace would cause an error
tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
for compute_capability in tf_cuda_compute_capabilities.split(','):
m = re.match('[0-9]+.[0-9]+', compute_capability)
if not m:
print('Invalid compute capability: %s' % compute_capability)
all_valid = False
else:
ver = float(m.group(0))
if ver < 3.0:
print('ERROR: TensorFlow only supports CUDA compute capabilities 3.0 '
'and higher. Please re-specify the list of compute '
'capabilities excluding version %s.' % ver)
all_valid = False
if ver < 3.5:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than 3.5. Disable XLA when running on older GPUs.')
if all_valid:
break
# Reset and Retry
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''
# Set TF_CUDA_COMPUTE_CAPABILITIES
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
"""Set other CUDA related variables."""
# If CUDA is enabled, always use GPU during build and test.
if environ_cp.get('TF_CUDA_CLANG') == '1':
write_to_bazelrc('build --config=cuda_clang')
else:
write_to_bazelrc('build --config=cuda')
def set_host_cxx_compiler(environ_cp):
"""Set HOST_CXX_COMPILER."""
default_cxx_host_compiler = which('g++') or ''
host_cxx_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_CXX_COMPILER',
var_default=default_cxx_host_compiler,
ask_for_var=('Please specify which C++ compiler should be used as the '
'host C++ compiler.'),
check_success=os.path.exists,
error_msg='Invalid C++ compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_CXX_COMPILER', host_cxx_compiler)
def set_host_c_compiler(environ_cp):
"""Set HOST_C_COMPILER."""
default_c_host_compiler = which('gcc') or ''
host_c_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_C_COMPILER',
var_default=default_c_host_compiler,
ask_for_var=('Please specify which C compiler should be used as the host '
'C compiler.'),
check_success=os.path.exists,
error_msg='Invalid C compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_C_COMPILER', host_c_compiler)
def set_computecpp_toolkit_path(environ_cp):
"""Set COMPUTECPP_TOOLKIT_PATH."""
def toolkit_exists(toolkit_path):
"""Check if a computecpp toolkit path is valid."""
if is_linux():
sycl_rt_lib_path = 'lib/libComputeCpp.so'
else:
sycl_rt_lib_path = ''
sycl_rt_lib_path_full = os.path.join(toolkit_path, sycl_rt_lib_path)
exists = os.path.exists(sycl_rt_lib_path_full)
if not exists:
print('Invalid SYCL %s library path. %s cannot be found' %
(_TF_OPENCL_VERSION, sycl_rt_lib_path_full))
return exists
computecpp_toolkit_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='COMPUTECPP_TOOLKIT_PATH',
var_default=_DEFAULT_COMPUTECPP_TOOLKIT_PATH,
ask_for_var=(
'Please specify the location where ComputeCpp for SYCL %s is '
'installed.' % _TF_OPENCL_VERSION),
check_success=toolkit_exists,
error_msg='Invalid SYCL compiler path. %s cannot be found.',
suppress_default_error=True)
write_action_env_to_bazelrc('COMPUTECPP_TOOLKIT_PATH',
computecpp_toolkit_path)
def set_trisycl_include_dir(environ_cp):
"""Set TRISYCL_INCLUDE_DIR."""
ask_trisycl_include_dir = ('Please specify the location of the triSYCL '
'include directory. (Use --config=sycl_trisycl '
'when building with Bazel) '
'[Default is %s]: ') % (
_DEFAULT_TRISYCL_INCLUDE_DIR)
while True:
trisycl_include_dir = get_from_env_or_user_or_default(
environ_cp, 'TRISYCL_INCLUDE_DIR', ask_trisycl_include_dir,
_DEFAULT_TRISYCL_INCLUDE_DIR)
if os.path.exists(trisycl_include_dir):
break
print('Invalid triSYCL include directory, %s cannot be found' %
(trisycl_include_dir))
# Set TRISYCL_INCLUDE_DIR
environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir
write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', trisycl_include_dir)
def system_specific_test_config(env):
"""Add default build and test flags required for TF tests to bazelrc."""
write_to_bazelrc('test --flaky_test_attempts=3')
write_to_bazelrc('test --test_size_filters=small,medium')
write_to_bazelrc(
'test --test_tag_filters=-benchmark-test,-no_oss,-oss_serial')
write_to_bazelrc('test --build_tag_filters=-benchmark-test,-no_oss')
if is_windows():
if env.get('TF_NEED_CUDA', None) == '1':
write_to_bazelrc(
'test --test_tag_filters=-no_windows,-no_windows_gpu,-no_gpu')
write_to_bazelrc(
'test --build_tag_filters=-no_windows,-no_windows_gpu,-no_gpu')
else:
write_to_bazelrc('test --test_tag_filters=-no_windows,-gpu')
write_to_bazelrc('test --build_tag_filters=-no_windows,-gpu')
elif is_macos():
write_to_bazelrc('test --test_tag_filters=-gpu,-nomac,-no_mac')
write_to_bazelrc('test --build_tag_filters=-gpu,-nomac,-no_mac')
elif is_linux():
if env.get('TF_NEED_CUDA', None) == '1':
write_to_bazelrc('test --test_tag_filters=-no_gpu')
write_to_bazelrc('test --build_tag_filters=-no_gpu')
write_to_bazelrc('test --test_env=LD_LIBRARY_PATH')
else:
write_to_bazelrc('test --test_tag_filters=-gpu')
write_to_bazelrc('test --build_tag_filters=-gpu')
def set_system_libs_flag(environ_cp):
syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
if syslibs:
if ',' in syslibs:
syslibs = ','.join(sorted(syslibs.split(',')))
else:
syslibs = ','.join(sorted(syslibs.split()))
write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)
if 'PREFIX' in environ_cp:
write_to_bazelrc('build --define=PREFIX=%s' % environ_cp['PREFIX'])
if 'LIBDIR' in environ_cp:
write_to_bazelrc('build --define=LIBDIR=%s' % environ_cp['LIBDIR'])
if 'INCLUDEDIR' in environ_cp:
write_to_bazelrc('build --define=INCLUDEDIR=%s' % environ_cp['INCLUDEDIR'])
def set_windows_build_flags(environ_cp):
"""Set Windows specific build options."""
# The non-monolithic build is not supported yet
write_to_bazelrc('build --config monolithic')
# Suppress warning messages
write_to_bazelrc('build --copt=-w --host_copt=-w')
# Fix winsock2.h conflicts
write_to_bazelrc(
'build --copt=-DWIN32_LEAN_AND_MEAN --host_copt=-DWIN32_LEAN_AND_MEAN '
'--copt=-DNOGDI --host_copt=-DNOGDI')
# Output more verbose information when something goes wrong
write_to_bazelrc('build --verbose_failures')
  # The host and target platforms are the same in a Windows build, so we
  # don't have to distinguish between them. This avoids building the same targets twice.
write_to_bazelrc('build --distinct_host_configuration=false')
if get_var(
environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
True, ('Would you like to override eigen strong inline for some C++ '
'compilation to reduce the compilation time?'),
'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
'some compilations could take more than 20 mins.'):
# Due to a known MSVC compiler issue
# https://github.com/tensorflow/tensorflow/issues/10521
# Overriding eigen strong inline speeds up the compiling of
# conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
# but this also hurts the performance. Let users decide what they want.
write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
"""Helper function to print formatted help text for Bazel config options."""
print('\t--config=%-12s\t# %s' % (name, help_text))
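# Example output (sketch): config_info_line('mkl', 'Build with MKL support.')
# prints a tab-aligned help line such as:
#   --config=mkl        # Build with MKL support.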
def configure_ios():
"""Configures TensorFlow for iOS builds.
This function will only be executed if `is_macos()` is true.
"""
if not is_macos():
return
for filepath in APPLE_BAZEL_FILES:
existing_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath + '.apple')
renamed_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath)
symlink_force(existing_filepath, renamed_filepath)
for filepath in IOS_FILES:
filename = os.path.basename(filepath)
new_filepath = os.path.join(_TF_WORKSPACE_ROOT, filename)
symlink_force(filepath, new_filepath)
def validate_cuda_config(environ_cp):
"""Run find_cuda_config.py and return cuda_toolkit_path, or None."""
def maybe_encode_env(env):
"""Encodes unicode in env to str on Windows python 2.x."""
if not is_windows() or sys.version_info[0] != 2:
return env
for k, v in env.items():
if isinstance(k, unicode):
k = k.encode('ascii')
if isinstance(v, unicode):
v = v.encode('ascii')
env[k] = v
return env
cuda_libraries = ['cuda', 'cudnn']
if is_linux():
if int(environ_cp.get('TF_NEED_TENSORRT', False)):
cuda_libraries.append('tensorrt')
if environ_cp.get('TF_NCCL_VERSION', None):
cuda_libraries.append('nccl')
proc = subprocess.Popen(
[environ_cp['PYTHON_BIN_PATH'], 'third_party/gpus/find_cuda_config.py'] +
cuda_libraries,
stdout=subprocess.PIPE,
env=maybe_encode_env(environ_cp))
if proc.wait():
# Errors from find_cuda_config.py were sent to stderr.
print('Asking for detailed CUDA configuration...\n')
return False
config = dict(
tuple(line.decode('ascii').rstrip().split(': ')) for line in proc.stdout)
print('Found CUDA %s in:' % config['cuda_version'])
print(' %s' % config['cuda_library_dir'])
print(' %s' % config['cuda_include_dir'])
print('Found cuDNN %s in:' % config['cudnn_version'])
print(' %s' % config['cudnn_library_dir'])
print(' %s' % config['cudnn_include_dir'])
if 'tensorrt_version' in config:
print('Found TensorRT %s in:' % config['tensorrt_version'])
print(' %s' % config['tensorrt_library_dir'])
print(' %s' % config['tensorrt_include_dir'])
if config.get('nccl_version', None):
print('Found NCCL %s in:' % config['nccl_version'])
print(' %s' % config['nccl_library_dir'])
print(' %s' % config['nccl_include_dir'])
print('\n')
environ_cp['CUDA_TOOLKIT_PATH'] = config['cuda_toolkit_path']
return True
def main():
global _TF_WORKSPACE_ROOT
global _TF_BAZELRC
global _TF_CURRENT_BAZEL_VERSION
parser = argparse.ArgumentParser()
parser.add_argument(
'--workspace',
type=str,
default=os.path.abspath(os.path.dirname(__file__)),
help='The absolute path to your active Bazel workspace.')
args = parser.parse_args()
_TF_WORKSPACE_ROOT = args.workspace
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
  # Make a copy of os.environ so it is clear when functions are getting and
  # setting environment variables.
environ_cp = dict(os.environ)
current_bazel_version = check_bazel_version(_TF_MIN_BAZEL_VERSION,
_TF_MAX_BAZEL_VERSION)
_TF_CURRENT_BAZEL_VERSION = convert_version_to_int(current_bazel_version)
reset_tf_configure_bazelrc()
cleanup_makefile()
setup_python(environ_cp)
if is_windows():
environ_cp['TF_NEED_OPENCL_SYCL'] = '0'
environ_cp['TF_NEED_COMPUTECPP'] = '0'
environ_cp['TF_NEED_OPENCL'] = '0'
environ_cp['TF_CUDA_CLANG'] = '0'
environ_cp['TF_NEED_TENSORRT'] = '0'
# TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
# Windows.
environ_cp['TF_DOWNLOAD_CLANG'] = '0'
environ_cp['TF_NEED_MPI'] = '0'
environ_cp['TF_SET_ANDROID_WORKSPACE'] = '0'
if is_macos():
environ_cp['TF_NEED_TENSORRT'] = '0'
else:
environ_cp['TF_CONFIGURE_IOS'] = '0'
xla_enabled_by_default = is_linux() or is_macos()
set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
xla_enabled_by_default, 'xla')
set_action_env_var(
environ_cp,
'TF_NEED_OPENCL_SYCL',
'OpenCL SYCL',
False,
bazel_config_name='sycl')
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
set_host_cxx_compiler(environ_cp)
set_host_c_compiler(environ_cp)
set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP', True)
if environ_cp.get('TF_NEED_COMPUTECPP') == '1':
set_computecpp_toolkit_path(environ_cp)
else:
set_trisycl_include_dir(environ_cp)
set_action_env_var(
environ_cp, 'TF_NEED_ROCM', 'ROCm', False, bazel_config_name='rocm')
if (environ_cp.get('TF_NEED_ROCM') == '1' and
'LD_LIBRARY_PATH' in environ_cp and
environ_cp.get('LD_LIBRARY_PATH') != '1'):
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
environ_cp['TF_NEED_CUDA'] = str(
int(get_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)))
if (environ_cp.get('TF_NEED_CUDA') == '1' and
'TF_CUDA_CONFIG_REPO' not in environ_cp):
set_action_env_var(
environ_cp,
'TF_NEED_TENSORRT',
'TensorRT',
False,
bazel_config_name='tensorrt')
environ_save = dict(environ_cp)
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
if validate_cuda_config(environ_cp):
cuda_env_names = [
'TF_CUDA_VERSION',
'TF_CUBLAS_VERSION',
'TF_CUDNN_VERSION',
'TF_TENSORRT_VERSION',
'TF_NCCL_VERSION',
'TF_CUDA_PATHS',
# Items below are for backwards compatibility when not using
# TF_CUDA_PATHS.
'CUDA_TOOLKIT_PATH',
'CUDNN_INSTALL_PATH',
'NCCL_INSTALL_PATH',
'NCCL_HDR_PATH',
'TENSORRT_INSTALL_PATH'
]
# Note: set_action_env_var above already writes to bazelrc.
for name in cuda_env_names:
if name in environ_cp:
write_action_env_to_bazelrc(name, environ_cp[name])
break
# Restore settings changed below if CUDA config could not be validated.
environ_cp = dict(environ_save)
set_tf_cuda_version(environ_cp)
set_tf_cudnn_version(environ_cp)
if is_linux():
set_tf_tensorrt_version(environ_cp)
set_tf_nccl_version(environ_cp)
set_tf_cuda_paths(environ_cp)
else:
raise UserInputError(
          'Invalid CUDA settings were provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
set_tf_cuda_compute_capabilities(environ_cp)
if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
'LD_LIBRARY_PATH') != '1':
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
set_tf_cuda_clang(environ_cp)
if environ_cp.get('TF_CUDA_CLANG') == '1':
# Ask whether we should download the clang toolchain.
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
# Set up which clang we should use as the cuda / host compiler.
set_clang_cuda_compiler_path(environ_cp)
else:
# Use downloaded LLD for linking.
write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
else:
# Set up which gcc nvcc should use as the host compiler
# No need to set this on Windows
if not is_windows():
set_gcc_host_compiler_path(environ_cp)
set_other_cuda_vars(environ_cp)
else:
# CUDA not required. Ask whether we should download the clang toolchain and
# use it for the CPU build.
set_tf_download_clang(environ_cp)
# SYCL / ROCm / CUDA are mutually exclusive.
# At most 1 GPU platform can be configured.
gpu_platform_count = 0
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_ROCM') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_CUDA') == '1':
gpu_platform_count += 1
if gpu_platform_count >= 2:
    raise UserInputError('SYCL / CUDA / ROCm are mutually exclusive. '
'At most 1 GPU platform can be configured.')
set_cc_opt_flags(environ_cp)
set_system_libs_flag(environ_cp)
if is_windows():
set_windows_build_flags(environ_cp)
if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False,
('Would you like to interactively configure ./WORKSPACE for '
'Android builds?'), 'Searching for NDK and SDK installations.',
'Not configuring the WORKSPACE for Android builds.'):
create_android_ndk_rule(environ_cp)
create_android_sdk_rule(environ_cp)
system_specific_test_config(os.environ)
set_action_env_var(environ_cp, 'TF_CONFIGURE_IOS', 'iOS', False)
if environ_cp.get('TF_CONFIGURE_IOS') == '1':
configure_ios()
print('Preconfigured Bazel build configs. You can use any of the below by '
'adding "--config=<>" to your build command. See .bazelrc for more '
'details.')
config_info_line('mkl', 'Build with MKL support.')
config_info_line('monolithic', 'Config for mostly static monolithic build.')
config_info_line('ngraph', 'Build with Intel nGraph support.')
config_info_line('numa', 'Build with NUMA support.')
config_info_line(
'dynamic_kernels',
'(Experimental) Build kernels into separate shared objects.')
config_info_line('v2', 'Build TensorFlow 2.x instead of 1.x.')
print('Preconfigured Bazel build configs to DISABLE default on features:')
config_info_line('noaws', 'Disable AWS S3 filesystem support.')
config_info_line('nogcp', 'Disable GCP support.')
config_info_line('nohdfs', 'Disable HDFS support.')
config_info_line('nonccl', 'Disable NVIDIA NCCL support.')
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
plugins/dicom_viewer/plugin_tests/dicom_viewer_test.py
|
import io
import os
import json
from girder.models.collection import Collection
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.upload import Upload
from girder.models.user import User
import pydicom
from tests import base
from girder_dicom_viewer import _removeUniqueMetadata, _extractFileData
from girder_dicom_viewer.event_helper import _EventHelper
def setUpModule():
base.enabledPlugins.append('dicom_viewer')
base.startServer()
global _removeUniqueMetadata
global _extractFileData
def tearDownModule():
base.stopServer()
class DicomViewerTest(base.TestCase):
def setUp(self):
super().setUp()
self.dataDir = os.path.join(
os.environ['GIRDER_TEST_DATA_PREFIX'], 'plugins', 'dicom_viewer')
self.users = [User().createUser(
'usr%s' % num, 'passwd', 'tst', 'usr', 'u%[email protected]' % num)
for num in [0, 1]]
def testRemoveUniqueMetadata(self):
dicomMeta = {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 35,
'key5': 54,
'key6': 'commonVal',
'uniqueKey1': 'commonVal'
}
additionalMeta = {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 35,
'key5': 54,
'key6': 'uniqueVal',
'uniqueKey2': 'commonVal',
}
commonMeta = {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 35,
'key5': 54
}
self.assertEqual(_removeUniqueMetadata(dicomMeta, additionalMeta), commonMeta)
def testExtractFileData(self):
dicomFile = {
'_id': '599c4cf3c9c5cb11f1ff5d97',
'assetstoreId': '599c4a19c9c5cb11f1ff5d32',
'creatorId': '5984b9fec9c5cb370447068c',
'exts': ['dcm'],
'itemId': '599c4cf3c9c5cb11f1ff5d96',
'mimeType': 'application/dicom',
'name': '000000.dcm',
'size': 133356
}
dicomMeta = {
'SeriesNumber': 1,
'InstanceNumber': 1,
'SliceLocation': 0
}
result = {
'_id': '599c4cf3c9c5cb11f1ff5d97',
'name': '000000.dcm',
'dicom': {
'SeriesNumber': 1,
'InstanceNumber': 1,
'SliceLocation': 0
}
}
self.assertEqual(_extractFileData(dicomFile, dicomMeta), result)
def testFileProcessHandler(self):
admin, user = self.users
# Create a collection, folder, and item
collection = Collection().createCollection('collection1', admin, public=True)
folder = Folder().createFolder(collection, 'folder1', parentType='collection', public=True)
item = Item().createItem('item1', admin, folder)
# Upload non-DICOM files
self._uploadNonDicomFiles(item, admin)
nonDicomItem = Item().load(item['_id'], force=True)
self.assertIsNone(nonDicomItem.get('dicom'))
# Upload DICOM files
self._uploadDicomFiles(item, admin)
        # Check that the 'dicomItem' was processed correctly
dicomItem = Item().load(item['_id'], force=True)
self.assertIn('dicom', dicomItem)
self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
        # Check that the files list contains the expected keys and that all files are correctly sorted
for i in range(0, 4):
self.assertTrue('_id' in dicomItem['dicom']['files'][i])
self.assertTrue('name' in dicomItem['dicom']['files'][i])
self.assertEqual(dicomItem['dicom']['files'][i]['name'], 'dicomFile{}.dcm'.format(i))
self.assertTrue('SeriesNumber' in dicomItem['dicom']['files'][i]['dicom'])
self.assertTrue('InstanceNumber' in dicomItem['dicom']['files'][i]['dicom'])
self.assertTrue('SliceLocation' in dicomItem['dicom']['files'][i]['dicom'])
# Check the common metadata
self.assertIsNotNone(dicomItem['dicom']['meta'])
def testMakeDicomItem(self):
admin, user = self.users
# create a collection, folder, and item
collection = Collection().createCollection('collection2', admin, public=True)
folder = Folder().createFolder(collection, 'folder2', parentType='collection', public=True)
item = Item().createItem('item2', admin, folder)
# Upload files
self._uploadDicomFiles(item, admin)
# Check the endpoint 'parseDicom' for an admin user
dicomItem = Item().load(item['_id'], force=True)
dicomItem = self._purgeDicomItem(dicomItem)
path = '/item/%s/parseDicom' % dicomItem.get('_id')
resp = self.request(path=path, method='POST', user=admin)
self.assertStatusOk(resp)
dicomItem = Item().load(item['_id'], force=True)
self.assertIn('dicom', dicomItem)
self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
        # Check the endpoint 'parseDicom' for a non-admin user
dicomItem = Item().load(item['_id'], force=True)
dicomItem = self._purgeDicomItem(dicomItem)
path = '/item/%s/parseDicom' % dicomItem.get('_id')
resp = self.request(path=path, method='POST', user=user)
self.assertStatus(resp, 403)
def _uploadNonDicomFiles(self, item, user):
        # Upload a fake file to check that the item is not treated as DICOM
nonDicomContent = b'hello world\n'
ndcmFile = Upload().uploadFromFile(
obj=io.BytesIO(nonDicomContent),
size=len(nonDicomContent),
name='nonDicom.txt',
parentType='item',
parent=item,
mimeType='text/plain',
user=user
)
self.assertIsNotNone(ndcmFile)
def _uploadDicomFiles(self, item, user):
        # Upload the files in reverse order to check that they get sorted correctly
for i in [1, 3, 0, 2]:
file = os.path.join(self.dataDir, '00000%i.dcm' % i)
with open(file, 'rb') as fp, _EventHelper('dicom_viewer.upload.success') as helper:
dcmFile = Upload().uploadFromFile(
obj=fp,
size=os.path.getsize(file),
name='dicomFile{}.dcm'.format(i),
parentType='item',
parent=item,
mimeType='application/dicom',
user=user
)
self.assertIsNotNone(dcmFile)
# Wait for handler success event
handled = helper.wait()
self.assertTrue(handled)
def _purgeDicomItem(self, item):
item.pop('dicom')
return item
def testSearchForDicomItem(self):
admin, user = self.users
# Create a collection, folder, and item
collection = Collection().createCollection('collection3', admin, public=True)
folder = Folder().createFolder(collection, 'folder3', parentType='collection', public=True)
item = Item().createItem('item3', admin, folder)
# Upload files
self._uploadDicomFiles(item, admin)
# Search for DICOM item with 'brain research' as common key/value
resp = self.request(path='/resource/search', params={
'q': 'brain research',
'mode': 'dicom',
'types': json.dumps(['item'])
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['item']), 1)
self.assertEqual(resp.json['item'][0]['name'], 'item3')
# Search for DICOM item with substring 'in resea' as common key/value
resp = self.request(path='/resource/search', params={
'q': 'in resea',
'mode': 'dicom',
'types': json.dumps(['item'])
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['item']), 1)
self.assertEqual(resp.json['item'][0]['name'], 'item3')
        # TODO: Add a test that searches for a private DICOM item as another
        # user; that search should not find anything
def testDicomWithIOError(self):
# One of the test files in the pydicom module will throw an IOError
# when parsing metadata. We should work around that and still be able
# to import the file
samplePath = os.path.join(os.path.dirname(os.path.abspath(
pydicom.__file__)), 'data', 'test_files', 'CT_small.dcm')
admin, user = self.users
# Create a collection, folder, and item
collection = Collection().createCollection('collection4', admin, public=True)
folder = Folder().createFolder(collection, 'folder4', parentType='collection', public=True)
item = Item().createItem('item4', admin, folder)
# Upload this dicom file
with open(samplePath, 'rb') as fp, _EventHelper('dicom_viewer.upload.success') as helper:
dcmFile = Upload().uploadFromFile(
obj=fp,
size=os.path.getsize(samplePath),
name=os.path.basename(samplePath),
parentType='item',
parent=item,
mimeType='application/dicom',
user=user
)
self.assertIsNotNone(dcmFile)
# Wait for handler success event
handled = helper.wait()
self.assertTrue(handled)
        # Check that the 'dicomItem' was processed correctly
dicomItem = Item().load(item['_id'], force=True)
self.assertIn('dicom', dicomItem)
self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
def testDicomWithBinaryValues(self):
        # One of the test files in the pydicom module contains binary metadata
        # values. We should work around that and still be able to import the file
samplePath = os.path.join(os.path.dirname(os.path.abspath(
pydicom.__file__)), 'data', 'test_files', 'OBXXXX1A.dcm')
admin, user = self.users
# Create a collection, folder, and item
collection = Collection().createCollection('collection5', admin, public=True)
folder = Folder().createFolder(collection, 'folder5', parentType='collection', public=True)
item = Item().createItem('item5', admin, folder)
# Upload this dicom file
with open(samplePath, 'rb') as fp, _EventHelper('dicom_viewer.upload.success') as helper:
dcmFile = Upload().uploadFromFile(
obj=fp,
size=os.path.getsize(samplePath),
name=os.path.basename(samplePath),
parentType='item',
parent=item,
mimeType='application/dicom',
user=user
)
self.assertIsNotNone(dcmFile)
# Wait for handler success event
handled = helper.wait()
self.assertTrue(handled)
        # Check that the 'dicomItem' was processed correctly
dicomItem = Item().load(item['_id'], force=True)
self.assertIn('dicom', dicomItem)
self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
|
[] |
[] |
[
"GIRDER_TEST_DATA_PREFIX"
] |
[]
|
["GIRDER_TEST_DATA_PREFIX"]
|
python
| 1 | 0 | |
bootstrap.py
|
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os, shutil, sys, tempfile, textwrap, urllib, urllib2, subprocess
from optparse import OptionParser
from encodings import ascii
if sys.platform == 'win32':
def quote(c):
if ' ' in c:
return '"%s"' % c # work around spawn lamosity on windows
else:
return c
else:
quote = str
# See zc.buildout.easy_install._has_broken_dash_S for motivation and comments.
stdout, stderr = subprocess.Popen(
[sys.executable, '-Sc',
'try:\n'
' import ConfigParser\n'
'except ImportError:\n'
' print 1\n'
'else:\n'
' print 0\n'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
has_broken_dash_S = bool(int(stdout.strip()))
# In order to be more robust in the face of system Pythons, we want to
# run without site-packages loaded. This is somewhat tricky, in
# particular because Python 2.6's distutils imports site, so starting
# with the -S flag is not sufficient. However, we'll start with that:
if not has_broken_dash_S and 'site' in sys.modules:
# We will restart with python -S.
args = sys.argv[:]
args[0:0] = [sys.executable, '-S']
args = map(quote, args)
os.execv(sys.executable, args)
# Now we are running with -S. We'll get the clean sys.path, import site
# because distutils will do it later, and then reset the path and clean
# out any namespace packages from site-packages that might have been
# loaded by .pth files.
clean_path = sys.path[:]
import site
sys.path[:] = clean_path
for k, v in sys.modules.items():
if k in ('setuptools', 'pkg_resources') or (
hasattr(v, '__path__') and
len(v.__path__)==1 and
not os.path.exists(os.path.join(v.__path__[0],'__init__.py'))):
# This is a namespace package. Remove it.
sys.modules.pop(k)
is_jython = sys.platform.startswith('java')
setuptools_source = 'http://peak.telecommunity.com/dist/ez_setup.py'
distribute_source = 'http://python-distribute.org/distribute_setup.py'
# parsing arguments
def normalize_to_url(option, opt_str, value, parser):
if value:
if '://' not in value: # It doesn't smell like a URL.
value = 'file://%s' % (
urllib.pathname2url(
os.path.abspath(os.path.expanduser(value))),)
if opt_str == '--download-base' and not value.endswith('/'):
# Download base needs a trailing slash to make the world happy.
value += '/'
else:
value = None
name = opt_str[2:].replace('-', '_')
setattr(parser.values, name, value)
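# Illustration (hedged, not part of the original script): normalize_to_url
# turns a local path into a file:// URL and forces a trailing slash for
# --download-base, e.g. on POSIX:
#   --download-base /tmp/eggs      ->  file:///tmp/eggs/
#   --setup-source  ./ez_setup.py  ->  file:///abs/path/ez_setup.py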
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --setup-source and --download-base to point to
local resources, you can keep this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", dest="version",
help="use a specific zc.buildout version")
parser.add_option("-d", "--distribute",
action="store_true", dest="use_distribute", default=False,
help="Use Distribute rather than Setuptools.")
parser.add_option("--setup-source", action="callback", dest="setup_source",
callback=normalize_to_url, nargs=1, type="string",
help=("Specify a URL or file location for the setup file. "
"If you use Setuptools, this will default to " +
setuptools_source + "; if you use Distribute, this "
"will default to " + distribute_source +"."))
parser.add_option("--download-base", action="callback", dest="download_base",
callback=normalize_to_url, nargs=1, type="string",
help=("Specify a URL or directory for downloading "
"zc.buildout and either Setuptools or Distribute. "
"Defaults to PyPI."))
parser.add_option("--eggs",
help=("Specify a directory for storing eggs. Defaults to "
"a temporary directory that is deleted when the "
"bootstrap script completes."))
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", None, action="store", dest="config_file",
help=("Specify the path to the buildout configuration "
"file to be used."))
options, args = parser.parse_args()
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
args += ['-c', options.config_file]
if options.eggs:
eggs_dir = os.path.abspath(os.path.expanduser(options.eggs))
else:
eggs_dir = tempfile.mkdtemp()
if options.setup_source is None:
if options.use_distribute:
options.setup_source = distribute_source
else:
options.setup_source = setuptools_source
if options.accept_buildout_test_releases:
args.append('buildout:accept-buildout-test-releases=true')
args.append('bootstrap')
try:
import pkg_resources
import setuptools # A flag. Sometimes pkg_resources is installed alone.
if not hasattr(pkg_resources, '_distribute'):
raise ImportError
except ImportError:
ez_code = urllib2.urlopen(
options.setup_source).read().replace('\r\n', '\n')
ez = {}
exec ez_code in ez
setup_args = dict(to_dir=eggs_dir, download_delay=0)
if options.download_base:
setup_args['download_base'] = options.download_base
if options.use_distribute:
setup_args['no_fake'] = True
ez['use_setuptools'](**setup_args)
if 'pkg_resources' in sys.modules:
reload(sys.modules['pkg_resources'])
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
cmd = [quote(sys.executable),
'-c',
quote('from setuptools.command.easy_install import main; main()'),
'-mqNxd',
quote(eggs_dir)]
if not has_broken_dash_S:
cmd.insert(1, '-S')
find_links = options.download_base
if not find_links:
find_links = os.environ.get('bootstrap-testing-find-links')
if find_links:
cmd.extend(['-f', quote(find_links)])
if options.use_distribute:
setup_requirement = 'distribute'
else:
setup_requirement = 'setuptools'
ws = pkg_resources.working_set
setup_requirement_path = ws.find(
pkg_resources.Requirement.parse(setup_requirement)).location
env = dict(
os.environ,
PYTHONPATH=setup_requirement_path)
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
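    # Illustration (hedged): with old-style pkg_resources version parsing,
    # '1.0' parses to parts ending in '*final' and passes _final_version,
    # while '1.0b1' contains a '*b' part and is rejected as a pre-release.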
index = setuptools.package_index.PackageIndex(
search_path=[setup_requirement_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
if is_jython:
import subprocess
exitcode = subprocess.Popen(cmd, env=env).wait()
else: # Windows prefers this, apparently; otherwise we would prefer subprocess
exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env]))
if exitcode != 0:
sys.stdout.flush()
sys.stderr.flush()
print ("An error occurred when trying to install zc.buildout. "
"Look above this message for any errors that "
"were output by easy_install.")
sys.exit(exitcode)
ws.add_entry(eggs_dir)
ws.require(requirement)
import zc.buildout.buildout
zc.buildout.buildout.main(args)
if not options.eggs: # clean up temporary egg directory
shutil.rmtree(eggs_dir)
|
[] |
[] |
[
"bootstrap-testing-find-links"
] |
[]
|
["bootstrap-testing-find-links"]
|
python
| 1 | 0 | |
integration/util/k8s.go
|
package util
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"os"
"code.cloudfoundry.org/cfhttp/v2"
"code.cloudfoundry.org/eirini"
ginkgoconfig "github.com/onsi/ginkgo/config"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
const randUpperBound = 100000000
func CreateRandomNamespace(clientset kubernetes.Interface) string {
namespace := fmt.Sprintf("opi-integration-test-%d-%d", rand.Intn(randUpperBound), ginkgoconfig.GinkgoConfig.ParallelNode)
for namespaceExists(namespace, clientset) {
namespace = fmt.Sprintf("opi-integration-test-%d-%d", rand.Intn(randUpperBound), ginkgoconfig.GinkgoConfig.ParallelNode)
}
createNamespace(namespace, clientset)
return namespace
}
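// Example (hedged sketch, not part of the original file): a suite would
// typically pair these helpers to get an isolated namespace per test run:
//
//   namespace := CreateRandomNamespace(clientset)
//   defer func() { _ = DeleteNamespace(namespace, clientset) }()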
func namespaceExists(namespace string, clientset kubernetes.Interface) bool {
_, err := clientset.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
return err == nil
}
func createNamespace(namespace string, clientset kubernetes.Interface) {
namespaceSpec := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
if _, err := clientset.CoreV1().Namespaces().Create(namespaceSpec); err != nil {
panic(err)
}
}
func CreatePodCreationPSP(namespace, pspName string, clientset kubernetes.Interface) error {
roleName := "use-psp"
_, err := clientset.PolicyV1beta1().PodSecurityPolicies().Create(&policyv1.PodSecurityPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: pspName,
Annotations: map[string]string{
"seccomp.security.alpha.kubernetes.io/allowedProfileNames": "runtime/default",
"seccomp.security.alpha.kubernetes.io/defaultProfileName": "runtime/default",
},
},
Spec: policyv1.PodSecurityPolicySpec{
Privileged: false,
RunAsUser: policyv1.RunAsUserStrategyOptions{
Rule: policyv1.RunAsUserStrategyRunAsAny,
},
SELinux: policyv1.SELinuxStrategyOptions{
Rule: policyv1.SELinuxStrategyRunAsAny,
},
SupplementalGroups: policyv1.SupplementalGroupsStrategyOptions{
Rule: policyv1.SupplementalGroupsStrategyMustRunAs,
Ranges: []policyv1.IDRange{{
Min: 1,
Max: 65535,
}},
},
FSGroup: policyv1.FSGroupStrategyOptions{
Rule: policyv1.FSGroupStrategyMustRunAs,
Ranges: []policyv1.IDRange{{
Min: 1,
Max: 65535,
}},
},
},
})
if err != nil {
return err
}
_, err = clientset.RbacV1().Roles(namespace).Create(&rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: namespace,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{"policy"},
Resources: []string{"podsecuritypolicies"},
ResourceNames: []string{pspName},
Verbs: []string{"use"},
},
},
})
if err != nil {
return err
}
_, err = clientset.RbacV1().RoleBindings(namespace).Create(&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "default-account-psp",
Namespace: namespace,
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: roleName,
},
Subjects: []rbacv1.Subject{{
Kind: rbacv1.ServiceAccountKind,
Name: "default",
Namespace: namespace,
}},
})
return err
}
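// Example (hedged; the PSP name "integration-psp" is made up): granting the
// default service account use of a pod security policy before creating pods:
//
//   err := CreatePodCreationPSP(namespace, "integration-psp", clientset)
//   // ... create pods ...
//   _ = DeletePSP("integration-psp", clientset)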
func CreateEmptySecret(namespace, secretName string, clientset kubernetes.Interface) error {
_, err := clientset.CoreV1().Secrets(namespace).Create(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: namespace,
},
})
return err
}
func DeleteNamespace(namespace string, clientset kubernetes.Interface) error {
return clientset.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{})
}
func DeletePSP(name string, clientset kubernetes.Interface) error {
return clientset.PolicyV1beta1().PodSecurityPolicies().Delete(name, &metav1.DeleteOptions{})
}
func MakeTestHTTPClient() (*http.Client, error) {
bs, err := ioutil.ReadFile(PathToTestFixture("cert"))
if err != nil {
return nil, err
}
clientCert, err := tls.LoadX509KeyPair(PathToTestFixture("cert"), PathToTestFixture("key"))
if err != nil {
return nil, err
}
certPool := x509.NewCertPool()
if !certPool.AppendCertsFromPEM(bs) {
return nil, err
}
tlsConfig := &tls.Config{
RootCAs: certPool,
Certificates: []tls.Certificate{clientCert},
}
httpClient := cfhttp.NewClient(cfhttp.WithTLSConfig(tlsConfig))
return httpClient, nil
}
func DefaultEiriniConfig(namespace, secretName string) *eirini.Config {
return &eirini.Config{
Properties: eirini.Properties{
KubeConfig: eirini.KubeConfig{
ConfigPath: os.Getenv("INTEGRATION_KUBECONFIG"),
Namespace: namespace,
},
CCCAPath: PathToTestFixture("cert"),
CCCertPath: PathToTestFixture("cert"),
CCKeyPath: PathToTestFixture("key"),
ServerCertPath: PathToTestFixture("cert"),
ServerKeyPath: PathToTestFixture("key"),
ClientCAPath: PathToTestFixture("cert"),
TLSPort: 61000 + rand.Intn(1000) + ginkgoconfig.GinkgoConfig.ParallelNode,
CCCertsSecretName: secretName,
DownloaderImage: "docker.io/eirini/integration_test_staging",
ExecutorImage: "docker.io/eirini/integration_test_staging",
UploaderImage: "docker.io/eirini/integration_test_staging",
},
}
}
func CreateOpiConfigFromFixtures(config *eirini.Config) (*os.File, error) {
bs, err := yaml.Marshal(config)
if err != nil {
return nil, err
}
return createConfigFile(bs)
}
func createConfigFile(yamlBytes []byte) (*os.File, error) {
configFile, err := ioutil.TempFile("", "config.yml")
if err != nil {
return nil, err
}
err = ioutil.WriteFile(configFile.Name(), yamlBytes, os.ModePerm)
return configFile, err
}
func PathToTestFixture(relativePath string) string {
cwd, err := os.Getwd()
if err != nil {
panic(err)
}
return fmt.Sprintf("%s/../fixtures/%s", cwd, relativePath)
}
|
[
"\"INTEGRATION_KUBECONFIG\""
] |
[] |
[
"INTEGRATION_KUBECONFIG"
] |
[]
|
["INTEGRATION_KUBECONFIG"]
|
go
| 1 | 0 | |
pkg/boot/grub/echo_test.go
|
// Copyright 2017-2020 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package grub
import (
"bytes"
"context"
"flag"
"fmt"
"io/ioutil"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"github.com/u-root/u-root/pkg/curl"
)
var update = flag.Bool("run-bash", false, "run bash and update golden file")
// TestMain is used to wrap a Go utility in the same test binary.
// When the environment variable BE_ECHO is set to 1, the binary will echo its
// parameters using the %#v format string, so the parameters are escaped and can
// be recovered.
func TestMain(m *testing.M) {
if os.Getenv("BE_ECHO") == "1" {
fmt.Printf("echo:%#v\n", os.Args[1:])
return
} // call flag.Parse() here if TestMain uses flags
os.Exit(m.Run())
}
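// The pattern above re-executes the test binary itself so a helper process is
// available without shipping a separate executable. A minimal caller sketch
// (hedged; TestHelperEcho below is the real one):
//
//   cmd := exec.Command(os.Args[0], "some-arg") // os.Args[0] is this test binary
//   cmd.Env = append(os.Environ(), "BE_ECHO=1") // selects the echo branch in TestMain
//   out, _ := cmd.Output()                      // out holds the %#v-escaped arguments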
// TestHelperEcho tests the echo wrapper in TestMain
func TestHelperEcho(t *testing.T) {
cmd := exec.Command(os.Args[0], "echothis")
cmd.Env = append(os.Environ(), "BE_ECHO=1")
out, err := cmd.Output()
t.Logf("%q\n", out)
if err != nil {
t.Fatalf("process ran with err %v", err)
}
want := "echo:[]string{\"echothis\"}\n"
if string(out) != want {
t.Fatalf("wrong process output got `%s` want `%s`", out, want)
}
}
// TestBashWrapper tests that "./testdata/bash_wrapper.sh" works as expected.
// bash_wrapper.sh is a script that replaces the internal command echo with its
// first argument and sources its second argument.
// The goal is to be able to run grub's test scripts; see TestGrubTests.
func TestBashWrapper(t *testing.T) {
if !*update {
t.Skip("use -run-bash flag to run this")
}
cmd := exec.Command("./testdata/bash_wrapper.sh", os.Args[0], "./testdata/test_bash_wrapper.sh")
cmd.Env = append(os.Environ(), "BE_ECHO=1")
out, err := cmd.Output()
t.Logf("%q\n", out)
if err != nil {
t.Fatalf("process ran with err %v", err)
}
want := "echo:[]string{\"param1\", \"param2\"}\n"
if string(out) != want {
t.Fatalf("wrong process output got `%s` want `%s`", out, want)
}
}
// TestGrubTests runs tests imported from the grub source to check our parser.
// grub has tests in the form of scripts that are run both by grub and bash; they
// mostly use echo, and the test then compares the output of both runs.
// In our case we don't want to compare the output of echo, but to get the tokens
// passed to echo. So we replace the echo command in bash with the wrapper (see
// above). We can then compare the bash output to our parser output.
// Also, to avoid keeping the dependency on bash, the outputs are saved in the
// golden files. One must run the test with '-run-bash' to update the golden
// files in case new tests are added or the echo format is changed.
func TestGrubTests(t *testing.T) {
files, err := filepath.Glob("testdata/*.in")
if err != nil {
t.Fatal(err)
}
for _, file := range files {
name := strings.TrimSuffix(filepath.Base(file), ".in")
t.Run(name, func(t *testing.T) {
golden := strings.TrimSuffix(file, ".in") + ".out"
var out []byte
if *update {
cmd := exec.Command("./testdata/bash_wrapper.sh", os.Args[0], file)
cmd.Env = append(os.Environ(), "BE_ECHO=1")
out, err = cmd.Output()
//t.Logf("%s\n", out)
if err != nil {
t.Fatalf("process ran with err %v", err)
}
} else {
out, err = ioutil.ReadFile(golden)
if err != nil {
t.Fatalf("error loading file `%s`, %v", golden, err)
}
}
// parse with our parser and compare
var b bytes.Buffer
wd := &url.URL{
Scheme: "file",
Path: "./testdata",
}
c := newParser(wd, curl.DefaultSchemes)
c.W = &b
script, err := ioutil.ReadFile(file)
if err != nil {
t.Fatalf("error loading file `%s`, %v", file, err)
}
err = c.append(context.Background(), string(script))
if err != nil {
t.Fatalf("error parsing file `%s`, %v", file, err)
}
if b.String() != string(out) {
t.Fatalf("wrong script parsing output got `%s` want `%s`", b.String(), string(out))
}
// update/create golden file on success
if *update {
err := ioutil.WriteFile(golden, out, 0644)
if err != nil {
t.Fatalf("error writing file `%s`, %v", file, err)
}
}
})
}
}
|
[
"\"BE_ECHO\""
] |
[] |
[
"BE_ECHO"
] |
[]
|
["BE_ECHO"]
|
go
| 1 | 0 | |
topi/python/topi/generic/nn.py
|
# pylint: disable=invalid-name,unused-argument
"""Generic nn operators"""
from __future__ import absolute_import as _abs
import tvm
from .. import cpp
def _default_schedule(outs, auto_inline):
"""Default schedule for llvm."""
target = tvm.target.current_target(allow_none=False)
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
if target.target_name != "llvm":
raise RuntimeError("schedule not registered for '%s'" % target)
s = tvm.create_schedule([x.op for x in outs])
if auto_inline:
x = outs[0]
tvm.schedule.AutoInlineInjective(s)
s[x].fuse(s[x].op.axis)
return s
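# Illustration (hedged, not part of the original module): every schedule below
# is declared with @tvm.target.generic_func, so targets other than llvm can
# register an override while llvm falls back to _default_schedule, e.g.:
#
#   @schedule_conv2d_nchw.register(["cuda"])
#   def _schedule_conv2d_nchw_cuda(outs):
#       ...  # backend-specific schedule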
@tvm.target.generic_func
def schedule_conv2d_nchw(outs):
"""Schedule for conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_nhwc(outs):
"""Schedule for conv2d_nhwc
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_NCHWc(num_filter, kernel_size, strides,
padding, layout, out_layout, outs):
"""Schedule for conv2d_NCHW[x]c
Parameters
----------
num_filter : int
The number of filters, i.e., the number of output channels.
kernel_size : tuple of int
(kernel_height, kernel_width)
strides : tuple of int
(stride_of_height, stride_of_width)
padding : tuple of int
(pad_of_height, pad_of_width)
layout : str
Input data layout
out_layout : str
Output data layout
outs : Array of Tensor
The computation graph description of conv2d_NCHWc
in the format of an array of tensors.
Returns
-------
sch : Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_transpose_nchw(outs):
"""Schedule for conv2d_transpose_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_transpose_nchw
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_depthwise_conv2d_nchw(outs):
"""Schedule for depthwise_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_depthwise_conv2d_nhwc(outs):
"""Schedule for depthwise_conv2d_nhwc
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_reduce")
def schedule_reduce(outs):
"""Schedule for reduction
Parameters
----------
outs: Array of Tensor
The computation graph description of reduce
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, True)
@tvm.target.override_native_generic_func("schedule_softmax")
def schedule_softmax(outs):
"""Schedule for softmax
Parameters
----------
outs: Array of Tensor
The computation graph description of softmax
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_dense")
def schedule_dense(outs):
"""Schedule for dense
Parameters
----------
outs: Array of Tensor
The computation graph description of dense
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_pool")
def schedule_pool(outs):
"""Schedule for pool
Parameters
----------
outs: Array of Tensor
The computation graph description of pool
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_global_pool")
def schedule_global_pool(outs):
"""Schedule for global pool
Parameters
----------
outs: Array of Tensor
The computation graph description of global pool
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_binarize_pack")
def schedule_binarize_pack(outs):
"""Schedule for binarize_pack
Parameters
----------
outs: Array of Tensor
The computation graph description of binarize_pack
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_binary_dense")
def schedule_binary_dense(outs):
"""Schedule for binary_dense
Parameters
----------
outs: Array of Tensor
The computation graph description of binary_dense
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_lrn(outs):
"""Schedule for lrn
Parameters
----------
outs: Array of Tensor
The computation graph description of lrn
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
target = tvm.target.current_target(allow_none=False)
cpp_target = cpp.TEST_create_target(target.target_name)
return cpp.generic.default_schedule(cpp_target, outs, False)
@tvm.target.generic_func
def schedule_l2_normalize(outs):
"""Schedule for l2 normalize
Parameters
----------
outs: Array of Tensor
The computation graph description of l2 normalize
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
target = tvm.target.current_target(allow_none=False)
cpp_target = cpp.TEST_create_target(target.target_name)
return cpp.generic.default_schedule(cpp_target, outs, False)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
vendor/cloud.google.com/go/storage/storage.go
|
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"bytes"
"context"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"encoding/pem"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
"cloud.google.com/go/internal/version"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
raw "google.golang.org/api/storage/v1"
htransport "google.golang.org/api/transport/http"
)
var (
// ErrBucketNotExist indicates that the bucket does not exist.
ErrBucketNotExist = errors.New("storage: bucket doesn't exist")
// ErrObjectNotExist indicates that the object does not exist.
ErrObjectNotExist = errors.New("storage: object doesn't exist")
)
var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", version.Repo)
const (
// ScopeFullControl grants permissions to manage your
// data and permissions in Google Cloud Storage.
ScopeFullControl = raw.DevstorageFullControlScope
// ScopeReadOnly grants permissions to
// view your data in Google Cloud Storage.
ScopeReadOnly = raw.DevstorageReadOnlyScope
// ScopeReadWrite grants permissions to manage your
// data in Google Cloud Storage.
ScopeReadWrite = raw.DevstorageReadWriteScope
)
var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)
func setClientHeader(headers http.Header) {
headers.Set("x-goog-api-client", xGoogHeader)
}
// Client is a client for interacting with Google Cloud Storage.
//
// Clients should be reused instead of created as needed.
// The methods of Client are safe for concurrent use by multiple goroutines.
type Client struct {
hc *http.Client
raw *raw.Service
// Scheme describes the scheme under the current host.
scheme string
// envHost is the host set in the STORAGE_EMULATOR_HOST environment variable.
envHost string
// ReadHost is the default host used on the reader.
readHost string
}
// NewClient creates a new Google Cloud Storage client.
// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
var host, readHost, scheme string
if host = os.Getenv("STORAGE_EMULATOR_HOST"); host == "" {
scheme = "https"
readHost = "storage.googleapis.com"
// Prepend default options to avoid overriding options passed by the user.
opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl), option.WithUserAgent(userAgent)}, opts...)
} else {
scheme = "http"
readHost = host
opts = append([]option.ClientOption{option.WithoutAuthentication()}, opts...)
}
hc, ep, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
rawService, err := raw.NewService(ctx, option.WithHTTPClient(hc))
if err != nil {
return nil, fmt.Errorf("storage client: %v", err)
}
if ep == "" {
// Override the default value for BasePath from the raw client.
// TODO: remove when the raw client uses this endpoint as its default (~end of 2020)
rawService.BasePath = "https://storage.googleapis.com/storage/v1/"
} else {
// If the endpoint has been set explicitly, use this for the BasePath
// as well as readHost
rawService.BasePath = ep
u, err := url.Parse(ep)
if err != nil {
return nil, fmt.Errorf("supplied endpoint %v is not valid: %v", ep, err)
}
readHost = u.Host
}
return &Client{
hc: hc,
raw: rawService,
scheme: scheme,
envHost: host,
readHost: readHost,
}, nil
}
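// Example (hedged sketch, from a caller's perspective):
//
//   ctx := context.Background()
//   client, err := storage.NewClient(ctx)
//   if err != nil {
//       // TODO: handle error
//   }
//   defer client.Close()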
// Close closes the Client.
//
// Close need not be called at program exit.
func (c *Client) Close() error {
// Set fields to nil so that subsequent uses will panic.
c.hc = nil
c.raw = nil
return nil
}
// SigningScheme determines the API version to use when signing URLs.
type SigningScheme int
const (
// SigningSchemeDefault is presently V2 and will change to V4 in the future.
SigningSchemeDefault SigningScheme = iota
// SigningSchemeV2 uses the V2 scheme to sign URLs.
SigningSchemeV2
// SigningSchemeV4 uses the V4 scheme to sign URLs.
SigningSchemeV4
)
// SignedURLOptions allows you to restrict the access to the signed URL.
type SignedURLOptions struct {
// GoogleAccessID represents the authorizer of the signed URL generation.
// It is typically the Google service account client email address from
// the Google Developers Console in the form of "[email protected]".
// Required.
GoogleAccessID string
// PrivateKey is the Google service account private key. It is obtainable
// from the Google Developers Console.
// At https://console.developers.google.com/project/<your-project-id>/apiui/credential,
// create a service account client ID or reuse one of your existing service account
// credentials. Click on the "Generate new P12 key" to generate and download
// a new private key. Once you download the P12 file, use the following command
// to convert it into a PEM file.
//
// $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
//
// Provide the contents of the PEM file as a byte slice.
// Exactly one of PrivateKey or SignBytes must be non-nil.
PrivateKey []byte
// SignBytes is a function for implementing custom signing. For example, if
// your application is running on Google App Engine, you can use
// appengine's internal signing function:
// ctx := appengine.NewContext(request)
// acc, _ := appengine.ServiceAccount(ctx)
// url, err := SignedURL("bucket", "object", &SignedURLOptions{
// GoogleAccessID: acc,
// SignBytes: func(b []byte) ([]byte, error) {
// _, signedBytes, err := appengine.SignBytes(ctx, b)
// return signedBytes, err
// },
// // etc.
// })
//
// Exactly one of PrivateKey or SignBytes must be non-nil.
SignBytes func([]byte) ([]byte, error)
// Method is the HTTP method to be used with the signed URL.
// Signed URLs can be used with GET, HEAD, PUT, and DELETE requests.
// Required.
Method string
// Expires is the expiration time on the signed URL. It must be
// a datetime in the future. For SigningSchemeV4, the expiration may be no
// more than seven days in the future.
// Required.
Expires time.Time
// ContentType is the content type header the client must provide
// to use the generated signed URL.
// Optional.
ContentType string
// Headers is a list of extension headers the client must provide
// in order to use the generated signed URL.
// Optional.
Headers []string
// MD5 is the base64 encoded MD5 checksum of the file.
// If provided, the client should provide the exact value on the request
// header in order to use the signed URL.
// Optional.
MD5 string
// Scheme determines the version of URL signing to use. Default is
// SigningSchemeV2.
Scheme SigningScheme
}
var (
tabRegex = regexp.MustCompile(`[\t]+`)
// I was tempted to call this spacex. :)
spaceRegex = regexp.MustCompile(` +`)
canonicalHeaderRegexp = regexp.MustCompile(`(?i)^(x-goog-[^:]+):(.*)?$`)
excludedCanonicalHeaders = map[string]bool{
"x-goog-encryption-key": true,
"x-goog-encryption-key-sha256": true,
}
)
// v2SanitizeHeaders applies the specifications for canonical extension headers at
// https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers.
func v2SanitizeHeaders(hdrs []string) []string {
headerMap := map[string][]string{}
for _, hdr := range hdrs {
// No leading or trailing whitespaces.
sanitizedHeader := strings.TrimSpace(hdr)
var header, value string
// Only keep canonical headers, discard any others.
headerMatches := canonicalHeaderRegexp.FindStringSubmatch(sanitizedHeader)
if len(headerMatches) == 0 {
continue
}
header = headerMatches[1]
value = headerMatches[2]
header = strings.ToLower(strings.TrimSpace(header))
value = strings.TrimSpace(value)
if excludedCanonicalHeaders[header] {
// Do not keep any deliberately excluded canonical headers when signing.
continue
}
if len(value) > 0 {
// Remove duplicate headers by appending the values of duplicates
// in their order of appearance.
headerMap[header] = append(headerMap[header], value)
}
}
var sanitizedHeaders []string
for header, values := range headerMap {
// There should be no spaces around the colon separating the header name
// from the header value or around the values themselves. The values
// should be separated by commas.
//
// NOTE: The semantics for headers without a value are not clear.
// However from specifications these should be edge-cases anyway and we
// should assume that there will be no canonical headers using empty
// values. Any such headers are discarded at the regexp stage above.
sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ",")))
}
sort.Strings(sanitizedHeaders)
return sanitizedHeaders
}
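// Illustration of the V2 sanitization above (hedged, made-up values):
//
//   in:  []string{" X-Goog-Meta-A : one ", "x-goog-meta-a: two", "x-goog-encryption-key: k"}
//   out: []string{"x-goog-meta-a:one,two"}
//
// Deliberately excluded encryption headers are dropped and duplicate keys are
// merged in order of appearance before the final sort.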
// v4SanitizeHeaders applies the specifications for canonical extension headers
// at https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers.
//
// V4 does a couple things differently from V2:
// - Headers get sorted by key, instead of by key:value. We do this in
// signedURLV4.
// - There's no canonical regexp: we simply split headers on :.
// - We don't exclude canonical headers.
// - We replace leading and trailing spaces in header values, like v2, but also
// all intermediate space duplicates get stripped. That is, there's only ever
// a single consecutive space.
func v4SanitizeHeaders(hdrs []string) []string {
headerMap := map[string][]string{}
for _, hdr := range hdrs {
// No leading or trailing whitespaces.
sanitizedHeader := strings.TrimSpace(hdr)
var key, value string
headerMatches := strings.Split(sanitizedHeader, ":")
if len(headerMatches) < 2 {
continue
}
key = headerMatches[0]
value = headerMatches[1]
key = strings.ToLower(strings.TrimSpace(key))
value = strings.TrimSpace(value)
value = string(spaceRegex.ReplaceAll([]byte(value), []byte(" ")))
value = string(tabRegex.ReplaceAll([]byte(value), []byte("\t")))
if len(value) > 0 {
// Remove duplicate headers by appending the values of duplicates
// in their order of appearance.
headerMap[key] = append(headerMap[key], value)
}
}
var sanitizedHeaders []string
for header, values := range headerMap {
// There should be no spaces around the colon separating the header name
// from the header value or around the values themselves. The values
// should be separated by commas.
//
// NOTE: The semantics for headers without a value are not clear.
// However from specifications these should be edge-cases anyway and we
// should assume that there will be no canonical headers using empty
// values. Any such headers are discarded at the regexp stage above.
sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ",")))
}
return sanitizedHeaders
}
// SignedURL returns a URL for the specified object. Signed URLs allow
// the users access to a restricted resource for a limited time without having a
// Google account or signing in. For more information about the signed
// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.
func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
now := utcNow()
if err := validateOptions(opts, now); err != nil {
return "", err
}
switch opts.Scheme {
case SigningSchemeV2:
opts.Headers = v2SanitizeHeaders(opts.Headers)
return signedURLV2(bucket, name, opts)
case SigningSchemeV4:
opts.Headers = v4SanitizeHeaders(opts.Headers)
return signedURLV4(bucket, name, opts, now)
default: // SigningSchemeDefault
opts.Headers = v2SanitizeHeaders(opts.Headers)
return signedURLV2(bucket, name, opts)
}
}
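// Example (hedged sketch): signing a short-lived GET URL with a PEM-encoded
// service-account key loaded elsewhere (pemBytes is assumed):
//
//   url, err := storage.SignedURL("my-bucket", "my-object", &storage.SignedURLOptions{
//       GoogleAccessID: "xxx@developer.gserviceaccount.com",
//       PrivateKey:     pemBytes,
//       Method:         "GET",
//       Expires:        time.Now().Add(15 * time.Minute),
//       Scheme:         storage.SigningSchemeV4,
//   })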
func validateOptions(opts *SignedURLOptions, now time.Time) error {
if opts == nil {
return errors.New("storage: missing required SignedURLOptions")
}
if opts.GoogleAccessID == "" {
return errors.New("storage: missing required GoogleAccessID")
}
if (opts.PrivateKey == nil) == (opts.SignBytes == nil) {
return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set")
}
if opts.Method == "" {
return errors.New("storage: missing required method option")
}
if opts.Expires.IsZero() {
return errors.New("storage: missing required expires option")
}
if opts.MD5 != "" {
md5, err := base64.StdEncoding.DecodeString(opts.MD5)
if err != nil || len(md5) != 16 {
return errors.New("storage: invalid MD5 checksum")
}
}
if opts.Scheme == SigningSchemeV4 {
cutoff := now.Add(604801 * time.Second) // 7 days + 1 second
if !opts.Expires.Before(cutoff) {
return errors.New("storage: expires must be within seven days from now")
}
}
return nil
}
const (
iso8601 = "20060102T150405Z"
yearMonthDay = "20060102"
)
// utcNow returns the current time in UTC and is a variable to allow for
// reassignment in tests to provide deterministic signed URL values.
var utcNow = func() time.Time {
return time.Now().UTC()
}
// extractHeaderNames takes in a series of key:value headers and returns the
// header names only.
func extractHeaderNames(kvs []string) []string {
var res []string
for _, header := range kvs {
nameValue := strings.Split(header, ":")
res = append(res, nameValue[0])
}
return res
}
// signedURLV4 creates a signed URL using the sigV4 algorithm.
func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (string, error) {
buf := &bytes.Buffer{}
fmt.Fprintf(buf, "%s\n", opts.Method)
u := &url.URL{Path: bucket}
if name != "" {
u.Path += "/" + name
}
// Note: we have to add a / here because GCS does so auto-magically, despite
// Go's EscapedPath not doing so (and we have to exactly match their
// canonical query).
fmt.Fprintf(buf, "/%s\n", u.EscapedPath())
headerNames := append(extractHeaderNames(opts.Headers), "host")
if opts.ContentType != "" {
headerNames = append(headerNames, "content-type")
}
if opts.MD5 != "" {
headerNames = append(headerNames, "content-md5")
}
sort.Strings(headerNames)
signedHeaders := strings.Join(headerNames, ";")
timestamp := now.Format(iso8601)
credentialScope := fmt.Sprintf("%s/auto/storage/goog4_request", now.Format(yearMonthDay))
canonicalQueryString := url.Values{
"X-Goog-Algorithm": {"GOOG4-RSA-SHA256"},
"X-Goog-Credential": {fmt.Sprintf("%s/%s", opts.GoogleAccessID, credentialScope)},
"X-Goog-Date": {timestamp},
"X-Goog-Expires": {fmt.Sprintf("%d", int(opts.Expires.Sub(now).Seconds()))},
"X-Goog-SignedHeaders": {signedHeaders},
}
fmt.Fprintf(buf, "%s\n", canonicalQueryString.Encode())
u.Host = "storage.googleapis.com"
var headersWithValue []string
headersWithValue = append(headersWithValue, "host:"+u.Host)
headersWithValue = append(headersWithValue, opts.Headers...)
if opts.ContentType != "" {
headersWithValue = append(headersWithValue, "content-type:"+strings.TrimSpace(opts.ContentType))
}
if opts.MD5 != "" {
headersWithValue = append(headersWithValue, "content-md5:"+strings.TrimSpace(opts.MD5))
}
canonicalHeaders := strings.Join(sortHeadersByKey(headersWithValue), "\n")
fmt.Fprintf(buf, "%s\n\n", canonicalHeaders)
fmt.Fprintf(buf, "%s\n", signedHeaders)
fmt.Fprint(buf, "UNSIGNED-PAYLOAD")
sum := sha256.Sum256(buf.Bytes())
hexDigest := hex.EncodeToString(sum[:])
signBuf := &bytes.Buffer{}
fmt.Fprint(signBuf, "GOOG4-RSA-SHA256\n")
fmt.Fprintf(signBuf, "%s\n", timestamp)
fmt.Fprintf(signBuf, "%s\n", credentialScope)
fmt.Fprintf(signBuf, "%s", hexDigest)
signBytes := opts.SignBytes
if opts.PrivateKey != nil {
key, err := parseKey(opts.PrivateKey)
if err != nil {
return "", err
}
signBytes = func(b []byte) ([]byte, error) {
sum := sha256.Sum256(b)
return rsa.SignPKCS1v15(
rand.Reader,
key,
crypto.SHA256,
sum[:],
)
}
}
b, err := signBytes(signBuf.Bytes())
if err != nil {
return "", err
}
signature := hex.EncodeToString(b)
canonicalQueryString.Set("X-Goog-Signature", string(signature))
u.Scheme = "https"
u.RawQuery = canonicalQueryString.Encode()
return u.String(), nil
}
// takes a list of headerKey:headervalue1,headervalue2,etc and sorts by header
// key.
func sortHeadersByKey(hdrs []string) []string {
headersMap := map[string]string{}
var headersKeys []string
for _, h := range hdrs {
parts := strings.Split(h, ":")
k := parts[0]
v := parts[1]
headersMap[k] = v
headersKeys = append(headersKeys, k)
}
sort.Strings(headersKeys)
var sorted []string
for _, k := range headersKeys {
v := headersMap[k]
sorted = append(sorted, fmt.Sprintf("%s:%s", k, v))
}
return sorted
}
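// Illustration (hedged): sorting is by header key only, values ride along:
//
//   in:  []string{"host:example.com", "content-type:text/plain"}
//   out: []string{"content-type:text/plain", "host:example.com"}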
func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) {
signBytes := opts.SignBytes
if opts.PrivateKey != nil {
key, err := parseKey(opts.PrivateKey)
if err != nil {
return "", err
}
signBytes = func(b []byte) ([]byte, error) {
sum := sha256.Sum256(b)
return rsa.SignPKCS1v15(
rand.Reader,
key,
crypto.SHA256,
sum[:],
)
}
}
u := &url.URL{
Path: fmt.Sprintf("/%s/%s", bucket, name),
}
buf := &bytes.Buffer{}
fmt.Fprintf(buf, "%s\n", opts.Method)
fmt.Fprintf(buf, "%s\n", opts.MD5)
fmt.Fprintf(buf, "%s\n", opts.ContentType)
fmt.Fprintf(buf, "%d\n", opts.Expires.Unix())
if len(opts.Headers) > 0 {
fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n"))
}
fmt.Fprintf(buf, "%s", u.String())
b, err := signBytes(buf.Bytes())
if err != nil {
return "", err
}
encoded := base64.StdEncoding.EncodeToString(b)
u.Scheme = "https"
u.Host = "storage.googleapis.com"
q := u.Query()
q.Set("GoogleAccessId", opts.GoogleAccessID)
q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix()))
q.Set("Signature", string(encoded))
u.RawQuery = q.Encode()
return u.String(), nil
}
// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
// Use BucketHandle.Object to get a handle.
type ObjectHandle struct {
c *Client
bucket string
object string
acl ACLHandle
gen int64 // a negative value indicates latest
conds *Conditions
encryptionKey []byte // AES-256 key
userProject string // for requester-pays buckets
readCompressed bool // Accept-Encoding: gzip
}
// ACL provides access to the object's access control list.
// This controls who can read and write this object.
// This call does not perform any network operations.
func (o *ObjectHandle) ACL() *ACLHandle {
return &o.acl
}
// Generation returns a new ObjectHandle that operates on a specific generation
// of the object.
// By default, the handle operates on the latest generation. Not
// all operations work when given a specific generation; check the API
// endpoints at https://cloud.google.com/storage/docs/json_api/ for details.
func (o *ObjectHandle) Generation(gen int64) *ObjectHandle {
o2 := *o
o2.gen = gen
return &o2
}
// If returns a new ObjectHandle that applies a set of preconditions.
// Preconditions already set on the ObjectHandle are ignored.
// Operations on the new handle will return an error if the preconditions are not
// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions
// for more details.
func (o *ObjectHandle) If(conds Conditions) *ObjectHandle {
o2 := *o
o2.conds = &conds
return &o2
}
// Key returns a new ObjectHandle that uses the supplied encryption
// key to encrypt and decrypt the object's contents.
//
// Encryption key must be a 32-byte AES-256 key.
// See https://cloud.google.com/storage/docs/encryption for details.
func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle {
o2 := *o
o2.encryptionKey = encryptionKey
return &o2
}
// Attrs returns meta information about the object.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx)
if err := applyConds("Attrs", o.gen, o.conds, call); err != nil {
return nil, err
}
if o.userProject != "" {
call.UserProject(o.userProject)
}
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
return nil, ErrObjectNotExist
}
if err != nil {
return nil, err
}
return newObject(obj), nil
}
// Update updates an object with the provided attributes.
// All zero-value attributes are ignored.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
var attrs ObjectAttrs
// Lists of fields to send, and set to null, in the JSON.
var forceSendFields, nullFields []string
if uattrs.ContentType != nil {
attrs.ContentType = optional.ToString(uattrs.ContentType)
// For ContentType, sending the empty string is a no-op.
// Instead we send a null.
if attrs.ContentType == "" {
nullFields = append(nullFields, "ContentType")
} else {
forceSendFields = append(forceSendFields, "ContentType")
}
}
if uattrs.ContentLanguage != nil {
attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage)
// For ContentLanguage it's an error to send the empty string.
// Instead we send a null.
if attrs.ContentLanguage == "" {
nullFields = append(nullFields, "ContentLanguage")
} else {
forceSendFields = append(forceSendFields, "ContentLanguage")
}
}
if uattrs.ContentEncoding != nil {
attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding)
forceSendFields = append(forceSendFields, "ContentEncoding")
}
if uattrs.ContentDisposition != nil {
attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition)
forceSendFields = append(forceSendFields, "ContentDisposition")
}
if uattrs.CacheControl != nil {
attrs.CacheControl = optional.ToString(uattrs.CacheControl)
forceSendFields = append(forceSendFields, "CacheControl")
}
if uattrs.EventBasedHold != nil {
attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold)
forceSendFields = append(forceSendFields, "EventBasedHold")
}
if uattrs.TemporaryHold != nil {
attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold)
forceSendFields = append(forceSendFields, "TemporaryHold")
}
if uattrs.Metadata != nil {
attrs.Metadata = uattrs.Metadata
if len(attrs.Metadata) == 0 {
// Sending the empty map is a no-op. We send null instead.
nullFields = append(nullFields, "Metadata")
} else {
forceSendFields = append(forceSendFields, "Metadata")
}
}
if uattrs.ACL != nil {
attrs.ACL = uattrs.ACL
// It's an error to attempt to delete the ACL, so
// we don't append to nullFields here.
forceSendFields = append(forceSendFields, "Acl")
}
rawObj := attrs.toRawObject(o.bucket)
rawObj.ForceSendFields = forceSendFields
rawObj.NullFields = nullFields
call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx)
if err := applyConds("Update", o.gen, o.conds, call); err != nil {
return nil, err
}
if o.userProject != "" {
call.UserProject(o.userProject)
}
if uattrs.PredefinedACL != "" {
call.PredefinedAcl(uattrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
return nil, ErrObjectNotExist
}
if err != nil {
return nil, err
}
return newObject(obj), nil
}
// BucketName returns the name of the bucket.
func (o *ObjectHandle) BucketName() string {
return o.bucket
}
// ObjectName returns the name of the object.
func (o *ObjectHandle) ObjectName() string {
return o.object
}
// ObjectAttrsToUpdate is used to update the attributes of an object.
// Only fields set to non-nil values will be updated.
// Set a field to its zero value to delete it.
//
// For example, to change ContentType and delete ContentEncoding and
// Metadata, use
// ObjectAttrsToUpdate{
// ContentType: "text/html",
// ContentEncoding: "",
// Metadata: map[string]string{},
// }
type ObjectAttrsToUpdate struct {
EventBasedHold optional.Bool
TemporaryHold optional.Bool
ContentType optional.String
ContentLanguage optional.String
ContentEncoding optional.String
ContentDisposition optional.String
CacheControl optional.String
Metadata map[string]string // set to map[string]string{} to delete
ACL []ACLRule
// If not empty, applies a predefined set of access controls. ACL must be nil.
// See https://cloud.google.com/storage/docs/json_api/v1/objects/patch.
PredefinedACL string
}
// Delete deletes the single specified object.
func (o *ObjectHandle) Delete(ctx context.Context) error {
if err := o.validate(); err != nil {
return err
}
call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx)
if err := applyConds("Delete", o.gen, o.conds, call); err != nil {
return err
}
if o.userProject != "" {
call.UserProject(o.userProject)
}
// Encryption doesn't apply to Delete.
setClientHeader(call.Header())
err := runWithRetry(ctx, func() error { return call.Do() })
switch e := err.(type) {
case nil:
return nil
case *googleapi.Error:
if e.Code == http.StatusNotFound {
return ErrObjectNotExist
}
}
return err
}
// ReadCompressed when true causes the read to happen without decompressing.
func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle {
o2 := *o
o2.readCompressed = compressed
return &o2
}
// NewWriter returns a storage Writer that writes to the GCS object
// associated with this ObjectHandle.
//
// A new object will be created unless an object with this name already exists.
// Otherwise any previous object with the same name will be replaced.
// The object will not be available (and any previous object will remain)
// until Close has been called.
//
// Attributes can be set on the object by modifying the returned Writer's
// ObjectAttrs field before the first call to Write. If no ContentType
// attribute is specified, the content type will be automatically sniffed
// using net/http.DetectContentType.
//
// It is the caller's responsibility to call Close when writing is done. To
// stop writing without saving the data, cancel the context.
func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
return &Writer{
ctx: ctx,
o: o,
donec: make(chan struct{}),
ObjectAttrs: ObjectAttrs{Name: o.object},
ChunkSize: googleapi.DefaultUploadChunkSize,
}
}
func (o *ObjectHandle) validate() error {
if o.bucket == "" {
return errors.New("storage: bucket name is empty")
}
if o.object == "" {
return errors.New("storage: object name is empty")
}
if !utf8.ValidString(o.object) {
return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
}
return nil
}
// parseKey converts the binary contents of a private key file to an
// *rsa.PrivateKey. It detects whether the private key is in a PEM container or
// not. If so, it extracts the private key from PEM container before
// conversion. It only supports PEM containers with no passphrase.
func parseKey(key []byte) (*rsa.PrivateKey, error) {
if block, _ := pem.Decode(key); block != nil {
key = block.Bytes
}
parsedKey, err := x509.ParsePKCS8PrivateKey(key)
if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(key)
if err != nil {
return nil, err
}
}
parsed, ok := parsedKey.(*rsa.PrivateKey)
if !ok {
return nil, errors.New("oauth2: private key is invalid")
}
return parsed, nil
}
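// Illustration (hedged): parseKey accepts either a PEM container or raw DER
// bytes for an unencrypted RSA key, so both forms work:
//
//   key, err := parseKey(pemBytes) // "-----BEGIN PRIVATE KEY-----" ...
//   key, err := parseKey(derBytes) // already-decoded PKCS#8 or PKCS#1 bytes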
// toRawObject copies the editable attributes from o to the raw library's Object type.
func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
var ret string
if !o.RetentionExpirationTime.IsZero() {
ret = o.RetentionExpirationTime.Format(time.RFC3339)
}
return &raw.Object{
Bucket: bucket,
Name: o.Name,
EventBasedHold: o.EventBasedHold,
TemporaryHold: o.TemporaryHold,
RetentionExpirationTime: ret,
ContentType: o.ContentType,
ContentEncoding: o.ContentEncoding,
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
ContentDisposition: o.ContentDisposition,
StorageClass: o.StorageClass,
Acl: toRawObjectACL(o.ACL),
Metadata: o.Metadata,
}
}
// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object.
type ObjectAttrs struct {
// Bucket is the name of the bucket containing this GCS object.
// This field is read-only.
Bucket string
// Name is the name of the object within the bucket.
// This field is read-only.
Name string
// ContentType is the MIME type of the object's content.
ContentType string
// ContentLanguage is the content language of the object's content.
ContentLanguage string
// CacheControl is the Cache-Control header to be sent in the response
// headers when serving the object data.
CacheControl string
// EventBasedHold specifies whether an object is under event-based hold. New
// objects created in a bucket whose DefaultEventBasedHold is set will
// default to that value.
EventBasedHold bool
// TemporaryHold specifies whether an object is under temporary hold. While
// this flag is set to true, the object is protected against deletion and
// overwrites.
TemporaryHold bool
// RetentionExpirationTime is a server-determined value that specifies the
// earliest time that the object's retention period expires.
// This is a read-only field.
RetentionExpirationTime time.Time
// ACL is the list of access control rules for the object.
ACL []ACLRule
// If not empty, applies a predefined set of access controls. It should be set
// only when writing, copying or composing an object. When copying or composing,
// it acts as the destinationPredefinedAcl parameter.
// PredefinedACL is always empty for ObjectAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/objects/insert
// for valid values.
PredefinedACL string
// Owner is the owner of the object. This field is read-only.
//
// If non-zero, it is in the form of "user-<userId>".
Owner string
// Size is the length of the object's content. This field is read-only.
Size int64
// ContentEncoding is the encoding of the object's content.
ContentEncoding string
// ContentDisposition is the optional Content-Disposition header of the object
// sent in the response headers.
ContentDisposition string
// MD5 is the MD5 hash of the object's content. This field is read-only,
// except when used from a Writer. If set on a Writer, the uploaded
// data is rejected if its MD5 hash does not match this field.
MD5 []byte
// CRC32C is the CRC32 checksum of the object's content using
// the Castagnoli93 polynomial. This field is read-only, except when
// used from a Writer. If set on a Writer and Writer.SendCRC32C
// is true, the uploaded data is rejected if its CRC32c hash does not
// match this field.
CRC32C uint32
// MediaLink is a URL to the object's content. This field is read-only.
MediaLink string
// Metadata represents user-provided metadata, in key/value pairs.
// It can be nil if no metadata is provided.
Metadata map[string]string
// Generation is the generation number of the object's content.
// This field is read-only.
Generation int64
// Metageneration is the version of the metadata for this
// object at this generation. This field is used for preconditions
// and for detecting changes in metadata. A metageneration number
// is only meaningful in the context of a particular generation
// of a particular object. This field is read-only.
Metageneration int64
// StorageClass is the storage class of the object. This defines
// how objects are stored and determines the SLA and the cost of storage.
// Typical values are "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE".
// Defaults to "STANDARD".
// See https://cloud.google.com/storage/docs/storage-classes for all
// valid values.
StorageClass string
// Created is the time the object was created. This field is read-only.
Created time.Time
// Deleted is the time the object was deleted.
// If not deleted, it is the zero value. This field is read-only.
Deleted time.Time
// Updated is the creation or modification time of the object.
// For buckets with versioning enabled, changing an object's
// metadata does not change this property. This field is read-only.
Updated time.Time
// CustomerKeySHA256 is the base64-encoded SHA-256 hash of the
// customer-supplied encryption key for the object. It is empty if there is
// no customer-supplied encryption key.
// See https://cloud.google.com/storage/docs/encryption for more about
// encryption in Google Cloud Storage.
CustomerKeySHA256 string
// Cloud KMS key name, in the form
// projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object,
// if the object is encrypted by such a key.
//
// Providing both a KMSKeyName and a customer-supplied encryption key (via
// ObjectHandle.Key) will result in an error when writing an object.
KMSKeyName string
// Prefix is set only for ObjectAttrs which represent synthetic "directory
// entries" when iterating over buckets using Query.Delimiter. See
// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
// populated.
Prefix string
// Etag is the HTTP/1.1 Entity tag for the object.
// This field is read-only.
Etag string
}
// convertTime converts a time in RFC3339 format to time.Time.
// If any error occurs in parsing, the zero-value time.Time is silently returned.
func convertTime(t string) time.Time {
var r time.Time
if t != "" {
r, _ = time.Parse(time.RFC3339, t)
}
return r
}
func newObject(o *raw.Object) *ObjectAttrs {
if o == nil {
return nil
}
owner := ""
if o.Owner != nil {
owner = o.Owner.Entity
}
md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)
crc32c, _ := decodeUint32(o.Crc32c)
var sha256 string
if o.CustomerEncryption != nil {
sha256 = o.CustomerEncryption.KeySha256
}
return &ObjectAttrs{
Bucket: o.Bucket,
Name: o.Name,
ContentType: o.ContentType,
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
EventBasedHold: o.EventBasedHold,
TemporaryHold: o.TemporaryHold,
RetentionExpirationTime: convertTime(o.RetentionExpirationTime),
ACL: toObjectACLRules(o.Acl),
Owner: owner,
ContentEncoding: o.ContentEncoding,
ContentDisposition: o.ContentDisposition,
Size: int64(o.Size),
MD5: md5,
CRC32C: crc32c,
MediaLink: o.MediaLink,
Metadata: o.Metadata,
Generation: o.Generation,
Metageneration: o.Metageneration,
StorageClass: o.StorageClass,
CustomerKeySHA256: sha256,
KMSKeyName: o.KmsKeyName,
Created: convertTime(o.TimeCreated),
Deleted: convertTime(o.TimeDeleted),
Updated: convertTime(o.Updated),
Etag: o.Etag,
}
}
// Decode a uint32 encoded in Base64 in big-endian byte order.
func decodeUint32(b64 string) (uint32, error) {
d, err := base64.StdEncoding.DecodeString(b64)
if err != nil {
return 0, err
}
if len(d) != 4 {
return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d)
}
return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil
}
// Encode a uint32 as Base64 in big-endian byte order.
func encodeUint32(u uint32) string {
b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)}
return base64.StdEncoding.EncodeToString(b)
}
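// exampleCRC32CRoundTrip is an illustrative sketch (not part of the
// original package) showing that encodeUint32 and decodeUint32 are
// inverses for the big-endian base64 CRC32C encoding used by the API.
func exampleCRC32CRoundTrip(crc uint32) (uint32, error) {
return decodeUint32(encodeUint32(crc))
}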
// Query represents a query to filter objects from a bucket.
type Query struct {
// Delimiter returns results in a directory-like fashion.
// Results will contain only objects whose names, aside from the
// prefix, do not contain delimiter. Objects whose names,
// aside from the prefix, contain delimiter will have their name,
// truncated after the delimiter, returned in prefixes.
// Duplicate prefixes are omitted.
// Optional.
Delimiter string
// Prefix is the prefix filter to query objects
// whose names begin with this prefix.
// Optional.
Prefix string
// Versions indicates whether multiple versions of the same
// object will be included in the results.
Versions bool
// fieldSelection is used to select only specific fields to be returned by
// the query. It's used internally and is populated for the user by
// calling Query.SetAttrSelection
fieldSelection string
}
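// exampleDirectoryListing is an illustrative sketch (not part of the
// original package): combining Prefix and Delimiter yields a
// directory-like listing, so iterating with this query returns objects
// directly under "photos/" plus synthetic prefix entries for its
// "subdirectories".
func exampleDirectoryListing() *Query {
return &Query{Prefix: "photos/", Delimiter: "/"}
}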
// attrToFieldMap maps the field names of ObjectAttrs to the underlying field
// names in the API call. Only the ObjectAttrs field names are visible to users
// because they are already part of the public API of the package.
var attrToFieldMap = map[string]string{
"Bucket": "bucket",
"Name": "name",
"ContentType": "contentType",
"ContentLanguage": "contentLanguage",
"CacheControl": "cacheControl",
"EventBasedHold": "eventBasedHold",
"TemporaryHold": "temporaryHold",
"RetentionExpirationTime": "retentionExpirationTime",
"ACL": "acl",
"Owner": "owner",
"ContentEncoding": "contentEncoding",
"ContentDisposition": "contentDisposition",
"Size": "size",
"MD5": "md5Hash",
"CRC32C": "crc32c",
"MediaLink": "mediaLink",
"Metadata": "metadata",
"Generation": "generation",
"Metageneration": "metageneration",
"StorageClass": "storageClass",
"CustomerKeySHA256": "customerEncryption",
"KMSKeyName": "kmsKeyName",
"Created": "timeCreated",
"Deleted": "timeDeleted",
"Updated": "updated",
"Etag": "etag",
}
// SetAttrSelection makes the query populate only specific attributes of
// objects. When iterating over objects, if you only need each object's name
// and size, pass []string{"Name", "Size"} to this method. Only these fields
// will be fetched for each object across the network; the other fields of
// ObjectAttr will remain at their default values. This is a performance
// optimization; for more information, see
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance
func (q *Query) SetAttrSelection(attrs []string) error {
fieldSet := make(map[string]bool)
for _, attr := range attrs {
field, ok := attrToFieldMap[attr]
if !ok {
return fmt.Errorf("storage: attr %v is not valid", attr)
}
fieldSet[field] = true
}
if len(fieldSet) > 0 {
var b strings.Builder
b.WriteString("items(")
first := true
for field := range fieldSet {
if !first {
b.WriteString(",")
}
first = false
b.WriteString(field)
}
b.WriteString(")")
q.fieldSelection = b.String()
}
return nil
}
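// exampleAttrSelection is an illustrative sketch (not part of the original
// package): asking the API to return only object names and sizes, which
// shrinks response payloads for large listings.
func exampleAttrSelection() error {
q := &Query{Prefix: "logs/"}
// Only Name and Size will be populated on each returned ObjectAttrs.
return q.SetAttrSelection([]string{"Name", "Size"})
}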
// Conditions constrain methods to act on specific generations of
// objects.
//
// The zero value is an empty set of constraints. Not all conditions or
// combinations of conditions are applicable to all methods.
// See https://cloud.google.com/storage/docs/generations-preconditions
// for details on how these operate.
type Conditions struct {
// Generation constraints.
// At most one of the following can be set to a non-zero value.
// GenerationMatch specifies that the object must have the given generation
// for the operation to occur.
// If GenerationMatch is zero, it has no effect.
// Use DoesNotExist to specify that the object does not exist in the bucket.
GenerationMatch int64
// GenerationNotMatch specifies that the object must not have the given
// generation for the operation to occur.
// If GenerationNotMatch is zero, it has no effect.
GenerationNotMatch int64
// DoesNotExist specifies that the object must not exist in the bucket for
// the operation to occur.
// If DoesNotExist is false, it has no effect.
DoesNotExist bool
// Metadata generation constraints.
// At most one of the following can be set to a non-zero value.
// MetagenerationMatch specifies that the object must have the given
// metageneration for the operation to occur.
// If MetagenerationMatch is zero, it has no effect.
MetagenerationMatch int64
// MetagenerationNotMatch specifies that the object must not have the given
// metageneration for the operation to occur.
// If MetagenerationNotMatch is zero, it has no effect.
MetagenerationNotMatch int64
}
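// exampleCreateOnly is an illustrative sketch (not part of the original
// package): a Conditions value expressing "write only if the object does
// not already exist", typically passed to the ObjectHandle.If method
// defined elsewhere in this package.
func exampleCreateOnly() Conditions {
return Conditions{DoesNotExist: true}
}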
func (c *Conditions) validate(method string) error {
if *c == (Conditions{}) {
return fmt.Errorf("storage: %s: empty conditions", method)
}
if !c.isGenerationValid() {
return fmt.Errorf("storage: %s: multiple conditions specified for generation", method)
}
if !c.isMetagenerationValid() {
return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
}
return nil
}
func (c *Conditions) isGenerationValid() bool {
n := 0
if c.GenerationMatch != 0 {
n++
}
if c.GenerationNotMatch != 0 {
n++
}
if c.DoesNotExist {
n++
}
return n <= 1
}
func (c *Conditions) isMetagenerationValid() bool {
return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0
}
// applyConds modifies the provided call using the conditions in conds.
// call is something that quacks like a *raw.WhateverCall.
func applyConds(method string, gen int64, conds *Conditions, call interface{}) error {
cval := reflect.ValueOf(call)
if gen >= 0 {
if !setConditionField(cval, "Generation", gen) {
return fmt.Errorf("storage: %s: generation not supported", method)
}
}
if conds == nil {
return nil
}
if err := conds.validate(method); err != nil {
return err
}
switch {
case conds.GenerationMatch != 0:
if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) {
return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method)
}
case conds.GenerationNotMatch != 0:
if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) {
return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method)
}
case conds.DoesNotExist:
if !setConditionField(cval, "IfGenerationMatch", int64(0)) {
return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
}
}
switch {
case conds.MetagenerationMatch != 0:
if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
}
case conds.MetagenerationNotMatch != 0:
if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
}
}
return nil
}
func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error {
if gen >= 0 {
call.SourceGeneration(gen)
}
if conds == nil {
return nil
}
if err := conds.validate("CopyTo source"); err != nil {
return err
}
switch {
case conds.GenerationMatch != 0:
call.IfSourceGenerationMatch(conds.GenerationMatch)
case conds.GenerationNotMatch != 0:
call.IfSourceGenerationNotMatch(conds.GenerationNotMatch)
case conds.DoesNotExist:
call.IfSourceGenerationMatch(0)
}
switch {
case conds.MetagenerationMatch != 0:
call.IfSourceMetagenerationMatch(conds.MetagenerationMatch)
case conds.MetagenerationNotMatch != 0:
call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch)
}
return nil
}
// setConditionField sets a field on a *raw.WhateverCall.
// We can't use anonymous interfaces because the return type is
// different, since the field setters are builders.
func setConditionField(call reflect.Value, name string, value interface{}) bool {
m := call.MethodByName(name)
if !m.IsValid() {
return false
}
m.Call([]reflect.Value{reflect.ValueOf(value)})
return true
}
// conditionsQuery returns the generation and conditions as a URL query
// string suitable for URL.RawQuery. It assumes that the conditions
// have been validated.
func conditionsQuery(gen int64, conds *Conditions) string {
// URL escapes are elided because integer strings are URL-safe.
var buf []byte
appendParam := func(s string, n int64) {
if len(buf) > 0 {
buf = append(buf, '&')
}
buf = append(buf, s...)
buf = strconv.AppendInt(buf, n, 10)
}
if gen >= 0 {
appendParam("generation=", gen)
}
if conds == nil {
return string(buf)
}
switch {
case conds.GenerationMatch != 0:
appendParam("ifGenerationMatch=", conds.GenerationMatch)
case conds.GenerationNotMatch != 0:
appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch)
case conds.DoesNotExist:
appendParam("ifGenerationMatch=", 0)
}
switch {
case conds.MetagenerationMatch != 0:
appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch)
case conds.MetagenerationNotMatch != 0:
appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch)
}
return string(buf)
}
// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods
// that modifyCall searches for by name.
type composeSourceObj struct {
src *raw.ComposeRequestSourceObjects
}
func (c composeSourceObj) Generation(gen int64) {
c.src.Generation = gen
}
func (c composeSourceObj) IfGenerationMatch(gen int64) {
// It's safe to overwrite ObjectPreconditions, since its only field is
// IfGenerationMatch.
c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{
IfGenerationMatch: gen,
}
}
func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error {
if key == nil {
return nil
}
// TODO(jbd): Ask the API team to return a more user-friendly error
// and avoid doing this check at the client level.
if len(key) != 32 {
return errors.New("storage: not a 32-byte AES-256 key")
}
var cs string
if copySource {
cs = "copy-source-"
}
headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256")
headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key))
keyHash := sha256.Sum256(key)
headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:]))
return nil
}
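// exampleEncryptionHeaders is an illustrative sketch (not part of the
// original package): applying a 32-byte AES-256 customer-supplied key
// produces the three x-goog-*encryption-* headers set above.
func exampleEncryptionHeaders(key []byte) (http.Header, error) {
h := make(http.Header)
if err := setEncryptionHeaders(h, key, false); err != nil {
return nil, err
}
return h, nil
}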
// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account.
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
r := c.raw.Projects.ServiceAccount.Get(projectID)
res, err := r.Context(ctx).Do()
if err != nil {
return "", err
}
return res.EmailAddress, nil
}
|
[
"\"STORAGE_EMULATOR_HOST\""
] |
[] |
[
"STORAGE_EMULATOR_HOST"
] |
[]
|
["STORAGE_EMULATOR_HOST"]
|
go
| 1 | 0 | |
noxfile.py
|
import os
import shutil
import nox
nox.options.reuse_existing_virtualenvs = True
PYTHON_VERSIONS = ['pypy3', '3.6', '3.7', '3.8', '3.9']
CI_ENVIRONMENT = 'GITHUB_ACTIONS' in os.environ
@nox.session(python=PYTHON_VERSIONS[-1])
def lint(session):
"""Performs pep8 and security checks."""
source_code = 'click_params'
session.install('flake8==3.9.2', 'bandit==1.7.0')
session.run('flake8', source_code)
session.run('bandit', '-r', source_code)
@nox.session(python=PYTHON_VERSIONS)
def tests(session):
"""Runs the test suite."""
session.install('poetry>=1.0.0,<2.0.0')
session.run('poetry', 'install')
session.run('pytest')
# we notify codecov when the latest version of python is used
if session.python == PYTHON_VERSIONS[-1] and CI_ENVIRONMENT:
session.run('codecov', '-f', 'coverage.xml')
@nox.session(python=PYTHON_VERSIONS[-1])
def docs(session):
"""Builds the documentation."""
session.install('mkdocs==1.0.4')
session.run('mkdocs', 'build', '--clean')
@nox.session(python=PYTHON_VERSIONS[-1])
def deploy(session):
"""
Deploys on pypi.
"""
if 'POETRY_PYPI_TOKEN_PYPI' not in os.environ:
session.error('you must specify your pypi token api to deploy your package')
session.install('poetry>=1.0.0,<2.0.0')
session.run('poetry', 'publish', '--build')
@nox.session(python=False)
def clean(*_):
"""Since nox take a bit of memory, this command helps to clean nox environment."""
shutil.rmtree('.nox', ignore_errors=True)
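# Illustrative usage notes (not part of the original file): sessions can be
# run individually with nox's standard flags, e.g.
#   nox -s lint            # pep8 + bandit checks only
#   nox -s tests -p 3.9    # test suite on a single interpreter
#   nox -s clean           # remove the .nox directory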
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
functions/infraservices/handler.go
|
package function
import (
"encoding/json"
"io/ioutil"
"log"
"os"
types "github.com/automium/types/go/gateway"
v1beta1 "github.com/automium/types/go/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
func getAPISecret(secretName string) (secretBytes []byte, err error) {
root := "/var/openfaas/secrets/"
// read from the openfaas secrets folder
secretBytes, err = ioutil.ReadFile(root + secretName)
return secretBytes, err
}
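// Illustrative note (assumption, not from the original source): OpenFaaS
// mounts function secrets as read-only files under /var/openfaas/secrets/,
// so a secret created with `faas-cli secret create KubeConfig
// --from-file=kubeconfig.json` becomes readable here as
// /var/openfaas/secrets/KubeConfig.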
// Handle a serverless request
func Handle(req []byte) string {
key := os.Getenv("Http_x_api_key")
err := validateInput(key)
if err != nil {
log.Fatalf("[ERROR] Invalid input: %s", err.Error())
}
secretBytes, err := getAPISecret("KubeConfig")
if err != nil {
log.Fatal(err)
}
var inputData types.KubernetesConfig
err = json.Unmarshal(secretBytes, &inputData)
if err != nil {
log.Fatalf("[ERROR] Cannot unmarshal secret data: %s", err.Error())
}
err = validateData(inputData)
if err != nil {
log.Fatalf("[ERROR] Invalid data: %s", err.Error())
}
config, err := clientcmd.RESTConfigFromKubeConfig([]byte(inputData.Kubeconfig))
if err != nil {
log.Fatalf("[ERROR] Cannot create configuration from provided kubeconfig: %s", err.Error())
}
client, err := createRESTClient(config)
if err != nil {
log.Fatalf("[ERROR] Cannot prepare the client: %s", err.Error())
}
result := v1beta1.ServiceList{}
err = client.Get().Resource("services").Do().Into(&result)
if err != nil {
log.Fatalf("[ERROR] Cannot retrieve services: %s", err.Error())
}
serviceListJSON, err := json.Marshal(result)
if err != nil {
log.Fatalf("[ERROR] Cannot marshal output: %s", err.Error())
}
return string(serviceListJSON)
}
func createRESTClient(config *rest.Config) (*rest.RESTClient, error) {
v1beta1.AddToScheme(scheme.Scheme)
crdConfig := *config
crdConfig.ContentConfig.GroupVersion = &schema.GroupVersion{Group: v1beta1.GroupName, Version: v1beta1.GroupVersion}
crdConfig.APIPath = "/apis"
crdConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
crdConfig.UserAgent = rest.DefaultKubernetesUserAgent()
rc, err := rest.UnversionedRESTClientFor(&crdConfig)
if err != nil {
return nil, err
}
return rc, nil
}
func validateInput(input string) error {
//log.Printf("request with %s key", input)
// TODO: validation
return nil
}
func validateData(input types.KubernetesConfig) error {
// TODO: validation
return nil
}
|
[
"\"Http_x_api_key\""
] |
[] |
[
"Http_x_api_key"
] |
[]
|
["Http_x_api_key"]
|
go
| 1 | 0 | |
win_windows.go
|
package win
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime/debug"
"syscall"
"time"
"unicode/utf16"
"github.com/gonutz/w32/v2"
)
type WindowOptions struct {
X, Y int
Width, Height int
ClassName string
Title string
Cursor w32.HCURSOR
// ClassStyle should include w32.CS_OWNDC for OpenGL
ClassStyle uint32
WindowStyle uint
Background w32.HBRUSH
}
type MessageCallback func(window w32.HWND, msg uint32, w, l uintptr) uintptr
func DefaultOptions() WindowOptions {
return WindowOptions{
X: w32.CW_USEDEFAULT,
Y: w32.CW_USEDEFAULT,
Width: w32.CW_USEDEFAULT,
Height: w32.CW_USEDEFAULT,
ClassName: "window_class",
Title: "",
Cursor: w32.LoadCursor(0, w32.MakeIntResource(w32.IDC_ARROW)),
ClassStyle: 0,
WindowStyle: w32.WS_OVERLAPPEDWINDOW | w32.WS_VISIBLE,
Background: 0,
}
}
// NewWindow creates a window.
func NewWindow(opts WindowOptions, f MessageCallback) (w32.HWND, error) {
if opts.Width == 0 {
opts.Width = 640
}
if opts.Height == 0 {
opts.Height = 480
}
if opts.ClassName == "" {
opts.ClassName = "window_class"
}
if opts.Cursor == 0 {
opts.Cursor = w32.LoadCursor(0, w32.MakeIntResource(w32.IDC_ARROW))
}
if opts.WindowStyle == 0 {
opts.WindowStyle = w32.WS_OVERLAPPEDWINDOW
}
opts.WindowStyle |= w32.WS_VISIBLE
class := w32.WNDCLASSEX{
Background: opts.Background,
WndProc: syscall.NewCallback(f),
Cursor: opts.Cursor,
ClassName: syscall.StringToUTF16Ptr(opts.ClassName),
Style: opts.ClassStyle,
}
atom := w32.RegisterClassEx(&class)
if atom == 0 {
return 0, errors.New("win.NewWindow: RegisterClassEx failed")
}
window := w32.CreateWindowEx(
0,
syscall.StringToUTF16Ptr(opts.ClassName),
syscall.StringToUTF16Ptr(opts.Title),
opts.WindowStyle,
opts.X, opts.Y, opts.Width, opts.Height,
0, 0, 0, nil,
)
if window == 0 {
return 0, errors.New("win.NewWindow: CreateWindowEx failed")
}
return window, nil
}
// SetIconFromExe sets the icon in the window title bar, in the taskbar and when
// using Alt-Tab to switch between applications.
// The icon is loaded from the running executable file using the given resource
// ID. This means that the icon must be embedded in the executable when building
// by using a resource file for example.
func SetIconFromExe(window w32.HWND, resourceID uint16) {
iconHandle := w32.LoadImage(
w32.GetModuleHandle(""),
w32.MakeIntResource(resourceID),
w32.IMAGE_ICON,
0,
0,
w32.LR_DEFAULTSIZE|w32.LR_SHARED,
)
if iconHandle != 0 {
w32.SendMessage(window, w32.WM_SETICON, w32.ICON_SMALL, uintptr(iconHandle))
w32.SendMessage(window, w32.WM_SETICON, w32.ICON_SMALL2, uintptr(iconHandle))
w32.SendMessage(window, w32.WM_SETICON, w32.ICON_BIG, uintptr(iconHandle))
}
}
// IsFullscreen returns true if the window has a style different from
// WS_OVERLAPPEDWINDOW. The EnableFullscreen function will change the style to
// borderless so this reports whether that function was called on the window.
// It is not a universally valid test for any window to see if it is fullscreen.
// It is intended for use in conjunction with EnableFullscreen and
// DisableFullscreen.
func IsFullscreen(window w32.HWND) bool {
style := w32.GetWindowLong(window, w32.GWL_STYLE)
return style&w32.WS_OVERLAPPEDWINDOW == 0
}
// EnableFullscreen makes the window a borderless window that covers the full
// area of the monitor under the window.
// It returns the previous window placement. Store that value and use it with
// DisableFullscreen to reset the window to what it was before.
func EnableFullscreen(window w32.HWND) (windowed w32.WINDOWPLACEMENT) {
style := w32.GetWindowLong(window, w32.GWL_STYLE)
var monitorInfo w32.MONITORINFO
monitor := w32.MonitorFromWindow(window, w32.MONITOR_DEFAULTTOPRIMARY)
if w32.GetWindowPlacement(window, &windowed) &&
w32.GetMonitorInfo(monitor, &monitorInfo) {
w32.SetWindowLong(
window,
w32.GWL_STYLE,
style & ^w32.WS_OVERLAPPEDWINDOW,
)
w32.SetWindowPos(
window,
0,
int(monitorInfo.RcMonitor.Left),
int(monitorInfo.RcMonitor.Top),
int(monitorInfo.RcMonitor.Right-monitorInfo.RcMonitor.Left),
int(monitorInfo.RcMonitor.Bottom-monitorInfo.RcMonitor.Top),
w32.SWP_NOOWNERZORDER|w32.SWP_FRAMECHANGED,
)
}
w32.ShowCursor(false)
return
}
// DisableFullscreen makes the window have a border and the standard icons
// (style WS_OVERLAPPEDWINDOW) and places it at the position given by the window
// placement parameter.
// Use this in conjunction with IsFullscreen and EnableFullscreen to toggle a
// window's fullscreen state.
func DisableFullscreen(window w32.HWND, placement w32.WINDOWPLACEMENT) {
style := w32.GetWindowLong(window, w32.GWL_STYLE)
w32.SetWindowLong(
window,
w32.GWL_STYLE,
style|w32.WS_OVERLAPPEDWINDOW,
)
w32.SetWindowPlacement(window, &placement)
w32.SetWindowPos(window, 0, 0, 0, 0, 0,
w32.SWP_NOMOVE|w32.SWP_NOSIZE|w32.SWP_NOZORDER|
w32.SWP_NOOWNERZORDER|w32.SWP_FRAMECHANGED,
)
w32.ShowCursor(true)
}
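// exampleToggleFullscreen is an illustrative sketch (not part of the
// original package) showing the intended IsFullscreen / EnableFullscreen /
// DisableFullscreen round trip; the saved placement restores the windowed
// position and size.
var exampleWindowedPlacement w32.WINDOWPLACEMENT

func exampleToggleFullscreen(window w32.HWND) {
if IsFullscreen(window) {
DisableFullscreen(window, exampleWindowedPlacement)
} else {
exampleWindowedPlacement = EnableFullscreen(window)
}
}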
// RunMainLoop starts the applications window message handling. It loops until
// the window is closed. Messages are forwarded to the handler function that was
// passed to NewWindow.
func RunMainLoop() {
var msg w32.MSG
for w32.GetMessage(&msg, 0, 0, 0) != 0 {
w32.TranslateMessage(&msg)
w32.DispatchMessage(&msg)
}
}
// RunMainGameLoop starts the application's window message handling. It loops
// until the window is closed. Messages are forwarded to the handler function
// that was passed to NewWindow.
// In contrast to RunMainLoop, RunMainGameLoop calls the given function whenever
// there are no messages to be handled at the moment. You can use this like a
// classical DOS era endless loop to run any real-time logic in between
// messages.
// Tip: if you do not want the game to use all your CPU, do some kind of
// blocking operation in the function you pass. A simple time.Sleep(0) will do
// the trick.
func RunMainGameLoop(f func()) {
var msg w32.MSG
w32.PeekMessage(&msg, 0, 0, 0, w32.PM_NOREMOVE)
for msg.Message != w32.WM_QUIT {
if w32.PeekMessage(&msg, 0, 0, 0, w32.PM_REMOVE) {
w32.TranslateMessage(&msg)
w32.DispatchMessage(&msg)
} else {
f()
}
}
}
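// exampleGameLoop is an illustrative sketch (not part of the original
// package): per-frame logic runs whenever the message queue is empty; the
// Sleep keeps the loop from saturating a CPU core.
func exampleGameLoop() {
RunMainGameLoop(func() {
// update and render one frame here
time.Sleep(time.Millisecond)
})
}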
// CloseWindow sends a WM_CLOSE event to the given window.
func CloseWindow(window w32.HWND) {
w32.SendMessage(window, w32.WM_CLOSE, 0, 0)
}
// HideConsoleWindow hides the associated console window if it was created
// because the ldflag -H=windowsgui was not provided when building.
func HideConsoleWindow() {
console := w32.GetConsoleWindow()
if console == 0 {
return // no console attached
}
// If this application is the process that created the console window, then
// this program was not compiled with the -H=windowsgui flag and on start-up
// it created a console along with the main application window. In this case
// hide the console window.
// See
// http://stackoverflow.com/questions/9009333/how-to-check-if-the-program-is-run-from-a-console
// and thanks to
// https://github.com/hajimehoshi
// for the tip.
_, consoleProcID := w32.GetWindowThreadProcessId(console)
if w32.GetCurrentProcessId() == consoleProcID {
w32.ShowWindowAsync(console, w32.SW_HIDE)
}
}
// HandlePanics is designed to be deferred as the first statement in an
// application's main function. It calls recover to catch unhandled panics. The
// current stack is output to standard output, to a file in the user's APPDATA
// folder (which is then opened with the default .txt editor) and to a message
// box that is shown to the user.
// The id is used in the log file name.
func HandlePanics(id string) {
if err := recover(); err != nil {
// in case of a panic, create a message with the current stack
msg := fmt.Sprintf("panic: %v\nstack:\n\n%s\n", err, debug.Stack())
// print it to stdout
fmt.Println(msg)
// write it to a log file
filename := filepath.Join(
os.Getenv("APPDATA"),
id+"_panic_log_"+time.Now().Format("2006_01_02__15_04_05")+".txt",
)
ioutil.WriteFile(filename, []byte(msg), 0777)
// open the log file with the default text viewer
exec.Command("cmd", "/C", filename).Start()
// pop up a message box
w32.MessageBox(
0,
msg,
"The program crashed",
w32.MB_OK|w32.MB_ICONERROR|w32.MB_TOPMOST,
)
}
}
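// examplePanicGuard is an illustrative sketch (not part of the original
// package): defer HandlePanics as the first statement of main so that any
// unhandled panic is logged to APPDATA and surfaced in a message box.
func examplePanicGuard() {
defer HandlePanics("my_app")
// ... application code that might panic ...
}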
// Callback can be used as the callback function for a window. It will translate
// common messages into nice function calls. No need to handle generic W and L
// parameters yourself.
func (m *MessageHandler) Callback(window w32.HWND, msg uint32, w, l uintptr) uintptr {
if msg == w32.WM_TIMER && m.OnTimer != nil {
m.OnTimer(w)
return 0
} else if msg == w32.WM_KEYDOWN && m.OnKeyDown != nil {
m.OnKeyDown(w, KeyOptions(l))
return 0
} else if msg == w32.WM_KEYUP && m.OnKeyUp != nil {
m.OnKeyUp(w, KeyOptions(l))
return 0
} else if msg == w32.WM_CHAR && m.OnChar != nil {
r := utf16.Decode([]uint16{uint16(w)})[0]
m.OnChar(r)
return 0
} else if msg == w32.WM_MOUSEMOVE && m.OnMouseMove != nil {
x := int((uint(l)) & 0xFFFF)
y := int((uint(l) >> 16) & 0xFFFF)
m.OnMouseMove(x, y, MouseOptions(w))
return 0
} else if msg == w32.WM_SIZE && m.OnSize != nil {
w := int((uint(l)) & 0xFFFF)
h := int((uint(l) >> 16) & 0xFFFF)
m.OnSize(w, h)
return 0
} else if msg == w32.WM_MOVE && m.OnMove != nil {
x := int((uint(l)) & 0xFFFF)
y := int((uint(l) >> 16) & 0xFFFF)
m.OnMove(x, y)
return 0
} else if msg == w32.WM_ACTIVATE && m.OnActivate != nil {
if w != 0 && m.OnActivate != nil {
m.OnActivate()
}
if w == 0 && m.OnDeactivate != nil {
m.OnDeactivate()
}
return 0
} else if msg == w32.WM_LBUTTONDOWN && m.OnLeftMouseDown != nil {
m.OnLeftMouseDown(mouseX(l), mouseY(l), MouseOptions(w))
return 0
} else if msg == w32.WM_RBUTTONDOWN && m.OnRightMouseDown != nil {
m.OnRightMouseDown(mouseX(l), mouseY(l), MouseOptions(w))
return 0
} else if msg == w32.WM_MBUTTONDOWN && m.OnMiddleMouseDown != nil {
m.OnMiddleMouseDown(mouseX(l), mouseY(l), MouseOptions(w))
return 0
} else if msg == w32.WM_LBUTTONUP && m.OnLeftMouseUp != nil {
m.OnLeftMouseUp(mouseX(l), mouseY(l), MouseOptions(w))
return 0
} else if msg == w32.WM_RBUTTONUP && m.OnRightMouseUp != nil {
m.OnRightMouseUp(mouseX(l), mouseY(l), MouseOptions(w))
return 0
} else if msg == w32.WM_MBUTTONUP && m.OnMiddleMouseUp != nil {
m.OnMiddleMouseUp(mouseX(l), mouseY(l), MouseOptions(w))
return 0
} else if msg == w32.WM_MOUSEWHEEL && m.OnMouseWheel != nil {
delta := float32(int16((w>>16)&0xFFFF)) / 120.0
m.OnMouseWheel(delta, mouseX(l), mouseY(l), MouseOptions(w&0xFFFF))
return 0
} else if msg == w32.WM_DESTROY {
w32.PostQuitMessage(0)
return 0
} else if m.OnOther != nil {
if !m.OnOther(msg, w, l) {
return w32.DefWindowProc(window, msg, w, l)
}
return 0
} else {
return w32.DefWindowProc(window, msg, w, l)
}
}
func mouseX(l uintptr) int {
return int(int16(l & 0xFFFF))
}
func mouseY(l uintptr) int {
return int(int16((l >> 16) & 0xFFFF))
}
// MessageHandler translates common Windows messages for you instead of
// providing generic W and L parameters. Set the handlers that you want and
// leave the rest at nil. Use the MessageHandler's Callback function as the
// callback for a window.
type MessageHandler struct {
OnKeyDown func(key uintptr, options KeyOptions)
OnKeyUp func(key uintptr, options KeyOptions)
OnMouseMove func(x, y int, options MouseOptions)
OnMouseWheel func(forward float32, x, y int, options MouseOptions)
OnLeftMouseDown func(x, y int, options MouseOptions)
OnRightMouseDown func(x, y int, options MouseOptions)
OnMiddleMouseDown func(x, y int, options MouseOptions)
OnLeftMouseUp func(x, y int, options MouseOptions)
OnRightMouseUp func(x, y int, options MouseOptions)
OnMiddleMouseUp func(x, y int, options MouseOptions)
OnChar func(r rune)
OnSize func(width, height int)
OnMove func(x, y int)
OnActivate func()
OnDeactivate func()
OnTimer func(id uintptr)
OnOther func(msg uint32, w, l uintptr) (handled bool)
}
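// exampleHandlerWindow is an illustrative sketch (not part of the original
// package): set only the callbacks you need on a MessageHandler and pass
// its Callback method to NewWindow as the window procedure.
func exampleHandlerWindow() (w32.HWND, error) {
var h MessageHandler
h.OnChar = func(r rune) {
// react to typed characters here
}
return NewWindow(DefaultOptions(), h.Callback)
}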
type KeyOptions uintptr
func (o KeyOptions) RepeatCount() int {
return int(o & 0xFFFF)
}
func (o KeyOptions) ScanCode() int {
return int((o >> 16) & 0xFF)
}
func (o KeyOptions) IsExtended() bool {
return o&(1<<24) != 0
}
func (o KeyOptions) WasDown() bool {
return o&(1<<30) != 0
}
type MouseOptions uintptr
func (o MouseOptions) ControlDown() bool {
return o&w32.MK_CONTROL != 0
}
func (o MouseOptions) LButtonDown() bool {
return o&w32.MK_LBUTTON != 0
}
func (o MouseOptions) MButtonDown() bool {
return o&w32.MK_MBUTTON != 0
}
func (o MouseOptions) RButtonDown() bool {
return o&w32.MK_RBUTTON != 0
}
func (o MouseOptions) ShiftDown() bool {
return o&w32.MK_SHIFT != 0
}
func (o MouseOptions) XButton1Down() bool {
return o&w32.MK_XBUTTON1 != 0
}
func (o MouseOptions) XButton2Down() bool {
return o&w32.MK_XBUTTON2 != 0
}
func ClientSize(window w32.HWND) (w, h int) {
r := w32.GetClientRect(window)
if r == nil {
return 0, 0
}
return int(r.Width()), int(r.Height())
}
|
[
"\"APPDATA\""
] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
go
| 1 | 0 | |
integration/cmd_test.go
|
package integration_test
import (
"bytes"
"context"
"database/sql"
"flag"
"fmt"
"io"
"math/rand"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
"github.com/benbjohnson/litestream/internal"
"github.com/benbjohnson/litestream/internal/testingutil"
_ "github.com/mattn/go-sqlite3"
)
var longRunningDuration = flag.Duration("long-running-duration", 0, "")
func init() {
fmt.Fprintln(os.Stderr, "# ")
fmt.Fprintln(os.Stderr, "# NOTE: Build litestream to your PATH before running integration tests")
fmt.Fprintln(os.Stderr, "#")
fmt.Fprintln(os.Stderr, "")
}
// Ensure the default configuration works with light database load.
func TestCmd_Replicate_OK(t *testing.T) {
ctx := context.Background()
testDir, tempDir := filepath.Join("testdata", "replicate", "ok"), t.TempDir()
env := []string{"LITESTREAM_TEMPDIR=" + tempDir}
cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml"))
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db"))
if err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = wal`); err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil {
t.Fatal(err)
}
defer db.Close()
// Execute writes periodically.
for i := 0; i < 100; i++ {
t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", i)
if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, i); err != nil {
t.Fatal(err)
}
time.Sleep(10 * time.Millisecond)
}
// Stop & wait for Litestream command.
killLitestreamCmd(t, cmd, stdout)
// Ensure signal and shutdown are logged.
if s := stdout.String(); !strings.Contains(s, `signal received, litestream shutting down`) {
t.Fatal("missing log output for signal received")
} else if s := stdout.String(); !strings.Contains(s, `litestream shut down`) {
t.Fatal("missing log output for shut down")
}
// Checkpoint & verify original SQLite database.
if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil {
t.Fatal(err)
}
restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db"))
}
// Ensure that stopping and restarting Litestream before an application-induced
// checkpoint will cause Litestream to continue replicating using the same generation.
func TestCmd_Replicate_ResumeWithCurrentGeneration(t *testing.T) {
ctx := context.Background()
testDir, tempDir := filepath.Join("testdata", "replicate", "resume-with-current-generation"), t.TempDir()
env := []string{"LITESTREAM_TEMPDIR=" + tempDir}
cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml"))
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
t.Log("writing to database during replication")
db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db"))
if err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil {
t.Fatal(err)
}
defer db.Close()
// Execute a few writes to populate the WAL.
if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (1)`); err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (2)`); err != nil {
t.Fatal(err)
}
// Wait for replication to occur & shutdown.
waitForLogMessage(t, stdout, `wal segment written`)
killLitestreamCmd(t, cmd, stdout)
t.Log("replication shutdown, continuing database writes")
// Execute a few more writes while replication is stopped.
if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (3)`); err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (4)`); err != nil {
t.Fatal(err)
}
t.Log("restarting replication")
cmd, stdout, _ = commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml"))
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
waitForLogMessage(t, stdout, `wal segment written`)
killLitestreamCmd(t, cmd, stdout)
t.Log("replication shutdown again")
// Litestream should resume replication from the previous generation.
if s := stdout.String(); strings.Contains(s, "no generation exists") {
t.Fatal("expected existing generation to resume; started new generation instead")
}
// Checkpoint & verify original SQLite database.
if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil {
t.Fatal(err)
}
restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db"))
}
// Ensure that restarting Litestream after a full checkpoint has occurred will
// cause it to begin a new generation.
func TestCmd_Replicate_ResumeWithNewGeneration(t *testing.T) {
ctx := context.Background()
testDir, tempDir := filepath.Join("testdata", "replicate", "resume-with-new-generation"), t.TempDir()
env := []string{"LITESTREAM_TEMPDIR=" + tempDir}
cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml"))
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
t.Log("writing to database during replication")
db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db"))
if err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil {
t.Fatal(err)
}
defer db.Close()
// Execute a few writes to populate the WAL.
if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (1)`); err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (2)`); err != nil {
t.Fatal(err)
}
// Wait for replication to occur & shutdown.
waitForLogMessage(t, stdout, `wal segment written`)
killLitestreamCmd(t, cmd, stdout)
t.Log("replication shutdown, continuing database writes")
// Execute a few more writes while replication is stopped.
if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (3)`); err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (4)`); err != nil {
t.Fatal(err)
}
t.Log("issuing checkpoint")
// Issue a checkpoint to restart WAL.
if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(RESTART)`); err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (5)`); err != nil {
t.Fatal(err)
}
t.Log("restarting replication")
cmd, stdout, _ = commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml"))
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
waitForLogMessage(t, stdout, `wal segment written`)
killLitestreamCmd(t, cmd, stdout)
t.Log("replication shutdown again")
// Litestream should start a new generation after the checkpoint.
if s := stdout.String(); !strings.Contains(s, "no generation exists") {
t.Fatal("expected new generation to start; continued existing generation instead")
}
// Checkpoint & verify original SQLite database.
if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil {
t.Fatal(err)
}
restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db"))
}
// Ensure the monitor interval can be turned off.
func TestCmd_Replicate_NoMonitorDelayInterval(t *testing.T) {
ctx := context.Background()
testDir, tempDir := filepath.Join("testdata", "replicate", "no-monitor-delay-interval"), t.TempDir()
env := []string{"LITESTREAM_TEMPDIR=" + tempDir}
cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml"))
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db"))
if err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = wal`); err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil {
t.Fatal(err)
}
defer db.Close()
time.Sleep(1 * time.Second)
// Execute writes periodically.
for i := 0; i < 10; i++ {
t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", i)
if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, i); err != nil {
t.Fatal(err)
}
time.Sleep(100 * time.Millisecond)
}
// Stop & wait for Litestream command.
killLitestreamCmd(t, cmd, stdout)
// Ensure signal and shutdown are logged.
if s := stdout.String(); !strings.Contains(s, `signal received, litestream shutting down`) {
t.Fatal("missing log output for signal received")
} else if s := stdout.String(); !strings.Contains(s, `litestream shut down`) {
t.Fatal("missing log output for shut down")
}
// Checkpoint & verify original SQLite database.
if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil {
t.Fatal(err)
}
restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db"))
}
// Ensure the default configuration works with heavy write load.
func TestCmd_Replicate_HighLoad(t *testing.T) {
if testing.Short() {
t.Skip("short mode enabled, skipping")
} else if os.Getenv("CI") != "" {
t.Skip("ci, skipping")
}
const writeDuration = 30 * time.Second
ctx := context.Background()
testDir, tempDir := filepath.Join("testdata", "replicate", "high-load"), t.TempDir()
env := []string{"LITESTREAM_TEMPDIR=" + tempDir}
cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml"))
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db"))
if err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = WAL`); err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `PRAGMA synchronous = NORMAL`); err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `PRAGMA wal_autocheckpoint = 0`); err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil {
t.Fatal(err)
}
defer db.Close()
// Execute writes as fast as possible for a period of time.
timer := time.NewTimer(writeDuration)
defer timer.Stop()
t.Logf("executing writes for %s", writeDuration)
LOOP:
for i := 0; ; i++ {
select {
case <-timer.C:
break LOOP
default:
if i%1000 == 0 {
t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", i)
}
if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, i); err != nil {
t.Fatal(err)
}
}
}
t.Logf("writes complete, shutting down")
// Stop & wait for Litestream command.
time.Sleep(5 * time.Second)
killLitestreamCmd(t, cmd, stdout)
// Checkpoint & verify original SQLite database.
if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil {
t.Fatal(err)
}
restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db"))
}
// Ensure replication works for an extended period.
func TestCmd_Replicate_LongRunning(t *testing.T) {
if *longRunningDuration == 0 {
t.Skip("long running test duration not specified, skipping")
}
ctx := context.Background()
testDir, tempDir := filepath.Join("testdata", "replicate", "long-running"), t.TempDir()
env := []string{"LITESTREAM_TEMPDIR=" + tempDir}
cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml"))
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db"))
if err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = WAL`); err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `PRAGMA synchronous = NORMAL`); err != nil {
t.Fatal(err)
} else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil {
t.Fatal(err)
}
defer db.Close()
// Execute writes as fast as possible for a period of time.
timer := time.NewTimer(*longRunningDuration)
defer timer.Stop()
t.Logf("executing writes for %s", longRunningDuration)
LOOP:
for i := 0; ; i++ {
select {
case <-timer.C:
break LOOP
default:
t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", i)
if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, i); err != nil {
t.Fatal(err)
}
time.Sleep(time.Duration(rand.Intn(int(time.Second))))
}
}
t.Logf("writes complete, shutting down")
// Stop & wait for Litestream command.
killLitestreamCmd(t, cmd, stdout)
// Checkpoint & verify original SQLite database.
if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil {
t.Fatal(err)
}
restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db"))
}
// Ensure a database can be replicated over HTTP.
func TestCmd_Replicate_HTTP(t *testing.T) {
ctx := context.Background()
testDir, tempDir := filepath.Join("testdata", "replicate", "http"), t.TempDir()
if err := os.Mkdir(filepath.Join(tempDir, "0"), 0777); err != nil {
t.Fatal(err)
} else if err := os.Mkdir(filepath.Join(tempDir, "1"), 0777); err != nil {
t.Fatal(err)
}
env0 := []string{"LITESTREAM_TEMPDIR=" + tempDir}
env1 := []string{"LITESTREAM_TEMPDIR=" + tempDir, "LITESTREAM_UPSTREAM_URL=http://localhost:10001"}
cmd0, stdout0, _ := commandContext(ctx, env0, "replicate", "-config", filepath.Join(testDir, "litestream.0.yml"))
if err := cmd0.Start(); err != nil {
t.Fatal(err)
}
cmd1, stdout1, _ := commandContext(ctx, env1, "replicate", "-config", filepath.Join(testDir, "litestream.1.yml"))
if err := cmd1.Start(); err != nil {
t.Fatal(err)
}
db0, err := sql.Open("sqlite3", filepath.Join(tempDir, "0", "db"))
if err != nil {
t.Fatal(err)
} else if _, err := db0.ExecContext(ctx, `PRAGMA journal_mode = wal`); err != nil {
t.Fatal(err)
} else if _, err := db0.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil {
t.Fatal(err)
}
defer db0.Close()
// Execute writes periodically.
for i := 0; i < 100; i++ {
t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", i)
if _, err := db0.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, i); err != nil {
t.Fatal(err)
}
time.Sleep(100 * time.Millisecond)
}
// Wait for replica to catch up.
time.Sleep(1 * time.Second)
// Verify count in replica table.
db1, err := sql.Open("sqlite3", filepath.Join(tempDir, "1", "db"))
if err != nil {
t.Fatal(err)
}
defer db1.Close()
var n int
if err := db1.QueryRowContext(ctx, `SELECT COUNT(*) FROM t`).Scan(&n); err != nil {
t.Fatal(err)
} else if got, want := n, 100; got != want {
t.Fatalf("replica count=%d, want %d", got, want)
}
// Stop & wait for Litestream command.
killLitestreamCmd(t, cmd1, stdout1) // kill
killLitestreamCmd(t, cmd0, stdout0)
}
// Ensure a database can recover when disconnected from HTTP.
func TestCmd_Replicate_HTTP_PartialRecovery(t *testing.T) {
ctx := context.Background()
testDir, tempDir := filepath.Join("testdata", "replicate", "http-partial-recovery"), t.TempDir()
if err := os.Mkdir(filepath.Join(tempDir, "0"), 0777); err != nil {
t.Fatal(err)
} else if err := os.Mkdir(filepath.Join(tempDir, "1"), 0777); err != nil {
t.Fatal(err)
}
env0 := []string{"LITESTREAM_TEMPDIR=" + tempDir}
env1 := []string{"LITESTREAM_TEMPDIR=" + tempDir, "LITESTREAM_UPSTREAM_URL=http://localhost:10002"}
cmd0, stdout0, _ := commandContext(ctx, env0, "replicate", "-config", filepath.Join(testDir, "litestream.0.yml"))
if err := cmd0.Start(); err != nil {
t.Fatal(err)
}
cmd1, stdout1, _ := commandContext(ctx, env1, "replicate", "-config", filepath.Join(testDir, "litestream.1.yml"))
if err := cmd1.Start(); err != nil {
t.Fatal(err)
}
db0, err := sql.Open("sqlite3", filepath.Join(tempDir, "0", "db"))
if err != nil {
t.Fatal(err)
} else if _, err := db0.ExecContext(ctx, `PRAGMA journal_mode = wal`); err != nil {
t.Fatal(err)
} else if _, err := db0.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil {
t.Fatal(err)
}
defer db0.Close()
var index int
insertAndWait := func() {
index++
t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", index)
if _, err := db0.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, index); err != nil {
t.Fatal(err)
}
time.Sleep(100 * time.Millisecond)
}
// Execute writes periodically.
for i := 0; i < 50; i++ {
insertAndWait()
}
// Kill the replica.
t.Logf("Killing replica...")
killLitestreamCmd(t, cmd1, stdout1)
t.Logf("Replica killed")
// Keep writing.
for i := 0; i < 25; i++ {
insertAndWait()
}
// Restart replica.
t.Logf("Restarting replica...")
cmd1, stdout1, _ = commandContext(ctx, env1, "replicate", "-config", filepath.Join(testDir, "litestream.1.yml"))
if err := cmd1.Start(); err != nil {
t.Fatal(err)
}
t.Logf("Replica restarted")
// Continue writing...
for i := 0; i < 25; i++ {
insertAndWait()
}
// Wait for replica to catch up.
time.Sleep(1 * time.Second)
// Verify count in replica table.
db1, err := sql.Open("sqlite3", filepath.Join(tempDir, "1", "db"))
if err != nil {
t.Fatal(err)
}
defer db1.Close()
var n int
if err := db1.QueryRowContext(ctx, `SELECT COUNT(*) FROM t`).Scan(&n); err != nil {
t.Fatal(err)
} else if got, want := n, 100; got != want {
t.Fatalf("replica count=%d, want %d", got, want)
}
// Stop & wait for Litestream command.
killLitestreamCmd(t, cmd1, stdout1) // kill
killLitestreamCmd(t, cmd0, stdout0)
}
// Ensure a database can recover when disconnected from HTTP and the last
// replicated index is no longer available.
func TestCmd_Replicate_HTTP_FullRecovery(t *testing.T) {
ctx := context.Background()
testDir, tempDir := filepath.Join("testdata", "replicate", "http-full-recovery"), t.TempDir()
if err := os.Mkdir(filepath.Join(tempDir, "0"), 0777); err != nil {
t.Fatal(err)
} else if err := os.Mkdir(filepath.Join(tempDir, "1"), 0777); err != nil {
t.Fatal(err)
}
env0 := []string{"LITESTREAM_TEMPDIR=" + tempDir}
env1 := []string{"LITESTREAM_TEMPDIR=" + tempDir, "LITESTREAM_UPSTREAM_URL=http://localhost:10002"}
cmd0, stdout0, _ := commandContext(ctx, env0, "replicate", "-config", filepath.Join(testDir, "litestream.0.yml"))
if err := cmd0.Start(); err != nil {
t.Fatal(err)
}
cmd1, stdout1, _ := commandContext(ctx, env1, "replicate", "-config", filepath.Join(testDir, "litestream.1.yml"))
if err := cmd1.Start(); err != nil {
t.Fatal(err)
}
db0, err := sql.Open("sqlite3", filepath.Join(tempDir, "0", "db"))
if err != nil {
t.Fatal(err)
} else if _, err := db0.ExecContext(ctx, `PRAGMA journal_mode = wal`); err != nil {
t.Fatal(err)
} else if _, err := db0.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil {
t.Fatal(err)
}
defer db0.Close()
var index int
insertAndWait := func() {
index++
t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", index)
if _, err := db0.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, index); err != nil {
t.Fatal(err)
}
time.Sleep(100 * time.Millisecond)
}
// Execute writes periodically.
for i := 0; i < 50; i++ {
insertAndWait()
}
// Kill the replica.
t.Logf("Killing replica...")
killLitestreamCmd(t, cmd1, stdout1)
t.Logf("Replica killed")
// Keep writing.
for i := 0; i < 25; i++ {
insertAndWait()
}
// Restart replica.
t.Logf("Restarting replica...")
cmd1, stdout1, _ = commandContext(ctx, env1, "replicate", "-config", filepath.Join(testDir, "litestream.1.yml"))
if err := cmd1.Start(); err != nil {
t.Fatal(err)
}
t.Logf("Replica restarted")
// Continue writing...
for i := 0; i < 25; i++ {
insertAndWait()
}
// Wait for replica to catch up.
time.Sleep(1 * time.Second)
// Verify count in replica table.
db1, err := sql.Open("sqlite3", filepath.Join(tempDir, "1", "db"))
if err != nil {
t.Fatal(err)
}
defer db1.Close()
var n int
if err := db1.QueryRowContext(ctx, `SELECT COUNT(*) FROM t`).Scan(&n); err != nil {
t.Fatal(err)
} else if got, want := n, 100; got != want {
t.Fatalf("replica count=%d, want %d", got, want)
}
// Stop & wait for Litestream command.
killLitestreamCmd(t, cmd1, stdout1) // kill
killLitestreamCmd(t, cmd0, stdout0)
}
// commandContext returns a "litestream" command with stdout/stderr buffers.
func commandContext(ctx context.Context, env []string, arg ...string) (cmd *exec.Cmd, stdout, stderr *internal.LockingBuffer) {
cmd = exec.CommandContext(ctx, "litestream", arg...)
cmd.Env = env
var outBuf, errBuf internal.LockingBuffer
// Split stdout/stderr to terminal if verbose flag set.
cmd.Stdout, cmd.Stderr = &outBuf, &errBuf
if testing.Verbose() {
cmd.Stdout = io.MultiWriter(&outBuf, os.Stdout)
cmd.Stderr = io.MultiWriter(&errBuf, os.Stderr)
}
return cmd, &outBuf, &errBuf
}
// waitForLogMessage continuously checks b for a message and returns when it occurs.
func waitForLogMessage(tb testing.TB, b *internal.LockingBuffer, msg string) {
timer := time.NewTimer(30 * time.Second)
defer timer.Stop()
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-timer.C:
tb.Fatal("timed out waiting for cmd initialization")
case <-ticker.C:
if strings.Contains(b.String(), msg) {
return
}
}
}
}
// killLitestreamCmd interrupts the process and waits for a clean shutdown.
func killLitestreamCmd(tb testing.TB, cmd *exec.Cmd, stdout *internal.LockingBuffer) {
tb.Helper()
if err := cmd.Process.Signal(os.Interrupt); err != nil {
tb.Fatal("kill litestream: signal:", err)
} else if err := cmd.Wait(); err != nil {
tb.Fatal("kill litestream: cmd:", err)
}
}
// restoreAndVerify executes a "restore" and compares byte with the original database.
func restoreAndVerify(tb testing.TB, ctx context.Context, env []string, configPath, dbPath string) {
restorePath := filepath.Join(tb.TempDir(), "db")
// Restore database.
cmd, _, _ := commandContext(ctx, env, "restore", "-config", configPath, "-o", restorePath, dbPath)
if err := cmd.Run(); err != nil {
tb.Fatalf("error running 'restore' command: %s", err)
}
// Compare original database & restored database.
buf0 := testingutil.ReadFile(tb, dbPath)
buf1 := testingutil.ReadFile(tb, restorePath)
if bytes.Equal(buf0, buf1) {
return // ok, exit
}
// On mismatch, copy out original & restored DBs.
dir, err := os.MkdirTemp("", "litestream-*")
if err != nil {
tb.Fatal(err)
}
testingutil.CopyFile(tb, dbPath, filepath.Join(dir, "original.db"))
testingutil.CopyFile(tb, restorePath, filepath.Join(dir, "restored.db"))
tb.Fatalf("database mismatch; databases copied to %s", dir)
}
|
[
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
client_resource_test.go
|
package sls
import (
"os"
"testing"
"github.com/stretchr/testify/suite"
)
func TestResource(t *testing.T) {
suite.Run(t, new(ResourceTestSuite))
}
type ResourceTestSuite struct {
suite.Suite
endpoint string
projectName string
logstoreName string
accessKeyID string
accessKeySecret string
client Client
resourceName string
}
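// Illustrative invocation (values are placeholders, not from the original
// source): the suite reads its connection settings from the environment,
// e.g.
//
//	LOG_TEST_ENDPOINT=cn-hangzhou.log.aliyuncs.com \
//	LOG_TEST_PROJECT=my-project LOG_TEST_LOGSTORE=my-logstore \
//	LOG_TEST_ACCESS_KEY_ID=... LOG_TEST_ACCESS_KEY_SECRET=... \
//	go test -run TestResource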
func (s *ResourceTestSuite) SetupSuite() {
s.endpoint = os.Getenv("LOG_TEST_ENDPOINT")
s.projectName = os.Getenv("LOG_TEST_PROJECT")
s.logstoreName = os.Getenv("LOG_TEST_LOGSTORE")
s.accessKeyID = os.Getenv("LOG_TEST_ACCESS_KEY_ID")
s.accessKeySecret = os.Getenv("LOG_TEST_ACCESS_KEY_SECRET")
s.client.AccessKeyID = s.accessKeyID
s.client.AccessKeySecret = s.accessKeySecret
s.client.Endpoint = s.endpoint
s.resourceName = "user.test_resource_1"
}
func (s *ResourceTestSuite) TearDownSuite() {
}
func (s *ResourceTestSuite) createResource() error {
rs := &ResourceSchema{
Schema: []*ResourceSchemaItem{
&ResourceSchemaItem{
Column: "col1",
Desc: "col1 desc",
ExtInfo: map[string]string{},
Required: true,
Type: "string",
},
&ResourceSchemaItem{
Column: "col2",
Desc: "col2 desc",
ExtInfo: "optional",
Required: true,
Type: "string",
},
},
}
customResource := new(Resource)
customResource.Type = ResourceTypeUserDefine
customResource.Name = s.resourceName
customResource.Schema = rs.ToString()
customResource.Description = "user test resource 1 desc"
return s.client.CreateResource(customResource)
}
func (s *ResourceTestSuite) TestClient_CreateResource() {
err := s.createResource()
s.Require().Nil(err)
err = s.client.DeleteResource(s.resourceName)
s.Require().Nil(err)
}
func (s *ResourceTestSuite) TestClient_UpdateResource() {
err := s.createResource()
s.Require().Nil(err)
resource, err := s.client.GetResource(s.resourceName)
s.Require().Nil(err)
rs := new(ResourceSchema)
err = rs.FromJsonString(resource.Schema)
s.Require().Nil(err)
rs.Schema[0].Desc = "new desc"
resource.Schema = rs.ToString()
err = s.client.UpdateResource(resource)
s.Require().Nil(err)
resource, err = s.client.GetResource(s.resourceName)
s.Require().Nil(err)
nrs := new(ResourceSchema)
err = nrs.FromJsonString(resource.Schema)
s.Require().Nil(err)
s.Require().Equal("new desc", rs.Schema[0].Desc, "update resource failed")
err = s.client.DeleteResource(s.resourceName)
s.Require().Nil(err)
}
func (s *ResourceTestSuite) TestClient_DeleteResource() {
err := s.createResource()
s.Require().Nil(err)
_, err = s.client.GetResource(s.resourceName)
s.Require().Nil(err)
err = s.client.DeleteResource(s.resourceName)
s.Require().Nil(err)
_, err = s.client.GetResource(s.resourceName)
s.Require().NotNil(err)
}
func (s *ResourceTestSuite) TestClient_GetResource() {
err := s.createResource()
s.Require().Nil(err)
getResource, err := s.client.GetResource(s.resourceName)
s.Require().Nil(err)
s.Require().Equal(getResource.Name, s.resourceName)
rs := new(ResourceSchema)
err = rs.FromJsonString(getResource.Schema)
s.Require().Nil(err)
s.Require().Equal(len(rs.Schema), 2)
s.Require().Equal(rs.Schema[0].Desc, "col1 desc")
err = s.client.DeleteResource(s.resourceName)
s.Require().Nil(err)
}
func (s *ResourceTestSuite) TestClient_ListResource() {
err := s.createResource()
s.Require().Nil(err)
resources, count, total, err := s.client.ListResource(ResourceTypeUserDefine, s.resourceName, 0, 100)
s.Require().Nil(err)
if total != 1 || count != 1 {
s.Require().Fail("list resource failed")
}
s.Require().Equal(1, len(resources), "there should be only one resource")
resource := resources[0]
s.Require().Equal(s.resourceName, resource.Name, "list resource failed")
err = s.client.DeleteResource(s.resourceName)
s.Require().Nil(err)
}
|
[
"\"LOG_TEST_ENDPOINT\"",
"\"LOG_TEST_PROJECT\"",
"\"LOG_TEST_LOGSTORE\"",
"\"LOG_TEST_ACCESS_KEY_ID\"",
"\"LOG_TEST_ACCESS_KEY_SECRET\""
] |
[] |
[
"LOG_TEST_ACCESS_KEY_ID",
"LOG_TEST_ENDPOINT",
"LOG_TEST_ACCESS_KEY_SECRET",
"LOG_TEST_PROJECT",
"LOG_TEST_LOGSTORE"
] |
[]
|
["LOG_TEST_ACCESS_KEY_ID", "LOG_TEST_ENDPOINT", "LOG_TEST_ACCESS_KEY_SECRET", "LOG_TEST_PROJECT", "LOG_TEST_LOGSTORE"]
|
go
| 5 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
from pathlib import Path
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# smallbusiness directory.
current_path = Path(__file__).parent.resolve()
sys.path.append(str(current_path / "smallbusiness"))
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
terraform/providers/ibm/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex/structures.go
|
// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
// Licensed under the Mozilla Public License v2.0
package flex
import (
"bytes"
b64 "encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"path"
"reflect"
"strconv"
"strings"
"time"
"github.com/IBM-Cloud/bluemix-go/bmxerror"
"github.com/IBM-Cloud/bluemix-go/models"
"github.com/IBM-Cloud/container-services-go-sdk/kubernetesserviceapiv1"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/IBM/ibm-cos-sdk-go-config/resourceconfigurationv1"
"github.com/IBM/ibm-cos-sdk-go/service/s3"
kp "github.com/IBM/keyprotect-go-client"
"github.com/IBM/platform-services-go-sdk/globaltaggingv1"
"github.com/IBM/platform-services-go-sdk/iampolicymanagementv1"
rg "github.com/IBM/platform-services-go-sdk/resourcemanagerv2"
"github.com/apache/openwhisk-client-go/whisk"
"github.com/go-openapi/strfmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/softlayer/softlayer-go/datatypes"
"github.com/softlayer/softlayer-go/sl"
"github.com/IBM-Cloud/bluemix-go/api/container/containerv1"
"github.com/IBM-Cloud/bluemix-go/api/container/containerv2"
"github.com/IBM-Cloud/bluemix-go/api/icd/icdv4"
"github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2"
"github.com/IBM-Cloud/bluemix-go/api/schematics"
"github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2"
"github.com/IBM/platform-services-go-sdk/iamaccessgroupsv2"
"github.com/IBM/platform-services-go-sdk/iamidentityv1"
)
const (
prodBaseController = "https://cloud.ibm.com"
stageBaseController = "https://test.cloud.ibm.com"
//ResourceControllerURL ...
ResourceControllerURL = "resource_controller_url"
//ResourceName ...
ResourceName = "resource_name"
//ResourceCRN ...
ResourceCRN = "resource_crn"
//ResourceStatus ...
ResourceStatus = "resource_status"
//ResourceGroupName ...
ResourceGroupName = "resource_group_name"
//RelatedCRN ...
RelatedCRN = "related_crn"
SystemIBMLabelPrefix = "ibm-cloud.kubernetes.io/"
KubernetesLabelPrefix = "kubernetes.io/"
K8sLabelPrefix = "k8s.io/"
isLBListenerPolicyAction = "action"
isLBListenerPolicyTargetID = "target_id"
isLBListenerPolicyTargetURL = "target_url"
isLBListenerPolicyHTTPSRedirectStatusCode = "target_https_redirect_status_code"
isLBListenerPolicyHTTPSRedirectURI = "target_https_redirect_uri"
isLBListenerPolicyHTTPSRedirectListener = "target_https_redirect_listener"
isLBPoolSessPersistenceType = "session_persistence_type"
isLBPoolSessPersistenceAppCookieName = "session_persistence_app_cookie_name"
isLBProfile = "profile"
isLBRouteMode = "route_mode"
isLBType = "type"
)
//HashInt ...
func HashInt(v interface{}) int { return v.(int) }
func ExpandStringList(input []interface{}) []string {
vs := make([]string, len(input))
for i, v := range input {
vs[i] = v.(string)
}
return vs
}
func FlattenStringList(list []string) []interface{} {
vs := make([]interface{}, len(list))
for i, v := range list {
vs[i] = v
}
return vs
}
func ExpandIntList(input []interface{}) []int {
vs := make([]int, len(input))
for i, v := range input {
vs[i] = v.(int)
}
return vs
}
func FlattenIntList(list []int) []interface{} {
vs := make([]interface{}, len(list))
for i, v := range list {
vs[i] = v
}
return vs
}
func NewStringSet(f schema.SchemaSetFunc, in []string) *schema.Set {
var out = make([]interface{}, len(in), len(in))
for i, v := range in {
out[i] = v
}
return schema.NewSet(f, out)
}
func FlattenRoute(in []mccpv2.Route) *schema.Set {
vs := make([]string, len(in))
for i, v := range in {
vs[i] = v.GUID
}
return NewStringSet(schema.HashString, vs)
}
func stringSliceToSet(in []string) *schema.Set {
vs := make([]string, len(in))
for i, v := range in {
vs[i] = v
}
return NewStringSet(schema.HashString, vs)
}
func FlattenServiceBindings(in []mccpv2.ServiceBinding) *schema.Set {
vs := make([]string, len(in))
for i, v := range in {
vs[i] = v.ServiceInstanceGUID
}
return NewStringSet(schema.HashString, vs)
}
func flattenPort(in []int) *schema.Set {
var out = make([]interface{}, len(in))
for i, v := range in {
out[i] = v
}
return schema.NewSet(HashInt, out)
}
func FlattenFileStorageID(in []datatypes.Network_Storage) *schema.Set {
var out = []interface{}{}
for _, v := range in {
if *v.NasType == "NAS" {
out = append(out, *v.Id)
}
}
return schema.NewSet(HashInt, out)
}
func FlattenBlockStorageID(in []datatypes.Network_Storage) *schema.Set {
var out = []interface{}{}
for _, v := range in {
if *v.NasType == "ISCSI" {
out = append(out, *v.Id)
}
}
return schema.NewSet(HashInt, out)
}
func FlattenSSHKeyIDs(in []datatypes.Security_Ssh_Key) *schema.Set {
var out = []interface{}{}
for _, v := range in {
out = append(out, *v.Id)
}
return schema.NewSet(HashInt, out)
}
func FlattenSpaceRoleUsers(in []mccpv2.SpaceRole) *schema.Set {
var out = []interface{}{}
for _, v := range in {
out = append(out, v.UserName)
}
return schema.NewSet(schema.HashString, out)
}
func FlattenOrgRole(in []mccpv2.OrgRole, excludeUsername string) *schema.Set {
var out = []interface{}{}
for _, v := range in {
if excludeUsername == "" {
out = append(out, v.UserName)
} else {
if v.UserName != excludeUsername {
out = append(out, v.UserName)
}
}
}
return schema.NewSet(schema.HashString, out)
}
func flattenMapInterfaceVal(m map[string]interface{}) map[string]string {
out := make(map[string]string)
for k, v := range m {
out[k] = fmt.Sprintf("%v", v)
}
return out
}
func flattenCredentials(creds map[string]interface{}) map[string]string {
return flattenMapInterfaceVal(creds)
}
func flattenServiceKeyCredentials(creds map[string]interface{}) map[string]string {
return flattenCredentials(creds)
}
func FlattenServiceInstanceCredentials(keys []mccpv2.ServiceKeyFields) []interface{} {
var out = make([]interface{}, len(keys), len(keys))
for i, k := range keys {
m := make(map[string]interface{})
m["name"] = k.Entity.Name
m["credentials"] = Flatten(k.Entity.Credentials)
out[i] = m
}
return out
}
func FlattenUsersSet(userList *schema.Set) []string {
users := make([]string, 0)
for _, user := range userList.List() {
users = append(users, user.(string))
}
return users
}
func ExpandProtocols(configured []interface{}) ([]datatypes.Network_LBaaS_LoadBalancerProtocolConfiguration, error) {
protocols := make([]datatypes.Network_LBaaS_LoadBalancerProtocolConfiguration, 0, len(configured))
var lbMethodToId = make(map[string]string)
for _, lRaw := range configured {
data := lRaw.(map[string]interface{})
p := &datatypes.Network_LBaaS_LoadBalancerProtocolConfiguration{
FrontendProtocol: sl.String(data["frontend_protocol"].(string)),
BackendProtocol: sl.String(data["backend_protocol"].(string)),
FrontendPort: sl.Int(data["frontend_port"].(int)),
BackendPort: sl.Int(data["backend_port"].(int)),
}
if v, ok := data["session_stickiness"]; ok && v.(string) != "" {
p.SessionType = sl.String(v.(string))
}
if v, ok := data["max_conn"]; ok && v.(int) != 0 {
p.MaxConn = sl.Int(v.(int))
}
if v, ok := data["tls_certificate_id"]; ok && v.(int) != 0 {
p.TlsCertificateId = sl.Int(v.(int))
}
if v, ok := data["load_balancing_method"]; ok {
p.LoadBalancingMethod = sl.String(lbMethodToId[v.(string)])
}
if v, ok := data["protocol_id"]; ok && v.(string) != "" {
p.ListenerUuid = sl.String(v.(string))
}
var isValid bool
if p.TlsCertificateId != nil && *p.TlsCertificateId != 0 {
// validate the protocol is correct
if *p.FrontendProtocol == "HTTPS" {
isValid = true
}
} else {
isValid = true
}
if isValid {
protocols = append(protocols, *p)
} else {
return protocols, fmt.Errorf("tls_certificate_id may be set only when frontend protocol is 'HTTPS'")
}
}
return protocols, nil
}
func ExpandMembers(configured []interface{}) []datatypes.Network_LBaaS_LoadBalancerServerInstanceInfo {
members := make([]datatypes.Network_LBaaS_LoadBalancerServerInstanceInfo, 0, len(configured))
for _, lRaw := range configured {
data := lRaw.(map[string]interface{})
p := &datatypes.Network_LBaaS_LoadBalancerServerInstanceInfo{}
if v, ok := data["private_ip_address"]; ok && v.(string) != "" {
p.PrivateIpAddress = sl.String(v.(string))
}
if v, ok := data["weight"]; ok && v.(int) != 0 {
p.Weight = sl.Int(v.(int))
}
members = append(members, *p)
}
return members
}
func FlattenServerInstances(list []datatypes.Network_LBaaS_Member) []map[string]interface{} {
result := make([]map[string]interface{}, 0, len(list))
for _, i := range list {
l := map[string]interface{}{
"private_ip_address": *i.Address,
"member_id": *i.Uuid,
}
if i.Weight != nil {
l["weight"] = *i.Weight
}
result = append(result, l)
}
return result
}
func FlattenProtocols(list []datatypes.Network_LBaaS_Listener) []map[string]interface{} {
var lbIdToMethod = make(map[string]string)
result := make([]map[string]interface{}, 0, len(list))
for _, i := range list {
l := map[string]interface{}{
"frontend_protocol": *i.Protocol,
"frontend_port": *i.ProtocolPort,
"backend_protocol": *i.DefaultPool.Protocol,
"backend_port": *i.DefaultPool.ProtocolPort,
"load_balancing_method": lbIdToMethod[*i.DefaultPool.LoadBalancingAlgorithm],
"protocol_id": *i.Uuid,
}
if i.DefaultPool.SessionAffinity != nil && i.DefaultPool.SessionAffinity.Type != nil && *i.DefaultPool.SessionAffinity.Type != "" {
l["session_stickiness"] = *i.DefaultPool.SessionAffinity.Type
}
if i.ConnectionLimit != nil && *i.ConnectionLimit != 0 {
l["max_conn"] = *i.ConnectionLimit
}
if i.TlsCertificateId != nil && *i.TlsCertificateId != 0 {
l["tls_certificate_id"] = *i.TlsCertificateId
}
result = append(result, l)
}
return result
}
func FlattenVpcWorkerPools(list []containerv2.GetWorkerPoolResponse) []map[string]interface{} {
workerPools := make([]map[string]interface{}, len(list))
for i, workerPool := range list {
l := map[string]interface{}{
"id": workerPool.ID,
"name": workerPool.PoolName,
"flavor": workerPool.Flavor,
"worker_count": workerPool.WorkerCount,
"isolation": workerPool.Isolation,
"labels": workerPool.Labels,
"state": workerPool.Lifecycle.ActualState,
}
zones := workerPool.Zones
zonesConfig := make([]map[string]interface{}, len(zones))
for j, zone := range zones {
z := map[string]interface{}{
"zone": zone.ID,
"worker_count": zone.WorkerCount,
}
subnets := zone.Subnets
subnetConfig := make([]map[string]interface{}, len(subnets))
for k, subnet := range subnets {
s := map[string]interface{}{
"id": subnet.ID,
"primary": subnet.Primary,
}
subnetConfig[k] = s
}
z["subnets"] = subnetConfig
zonesConfig[j] = z
}
l["zones"] = zonesConfig
workerPools[i] = l
}
return workerPools
}
func flattenVpcZones(list []containerv2.ZoneResp) []map[string]interface{} {
zones := make([]map[string]interface{}, len(list))
for i, zone := range list {
l := map[string]interface{}{
"id": zone.ID,
"subnet_id": FlattenSubnets(zone.Subnets),
"worker_count": zone.WorkerCount,
}
zones[i] = l
}
return zones
}
func FlattenConditions(list []iamaccessgroupsv2.RuleConditions) []map[string]interface{} {
conditions := make([]map[string]interface{}, len(list))
for i, cond := range list {
l := map[string]interface{}{
"claim": cond.Claim,
"operator": cond.Operator,
"value": strings.ReplaceAll(*cond.Value, "\"", ""),
}
conditions[i] = l
}
return conditions
}
func FlattenAccessGroupRules(list *iamaccessgroupsv2.RulesList) []map[string]interface{} {
rules := make([]map[string]interface{}, len(list.Rules))
for i, item := range list.Rules {
l := map[string]interface{}{
"name": item.Name,
"expiration": item.Expiration,
"identity_provider": item.RealmName,
"conditions": FlattenConditions(item.Conditions),
}
rules[i] = l
}
return rules
}
func FlattenSubnets(list []containerv2.Subnet) []map[string]interface{} {
subs := make([]map[string]interface{}, len(list))
for i, sub := range list {
l := map[string]interface{}{
"id": sub.ID,
"worker_count": sub.Primary,
}
subs[i] = l
}
return subs
}
func FlattenZones(list []containerv1.WorkerPoolZoneResponse) []map[string]interface{} {
zones := make([]map[string]interface{}, len(list))
for i, zone := range list {
l := map[string]interface{}{
"zone": zone.WorkerPoolZone.ID,
"private_vlan": zone.WorkerPoolZone.WorkerPoolZoneNetwork.PrivateVLAN,
"public_vlan": zone.WorkerPoolZone.WorkerPoolZoneNetwork.PublicVLAN,
"worker_count": zone.WorkerCount,
}
zones[i] = l
}
return zones
}
func FlattenWorkerPools(list []containerv1.WorkerPoolResponse) []map[string]interface{} {
workerPools := make([]map[string]interface{}, len(list))
for i, workerPool := range list {
l := map[string]interface{}{
"id": workerPool.ID,
"hardware": workerPool.Isolation,
"name": workerPool.Name,
"machine_type": workerPool.MachineType,
"size_per_zone": workerPool.Size,
"state": workerPool.State,
"labels": workerPool.Labels,
}
zones := workerPool.Zones
zonesConfig := make([]map[string]interface{}, len(zones))
for j, zone := range zones {
z := map[string]interface{}{
"zone": zone.ID,
"private_vlan": zone.PrivateVLAN,
"public_vlan": zone.PublicVLAN,
"worker_count": zone.WorkerCount,
}
zonesConfig[j] = z
}
l["zones"] = zonesConfig
workerPools[i] = l
}
return workerPools
}
func FlattenAlbs(list []containerv1.ALBConfig, filterType string) []map[string]interface{} {
albs := make([]map[string]interface{}, 0)
for _, alb := range list {
if alb.ALBType == filterType || filterType == "all" {
l := map[string]interface{}{
"id": alb.ALBID,
"name": alb.Name,
"alb_type": alb.ALBType,
"enable": alb.Enable,
"state": alb.State,
"num_of_instances": alb.NumOfInstances,
"alb_ip": alb.ALBIP,
"resize": alb.Resize,
"disable_deployment": alb.DisableDeployment,
}
albs = append(albs, l)
}
}
return albs
}
func FlattenVpcAlbs(list []containerv2.AlbConfig, filterType string) []map[string]interface{} {
albs := make([]map[string]interface{}, 0)
for _, alb := range list {
if alb.AlbType == filterType || filterType == "all" {
l := map[string]interface{}{
"id": alb.AlbID,
"name": alb.Name,
"alb_type": alb.AlbType,
"enable": alb.Enable,
"state": alb.State,
"resize": alb.Resize,
"disable_deployment": alb.DisableDeployment,
"load_balancer_hostname": alb.LoadBalancerHostname,
}
albs = append(albs, l)
}
}
return albs
}
func FlattenNetworkInterfaces(list []containerv2.Network) []map[string]interface{} {
nwInterfaces := make([]map[string]interface{}, len(list))
for i, nw := range list {
l := map[string]interface{}{
"cidr": nw.Cidr,
"ip_address": nw.IpAddress,
"subnet_id": nw.SubnetID,
}
nwInterfaces[i] = l
}
return nwInterfaces
}
func FlattenVlans(list []containerv1.Vlan) []map[string]interface{} {
vlans := make([]map[string]interface{}, len(list))
for i, vlanR := range list {
subnets := make([]map[string]interface{}, len(vlanR.Subnets))
for j, subnetR := range vlanR.Subnets {
subnet := make(map[string]interface{})
subnet["id"] = subnetR.ID
subnet["cidr"] = subnetR.Cidr
subnet["is_byoip"] = subnetR.IsByOIP
subnet["is_public"] = subnetR.IsPublic
ips := make([]string, len(subnetR.Ips))
for k, ip := range subnetR.Ips {
ips[k] = ip
}
subnet["ips"] = ips
subnets[j] = subnet
}
l := map[string]interface{}{
"id": vlanR.ID,
"subnets": subnets,
}
vlans[i] = l
}
return vlans
}
func FlattenIcdGroups(grouplist icdv4.GroupList) []map[string]interface{} {
groups := make([]map[string]interface{}, len(grouplist.Groups))
for i, group := range grouplist.Groups {
memorys := make([]map[string]interface{}, 1)
memory := make(map[string]interface{})
memory["units"] = group.Memory.Units
memory["allocation_mb"] = group.Memory.AllocationMb
memory["minimum_mb"] = group.Memory.MinimumMb
memory["step_size_mb"] = group.Memory.StepSizeMb
memory["is_adjustable"] = group.Memory.IsAdjustable
memory["can_scale_down"] = group.Memory.CanScaleDown
memorys[0] = memory
cpus := make([]map[string]interface{}, 1)
cpu := make(map[string]interface{})
cpu["units"] = group.Cpu.Units
cpu["allocation_count"] = group.Cpu.AllocationCount
cpu["minimum_count"] = group.Cpu.MinimumCount
cpu["step_size_count"] = group.Cpu.StepSizeCount
cpu["is_adjustable"] = group.Cpu.IsAdjustable
cpu["can_scale_down"] = group.Cpu.CanScaleDown
cpus[0] = cpu
disks := make([]map[string]interface{}, 1)
disk := make(map[string]interface{})
disk["units"] = group.Disk.Units
disk["allocation_mb"] = group.Disk.AllocationMb
disk["minimum_mb"] = group.Disk.MinimumMb
disk["step_size_mb"] = group.Disk.StepSizeMb
disk["is_adjustable"] = group.Disk.IsAdjustable
disk["can_scale_down"] = group.Disk.CanScaleDown
disks[0] = disk
l := map[string]interface{}{
"group_id": group.Id,
"count": group.Count,
"memory": memorys,
"cpu": cpus,
"disk": disks,
}
groups[i] = l
}
return groups
}
func NormalizeJSONString(jsonString interface{}) (string, error) {
var j interface{}
if jsonString == nil || jsonString.(string) == "" {
return "", nil
}
s := jsonString.(string)
err := json.Unmarshal([]byte(s), &j)
if err != nil {
return s, err
}
bytes, err := json.Marshal(j)
if err != nil {
return "", err
}
return string(bytes[:]), nil
}
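// Editor's note - illustrative usage, not part of the original source:
// NormalizeJSONString round-trips a JSON string through Unmarshal/Marshal,
// which sorts object keys and strips insignificant whitespace, e.g.:
//
//	s, _ := NormalizeJSONString(` {"b": 1, "a": 2} `)
//	// s == `{"a":2,"b":1}`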
func ExpandAnnotations(annotations string) (whisk.KeyValueArr, error) {
var result whisk.KeyValueArr
dc := json.NewDecoder(strings.NewReader(annotations))
dc.UseNumber()
err := dc.Decode(&result)
return result, err
}
func FlattenAnnotations(in whisk.KeyValueArr) (string, error) {
b, err := json.Marshal(in)
if err != nil {
return "", err
}
return string(b[:]), nil
}
func ExpandParameters(annotations string) (whisk.KeyValueArr, error) {
var result whisk.KeyValueArr
dc := json.NewDecoder(strings.NewReader(annotations))
dc.UseNumber()
err := dc.Decode(&result)
return result, err
}
func FlattenParameters(in whisk.KeyValueArr) (string, error) {
b, err := json.Marshal(in)
if err != nil {
return "", err
}
return string(b[:]), nil
}
func ExpandLimits(l []interface{}) *whisk.Limits {
if len(l) == 0 || l[0] == nil {
return &whisk.Limits{}
}
in := l[0].(map[string]interface{})
obj := &whisk.Limits{
Timeout: ptrToInt(in["timeout"].(int)),
Memory: ptrToInt(in["memory"].(int)),
Logsize: ptrToInt(in["log_size"].(int)),
}
return obj
}
func FlattenActivityTrack(in *resourceconfigurationv1.ActivityTracking) []interface{} {
att := make(map[string]interface{})
if in != nil {
if in.ReadDataEvents != nil {
att["read_data_events"] = *in.ReadDataEvents
}
if in.WriteDataEvents != nil {
att["write_data_events"] = *in.WriteDataEvents
}
if in.ActivityTrackerCrn != nil {
att["activity_tracker_crn"] = *in.ActivityTrackerCrn
}
}
return []interface{}{att}
}
func FlattenMetricsMonitor(in *resourceconfigurationv1.MetricsMonitoring) []interface{} {
att := make(map[string]interface{})
if in != nil {
if in.UsageMetricsEnabled != nil {
att["usage_metrics_enabled"] = *in.UsageMetricsEnabled
}
if in.MetricsMonitoringCrn != nil {
att["metrics_monitoring_crn"] = *in.MetricsMonitoringCrn
}
if in.RequestMetricsEnabled != nil {
att["request_metrics_enabled"] = *in.RequestMetricsEnabled
}
}
return []interface{}{att}
}
func ArchiveRuleGet(in []*s3.LifecycleRule) []interface{} {
rules := make([]interface{}, 0, len(in))
for _, r := range in {
// Skip rules that are not archive rules: a lifecycle rule is one of archive, expire, noncurrent-version expiration, or abort-incomplete-multipart-upload.
if r.Expiration == nil && r.NoncurrentVersionExpiration == nil && r.AbortIncompleteMultipartUpload == nil {
rule := make(map[string]interface{})
if r.Status != nil {
if *r.Status == "Enabled" {
rule["enable"] = true
} else {
rule["enable"] = false
}
}
if r.ID != nil {
rule["rule_id"] = *r.ID
}
for _, transition := range r.Transitions {
if transition.Days != nil {
rule["days"] = int(*transition.Days)
}
if transition.StorageClass != nil {
rule["type"] = *transition.StorageClass
}
}
rules = append(rules, rule)
}
}
return rules
}
func ExpireRuleGet(in []*s3.LifecycleRule) []interface{} {
rules := make([]interface{}, 0, len(in))
for _, r := range in {
if r.Expiration != nil && r.Transitions == nil {
rule := make(map[string]interface{})
if r.Status != nil {
if *r.Status == "Enabled" {
rule["enable"] = true
} else {
rule["enable"] = false
}
}
if r.ID != nil {
rule["rule_id"] = *r.ID
}
if r.Expiration != nil {
if r.Expiration.Days != nil {
days := int(*(r.Expiration).Days)
if days > 0 {
rule["days"] = days
}
}
if r.Expiration.Date != nil {
expirationTime := *(r.Expiration).Date
d := strings.Split(expirationTime.Format(time.RFC3339), "T")
rule["date"] = d[0]
}
if r.Expiration.ExpiredObjectDeleteMarker != nil {
rule["expired_object_delete_marker"] = *(r.Expiration).ExpiredObjectDeleteMarker
}
}
if r.Filter != nil && r.Filter.Prefix != nil {
rule["prefix"] = *(r.Filter).Prefix
}
rules = append(rules, rule)
}
}
return rules
}
func Nc_exp_RuleGet(in []*s3.LifecycleRule) []interface{} {
rules := make([]interface{}, 0, len(in))
for _, r := range in {
if r.Expiration == nil && r.AbortIncompleteMultipartUpload == nil && r.Transitions == nil {
rule := make(map[string]interface{})
if r.Status != nil {
if *r.Status == "Enabled" {
rule["enable"] = true
} else {
rule["enable"] = false
}
}
if r.ID != nil {
rule["rule_id"] = *r.ID
}
if r.NoncurrentVersionExpiration != nil {
rule["noncurrent_days"] = int(*(r.NoncurrentVersionExpiration).NoncurrentDays)
}
if r.Filter != nil && r.Filter.Prefix != nil {
rule["prefix"] = *(r.Filter).Prefix
}
rules = append(rules, rule)
}
}
return rules
}
func Abort_mpu_RuleGet(in []*s3.LifecycleRule) []interface{} {
rules := make([]interface{}, 0, len(in))
for _, r := range in {
if r.Expiration == nil && r.NoncurrentVersionExpiration == nil && r.Transitions == nil {
rule := make(map[string]interface{})
if r.Status != nil {
if *r.Status == "Enabled" {
rule["enable"] = true
} else {
rule["enable"] = false
}
}
if r.ID != nil {
rule["rule_id"] = *r.ID
}
if r.AbortIncompleteMultipartUpload != nil {
rule["days_after_initiation"] = int(*(r.AbortIncompleteMultipartUpload).DaysAfterInitiation)
}
if r.Filter != nil && r.Filter.Prefix != nil {
rule["prefix"] = *(r.Filter).Prefix
}
rules = append(rules, rule)
}
}
return rules
}
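// Editor's note - illustrative summary, not part of the original source: the
// four lifecycle getters above classify a rule by which of its fields are
// unset. ArchiveRuleGet keeps rules whose only payload is Transitions,
// ExpireRuleGet keeps rules with Expiration and no Transitions,
// Nc_exp_RuleGet keeps noncurrent-version expiration rules, and
// Abort_mpu_RuleGet keeps rules that only abort incomplete multipart uploads.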
func RetentionRuleGet(in *s3.ProtectionConfiguration) []interface{} {
rules := make([]interface{}, 0, 1)
if in != nil && in.Status != nil && *in.Status == "COMPLIANCE" {
protectConfig := make(map[string]interface{})
if in.DefaultRetention != nil {
protectConfig["default"] = int(*(in.DefaultRetention).Days)
}
if in.MaximumRetention != nil {
protectConfig["maximum"] = int(*(in.MaximumRetention).Days)
}
if in.MinimumRetention != nil {
protectConfig["minimum"] = int(*(in.MinimumRetention).Days)
}
if in.EnablePermanentRetention != nil {
protectConfig["permanent"] = *in.EnablePermanentRetention
}
rules = append(rules, protectConfig)
}
return rules
}
func FlattenCosObejctVersioning(in *s3.GetBucketVersioningOutput) []interface{} {
versioning := make([]interface{}, 0, 1)
if in != nil {
if in.Status != nil {
att := make(map[string]interface{})
if *in.Status == "Enabled" {
att["enable"] = true
} else {
att["enable"] = false
}
versioning = append(versioning, att)
}
}
return versioning
}
func FlattenLimits(in *whisk.Limits) []interface{} {
att := make(map[string]interface{})
if in.Timeout != nil {
att["timeout"] = *in.Timeout
}
if in.Memory != nil {
att["memory"] = *in.Memory
}
if in.Logsize != nil {
att["log_size"] = *in.Logsize
}
return []interface{}{att}
}
func ExpandExec(execs []interface{}) *whisk.Exec {
var code string
var document []byte
for _, exec := range execs {
e, _ := exec.(map[string]interface{})
code_path := e["code_path"].(string)
if code_path != "" {
ext := path.Ext(code_path)
if strings.ToLower(ext) == ".zip" {
data, err := ioutil.ReadFile(code_path)
if err != nil {
log.Println("Error reading file", err)
return &whisk.Exec{}
}
sEnc := b64.StdEncoding.EncodeToString(data)
code = sEnc
} else {
data, err := ioutil.ReadFile(code_path)
if err != nil {
log.Println("Error reading file", err)
return &whisk.Exec{}
}
document = data
code = string(document)
}
} else {
code = e["code"].(string)
}
obj := &whisk.Exec{
Image: e["image"].(string),
Init: e["init"].(string),
Code: PtrToString(code),
Kind: e["kind"].(string),
Main: e["main"].(string),
Components: ExpandStringList(e["components"].([]interface{})),
}
return obj
}
return &whisk.Exec{}
}
func FlattenExec(in *whisk.Exec, d *schema.ResourceData) []interface{} {
code_data := 4194304 // the 'code' parameter should always be at most 4 MB
att := make(map[string]interface{})
// the OpenWhisk SDK does not return the value of code_path,
// so use d.GetOk to restore the code_path value from state.
if cPath, ok := d.GetOk("exec.0.code_path"); ok {
att["code_path"] = cPath.(string)
}
if in.Image != "" {
att["image"] = in.Image
}
if in.Init != "" {
att["init"] = in.Init
}
if in.Code != nil && len(*in.Code) <= code_data {
att["code"] = *in.Code
}
if in.Kind != "" {
att["kind"] = in.Kind
}
if in.Main != "" {
att["main"] = in.Main
}
if len(in.Components) > 0 {
att["components"] = FlattenStringList(in.Components)
}
return []interface{}{att}
}
func ptrToInt(i int) *int {
return &i
}
func PtrToString(s string) *string {
return &s
}
func IntValue(i64 *int64) (i int) {
if i64 != nil {
i = int(*i64)
}
return
}
func float64Value(f32 *float32) (f float64) {
if f32 != nil {
f = float64(*f32)
}
return
}
func dateToString(d *strfmt.Date) (s string) {
if d != nil {
s = d.String()
}
return
}
func DateTimeToString(dt *strfmt.DateTime) (s string) {
if dt != nil {
s = dt.String()
}
return
}
func FilterActionAnnotations(in whisk.KeyValueArr) (string, error) {
noExec := make(whisk.KeyValueArr, 0, len(in))
for _, v := range in {
if v.Key == "exec" {
continue
}
noExec = append(noExec, v)
}
return FlattenAnnotations(noExec)
}
func FilterActionParameters(in whisk.KeyValueArr) (string, error) {
noAction := make(whisk.KeyValueArr, 0, len(in))
for _, v := range in {
if v.Key == "_actions" {
continue
}
noAction = append(noAction, v)
}
return FlattenParameters(noAction)
}
func FilterInheritedAnnotations(inheritedAnnotations, annotations whisk.KeyValueArr) whisk.KeyValueArr {
userDefinedAnnotations := make(whisk.KeyValueArr, 0)
for _, a := range annotations {
insert := false
if a.Key == "binding" || a.Key == "exec" {
continue
}
for _, b := range inheritedAnnotations {
if a.Key == b.Key && reflect.DeepEqual(a.Value, b.Value) {
insert = false
break
}
insert = true
}
if insert {
userDefinedAnnotations = append(userDefinedAnnotations, a)
}
}
return userDefinedAnnotations
}
func FilterInheritedParameters(inheritedParameters, parameters whisk.KeyValueArr) whisk.KeyValueArr {
userDefinedParameters := make(whisk.KeyValueArr, 0)
for _, p := range parameters {
insert := false
if p.Key == "_actions" {
continue
}
for _, b := range inheritedParameters {
if p.Key == b.Key && reflect.DeepEqual(p.Value, b.Value) {
insert = false
break
}
insert = true
}
if insert {
userDefinedParameters = append(userDefinedParameters, p)
}
}
return userDefinedParameters
}
func IsEmpty(object interface{}) bool {
//First check normal definitions of empty
if object == nil {
return true
} else if object == "" {
return true
} else if object == false {
return true
}
//Then see if it's a struct
if reflect.ValueOf(object).Kind() == reflect.Struct {
// and create an empty copy of the struct object to compare against
empty := reflect.New(reflect.TypeOf(object)).Elem().Interface()
if reflect.DeepEqual(object, empty) {
return true
}
}
return false
}
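// Editor's note - illustrative usage, not part of the original source:
//
//	IsEmpty(nil)         // true
//	IsEmpty("")          // true
//	IsEmpty(false)       // true
//	IsEmpty(0)           // false: 0 is neither nil, "", false, nor a struct
//	IsEmpty(time.Time{}) // true: the zero value of a struct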
func FilterTriggerAnnotations(in whisk.KeyValueArr) (string, error) {
noFeed := make(whisk.KeyValueArr, 0, len(in))
for _, v := range in {
if v.Key == "feed" {
continue
}
noFeed = append(noFeed, v)
}
return FlattenParameters(noFeed)
}
func FlattenFeed(feedName string) []interface{} {
att := make(map[string]interface{})
att["name"] = feedName
att["parameters"] = "[]"
return []interface{}{att}
}
func FlattenGatewayVlans(list []datatypes.Network_Gateway_Vlan) []map[string]interface{} {
vlans := make([]map[string]interface{}, len(list))
for i, ele := range list {
vlan := make(map[string]interface{})
vlan["bypass"] = *ele.BypassFlag
vlan["network_vlan_id"] = *ele.NetworkVlanId
vlan["vlan_id"] = *ele.Id
vlans[i] = vlan
}
return vlans
}
func FlattenGatewayMembers(d *schema.ResourceData, list []datatypes.Network_Gateway_Member) []map[string]interface{} {
members := make([]map[string]interface{}, len(list))
for i, ele := range list {
hardware := *ele.Hardware
member := make(map[string]interface{})
member["member_id"] = *ele.HardwareId
member["hostname"] = *hardware.Hostname
member["domain"] = *hardware.Domain
if hardware.Notes != nil {
member["notes"] = *hardware.Notes
}
if hardware.Datacenter != nil {
member["datacenter"] = *hardware.Datacenter.Name
}
if hardware.PrimaryNetworkComponent.MaxSpeed != nil {
member["network_speed"] = *hardware.PrimaryNetworkComponent.MaxSpeed
}
member["redundant_network"] = false
member["unbonded_network"] = false
backendNetworkComponent := ele.Hardware.BackendNetworkComponents
if len(backendNetworkComponent) > 2 && ele.Hardware.PrimaryBackendNetworkComponent != nil {
if *hardware.PrimaryBackendNetworkComponent.RedundancyEnabledFlag {
member["redundant_network"] = true
} else {
member["unbonded_network"] = true
}
}
tagReferences := ele.Hardware.TagReferences
tagReferencesLen := len(tagReferences)
if tagReferencesLen > 0 {
tags := make([]interface{}, 0, tagReferencesLen)
for _, tagRef := range tagReferences {
tags = append(tags, *tagRef.Tag.Name)
}
member["tags"] = schema.NewSet(schema.HashString, tags)
}
member["redundant_power_supply"] = false
if *hardware.PowerSupplyCount == 2 {
member["redundant_power_supply"] = true
}
member["memory"] = *hardware.MemoryCapacity
if !(*hardware.PrivateNetworkOnlyFlag) {
member["public_vlan_id"] = *hardware.NetworkVlans[1].Id
}
member["private_vlan_id"] = *hardware.NetworkVlans[0].Id
if hardware.PrimaryIpAddress != nil {
member["public_ipv4_address"] = *hardware.PrimaryIpAddress
}
if hardware.PrimaryBackendIpAddress != nil {
member["private_ipv4_address"] = *hardware.PrimaryBackendIpAddress
}
member["ipv6_enabled"] = false
if ele.Hardware.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord != nil {
member["ipv6_enabled"] = true
member["ipv6_address"] = *hardware.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.IpAddress
}
member["private_network_only"] = *hardware.PrivateNetworkOnlyFlag
userData := hardware.UserData
if len(userData) > 0 && userData[0].Value != nil {
member["user_metadata"] = *userData[0].Value
}
members[i] = member
}
return members
}
func FlattenDisks(result datatypes.Virtual_Guest) []int {
var out = make([]int, 0)
for _, v := range result.BlockDevices {
// skip devices 1 and 7, which are reserved for the swap disk and metadata
_, ok := sl.GrabOk(result, "BillingItem.OrderItem.Preset")
if ok {
if *v.Device != "1" && *v.Device != "7" && *v.Device != "0" {
capacity, ok := sl.GrabOk(v, "DiskImage.Capacity")
if ok {
out = append(out, capacity.(int))
}
}
} else {
if *v.Device != "1" && *v.Device != "7" {
capacity, ok := sl.GrabOk(v, "DiskImage.Capacity")
if ok {
out = append(out, capacity.(int))
}
}
}
}
return out
}
func FlattenDisksForWindows(result datatypes.Virtual_Guest) []int {
var out = make([]int, 0)
for _, v := range result.BlockDevices {
// skip devices 1 and 7, which are reserved for the swap disk and metadata
_, ok := sl.GrabOk(result, "BillingItem.OrderItem.Preset")
if ok {
if *v.Device != "1" && *v.Device != "7" && *v.Device != "0" && *v.Device != "3" {
capacity, ok := sl.GrabOk(v, "DiskImage.Capacity")
if ok {
out = append(out, capacity.(int))
}
}
} else {
if *v.Device != "1" && *v.Device != "7" && *v.Device != "3" {
capacity, ok := sl.GrabOk(v, "DiskImage.Capacity")
if ok {
out = append(out, capacity.(int))
}
}
}
}
return out
}
func filterResourceKeyParameters(params map[string]interface{}) map[string]interface{} {
delete(params, "role_crn")
return params
}
func IdParts(id string) ([]string, error) {
if strings.Contains(id, "/") {
parts := strings.Split(id, "/")
return parts, nil
}
return []string{}, fmt.Errorf("The given id %s does not contain / please check documentation on how to provider id during import command", id)
}
func SepIdParts(id string, separator string) ([]string, error) {
if strings.Contains(id, separator) {
parts := strings.Split(id, separator)
return parts, nil
}
return []string{}, fmt.Errorf("The given id %s does not contain %s please check documentation on how to provider id during import command", id, separator)
}
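// Editor's note - illustrative usage, not part of the original source
// (the IDs below are made-up examples):
//
//	parts, _ := IdParts("cluster_id/worker_id") // ["cluster_id", "worker_id"]
//	parts, _ = SepIdParts("zone:cis_crn", ":")  // ["zone", "cis_crn"]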
func VmIdParts(id string) ([]string, error) {
parts := strings.Split(id, "/")
return parts, nil
}
func CfIdParts(id string) ([]string, error) {
parts := strings.Split(id, ":")
return parts, nil
}
// getCustomAttributes will return all attributes which are not system defined
func getCustomAttributes(r iampolicymanagementv1.PolicyResource) []iampolicymanagementv1.ResourceAttribute {
attributes := []iampolicymanagementv1.ResourceAttribute{}
for _, a := range r.Attributes {
switch *a.Name {
case "accesGroupId":
case "accountId":
case "organizationId":
case "spaceId":
case "region":
case "resource":
case "resourceType":
case "resourceGroupId":
case "serviceType":
case "serviceName":
case "serviceInstance":
default:
attributes = append(attributes, a)
}
}
return attributes
}
func FlattenPolicyResource(list []iampolicymanagementv1.PolicyResource) []map[string]interface{} {
result := make([]map[string]interface{}, 0, len(list))
for _, i := range list {
l := map[string]interface{}{
"service": GetResourceAttribute("serviceName", i),
"resource_instance_id": GetResourceAttribute("serviceInstance", i),
"region": GetResourceAttribute("region", i),
"resource_type": GetResourceAttribute("resourceType", i),
"resource": GetResourceAttribute("resource", i),
"resource_group_id": GetResourceAttribute("resourceGroupId", i),
"service_type": GetResourceAttribute("serviceType", i),
}
customAttributes := getCustomAttributes(i)
if len(customAttributes) > 0 {
out := make(map[string]string)
for _, a := range customAttributes {
out[*a.Name] = *a.Value
}
l["attributes"] = out
}
result = append(result, l)
}
return result
}
func FlattenPolicyResourceAttributes(list []iampolicymanagementv1.PolicyResource) []map[string]interface{} {
result := make([]map[string]interface{}, 0)
for _, i := range list {
for _, a := range i.Attributes {
if *a.Name != "accountId" {
l := map[string]interface{}{
"name": a.Name,
"value": a.Value,
"operator": a.Operator,
}
result = append(result, l)
}
}
}
return result
}
func FlattenPolicyResourceTags(resources []iampolicymanagementv1.PolicyResource) []map[string]interface{} {
result := make([]map[string]interface{}, 0)
for _, resource := range resources {
if resource.Tags != nil {
for _, tags := range resource.Tags {
tag := map[string]interface{}{
"name": tags.Name,
"value": tags.Value,
"operator": tags.Operator,
}
result = append(result, tag)
}
}
}
return result
}
// Cloud Internet Services
func FlattenHealthMonitors(list []datatypes.Network_LBaaS_Listener) []map[string]interface{} {
result := make([]map[string]interface{}, 0, len(list))
ports := make([]int, 0, 0)
for _, i := range list {
l := map[string]interface{}{
"protocol": *i.DefaultPool.Protocol,
"port": *i.DefaultPool.ProtocolPort,
"interval": *i.DefaultPool.HealthMonitor.Interval,
"max_retries": *i.DefaultPool.HealthMonitor.MaxRetries,
"timeout": *i.DefaultPool.HealthMonitor.Timeout,
"monitor_id": *i.DefaultPool.HealthMonitor.Uuid,
}
if i.DefaultPool.HealthMonitor.UrlPath != nil {
l["url_path"] = *i.DefaultPool.HealthMonitor.UrlPath
}
if !contains(ports, *i.DefaultPool.ProtocolPort) {
result = append(result, l)
}
ports = append(ports, *i.DefaultPool.ProtocolPort)
}
return result
}
func contains(s []int, e int) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
func StringContains(s []string, str string) bool {
for _, a := range s {
if a == str {
return true
}
}
return false
}
func FlattenMembersData(list []iamaccessgroupsv2.ListGroupMembersResponseMember, users []usermanagementv2.UserInfo, serviceids []iamidentityv1.ServiceID) ([]string, []string) {
var ibmid []string
var serviceid []string
for _, m := range list {
if *m.Type == "user" {
for _, user := range users {
if user.IamID == *m.IamID {
ibmid = append(ibmid, user.Email)
break
}
}
} else {
for _, srid := range serviceids {
if *srid.IamID == *m.IamID {
serviceid = append(serviceid, *srid.ID)
break
}
}
}
}
return ibmid, serviceid
}
func FlattenAccessGroupMembers(list []iamaccessgroupsv2.ListGroupMembersResponseMember, users []usermanagementv2.UserInfo, serviceids []iamidentityv1.ServiceID) []map[string]interface{} {
result := make([]map[string]interface{}, 0, len(list))
for _, m := range list {
var value, vtype string
vtype = *m.Type
if *m.Type == "user" {
for _, user := range users {
if user.IamID == *m.IamID {
value = user.Email
break
}
}
} else {
for _, srid := range serviceids {
if *srid.IamID == *m.IamID {
value = *srid.ID
break
}
}
}
l := map[string]interface{}{
"iam_id": value,
"type": vtype,
}
result = append(result, l)
}
return result
}
func FlattenUserIds(accountID string, users []string, meta interface{}) ([]string, error) {
userids := make([]string, len(users))
for i, name := range users {
iamID, err := GetIBMUniqueId(accountID, name, meta)
if err != nil {
return nil, err
}
userids[i] = iamID
}
return userids, nil
}
func ExpandUsers(userList *schema.Set) (users []icdv4.User) {
for _, iface := range userList.List() {
userEl := iface.(map[string]interface{})
user := icdv4.User{
UserName: userEl["name"].(string),
Password: userEl["password"].(string),
}
users = append(users, user)
}
return
}
type CsEntry struct {
Name string
Password string
String string
Composed string
CertName string
CertBase64 string
Hosts []struct {
HostName string `json:"hostname"`
Port int `json:"port"`
}
Scheme string
QueryOptions map[string]interface{}
Path string
Database string
BundleName string
BundleBase64 string
}
// IBM Cloud Databases
func FlattenConnectionStrings(cs []CsEntry) []map[string]interface{} {
entries := make([]map[string]interface{}, len(cs), len(cs))
for i, csEntry := range cs {
l := map[string]interface{}{
"name": csEntry.Name,
"password": csEntry.Password,
"composed": csEntry.Composed,
"certname": csEntry.CertName,
"certbase64": csEntry.CertBase64,
"queryoptions": csEntry.QueryOptions,
"scheme": csEntry.Scheme,
"path": csEntry.Path,
"database": csEntry.Database,
"bundlename": csEntry.BundleName,
"bundlebase64": csEntry.BundleBase64,
}
hosts := csEntry.Hosts
hostsList := make([]map[string]interface{}, len(hosts), len(hosts))
for j, host := range hosts {
z := map[string]interface{}{
"hostname": host.HostName,
"port": strconv.Itoa(host.Port),
}
hostsList[j] = z
}
l["hosts"] = hostsList
var queryOpts string
if len(csEntry.QueryOptions) != 0 {
queryOpts = "?"
count := 0
for k, v := range csEntry.QueryOptions {
if count >= 1 {
queryOpts = queryOpts + "&"
}
queryOpts = queryOpts + fmt.Sprintf("%v", k) + "=" + fmt.Sprintf("%v", v)
count++
}
} else {
queryOpts = ""
}
l["queryoptions"] = queryOpts
entries[i] = l
}
return entries
}
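// Editor's note - illustrative, not part of the original source: the
// "queryoptions" attribute is rendered as a URL query string, e.g. a
// CsEntry with QueryOptions of {"ssl": true} flattens to "?ssl=true";
// with multiple options the parameter order follows Go map iteration
// and is therefore not guaranteed.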
func FlattenPhaseOneAttributes(vpn *datatypes.Network_Tunnel_Module_Context) []map[string]interface{} {
phaseoneAttributesMap := make([]map[string]interface{}, 0, 1)
phaseoneAttributes := make(map[string]interface{})
phaseoneAttributes["authentication"] = *vpn.PhaseOneAuthentication
phaseoneAttributes["encryption"] = *vpn.PhaseOneEncryption
phaseoneAttributes["diffie_hellman_group"] = *vpn.PhaseOneDiffieHellmanGroup
phaseoneAttributes["keylife"] = *vpn.PhaseOneKeylife
phaseoneAttributesMap = append(phaseoneAttributesMap, phaseoneAttributes)
return phaseoneAttributesMap
}
func FlattenPhaseTwoAttributes(vpn *datatypes.Network_Tunnel_Module_Context) []map[string]interface{} {
phasetwoAttributesMap := make([]map[string]interface{}, 0, 1)
phasetwoAttributes := make(map[string]interface{})
phasetwoAttributes["authentication"] = *vpn.PhaseTwoAuthentication
phasetwoAttributes["encryption"] = *vpn.PhaseTwoEncryption
phasetwoAttributes["diffie_hellman_group"] = *vpn.PhaseTwoDiffieHellmanGroup
phasetwoAttributes["keylife"] = *vpn.PhaseTwoKeylife
phasetwoAttributesMap = append(phasetwoAttributesMap, phasetwoAttributes)
return phasetwoAttributesMap
}
func FlattenaddressTranslation(vpn *datatypes.Network_Tunnel_Module_Context, fwID int) []map[string]interface{} {
addressTranslationMap := make([]map[string]interface{}, 0, 1)
addressTranslationAttributes := make(map[string]interface{})
for _, networkAddressTranslation := range vpn.AddressTranslations {
if *networkAddressTranslation.NetworkTunnelContext.Id == fwID {
addressTranslationAttributes["remote_ip_adress"] = *networkAddressTranslation.CustomerIpAddress
addressTranslationAttributes["internal_ip_adress"] = *networkAddressTranslation.InternalIpAddress
addressTranslationAttributes["notes"] = *networkAddressTranslation.Notes
}
}
addressTranslationMap = append(addressTranslationMap, addressTranslationAttributes)
return addressTranslationMap
}
func FlattenremoteSubnet(vpn *datatypes.Network_Tunnel_Module_Context) []map[string]interface{} {
remoteSubnetMap := make([]map[string]interface{}, 0, 1)
remoteSubnetAttributes := make(map[string]interface{})
for _, customerSubnet := range vpn.CustomerSubnets {
remoteSubnetAttributes["remote_ip_adress"] = customerSubnet.NetworkIdentifier
remoteSubnetAttributes["remote_ip_cidr"] = customerSubnet.Cidr
remoteSubnetAttributes["account_id"] = customerSubnet.AccountId
}
remoteSubnetMap = append(remoteSubnetMap, remoteSubnetAttributes)
return remoteSubnetMap
}
// IBM Cloud Databases
func ExpandWhitelist(whiteList *schema.Set) (whitelist []icdv4.WhitelistEntry) {
for _, iface := range whiteList.List() {
wlItem := iface.(map[string]interface{})
wlEntry := icdv4.WhitelistEntry{
Address: wlItem["address"].(string),
Description: wlItem["description"].(string),
}
whitelist = append(whitelist, wlEntry)
}
return
}
// Cloud Internet Services
func FlattenWhitelist(whitelist icdv4.Whitelist) []map[string]interface{} {
entries := make([]map[string]interface{}, len(whitelist.WhitelistEntrys), len(whitelist.WhitelistEntrys))
for i, whitelistEntry := range whitelist.WhitelistEntrys {
l := map[string]interface{}{
"address": whitelistEntry.Address,
"description": whitelistEntry.Description,
}
entries[i] = l
}
return entries
}
func expandStringMap(inVal interface{}) map[string]string {
outVal := make(map[string]string)
if inVal == nil {
return outVal
}
for k, v := range inVal.(map[string]interface{}) {
strValue := fmt.Sprintf("%v", v)
outVal[k] = strValue
}
return outVal
}
// Cloud Internet Services
func ConvertTfToCisThreeVar(glbTfId string) (glbId string, zoneId string, cisId string, err error) {
g := strings.SplitN(glbTfId, ":", 3)
glbId = g[0]
if len(g) > 2 {
zoneId = g[1]
cisId = g[2]
} else {
err = errors.New("cis_id or zone_id not passed")
return
}
return
}
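// Editor's note - illustrative usage, not part of the original source
// (the IDs are made-up examples of the "glb_id:zone_id:cis_crn" format):
//
//	glbID, zoneID, cisID, err := ConvertTfToCisThreeVar("glb123:zone456:crn:v1:bluemix:...")
//	// SplitN(..., 3) keeps the trailing CRN, which itself contains colons, intact.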
func ConvertCisToTfFourVar(firewallType string, ID string, ID2 string, cisID string) (buildID string) {
if ID != "" {
buildID = firewallType + ":" + ID + ":" + ID2 + ":" + cisID
} else {
buildID = ""
}
return
}
func ConvertTfToCisFourVar(TfID string) (firewallType string, ID string, zoneID string, cisID string, err error) {
g := strings.SplitN(TfID, ":", 4)
firewallType = g[0]
if len(g) > 3 {
ID = g[1]
zoneID = g[2]
cisID = g[3]
} else {
err = errors.New("Id or cis_id or zone_id not passed")
return
}
return
}
// Cloud Internet Services
func ConvertCisToTfThreeVar(Id string, Id2 string, cisId string) (buildId string) {
if Id != "" {
buildId = Id + ":" + Id2 + ":" + cisId
} else {
buildId = ""
}
return
}
// Cloud Internet Services
func ConvertTfToCisTwoVarSlice(tfIds []string) (Ids []string, cisId string, err error) {
for _, item := range tfIds {
Id := strings.SplitN(item, ":", 2)
if len(Id) < 2 {
err = errors.New("cis_id not passed")
return
}
Ids = append(Ids, Id[0])
cisId = Id[1]
}
return
}
// Cloud Internet Services
func ConvertCisToTfTwoVarSlice(Ids []string, cisId string) (buildIds []string) {
for _, Id := range Ids {
buildIds = append(buildIds, Id+":"+cisId)
}
return
}
// Cloud Internet Services
func ConvertCisToTfTwoVar(Id string, cisId string) (buildId string) {
if Id != "" {
buildId = Id + ":" + cisId
} else {
buildId = ""
}
return
}
// Cloud Internet Services
func ConvertTftoCisTwoVar(tfId string) (Id string, cisId string, err error) {
g := strings.SplitN(tfId, ":", 2)
Id = g[0]
if len(g) > 1 {
cisId = g[1]
} else {
err = errors.New(" cis_id or zone_id not passed")
return
}
return
}
func stringInSlice(str string, list []string) bool {
for _, v := range list {
if v == str {
return true
}
}
return false
}
var dnsTypeIntFields = []string{
"algorithm",
"key_tag",
"type",
"usage",
"selector",
"matching_type",
"weight",
"priority",
"port",
"long_degrees",
"lat_degrees",
"long_minutes",
"lat_minutes",
"protocol",
"digest_type",
"order",
"preference",
}
var dnsTypeFloatFields = []string{
"size",
"altitude",
"precision_horz",
"precision_vert",
"long_seconds",
"lat_seconds",
}
// Cloud Internet Services
func TransformToIBMCISDnsData(recordType string, id string, value interface{}) (newValue interface{}, err error) {
switch {
case id == "flags":
switch {
case strings.ToUpper(recordType) == "SRV",
strings.ToUpper(recordType) == "CAA",
strings.ToUpper(recordType) == "DNSKEY":
newValue, err = strconv.Atoi(value.(string))
case strings.ToUpper(recordType) == "NAPTR":
newValue, err = value.(string), nil
}
case stringInSlice(id, dnsTypeIntFields):
newValue, err = strconv.Atoi(value.(string))
case stringInSlice(id, dnsTypeFloatFields):
newValue, err = strconv.ParseFloat(value.(string), 32)
default:
newValue, err = value.(string), nil
}
return
}
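// Editor's note - illustrative usage, not part of the original source:
// Terraform stores all DNS record data as strings; this converts a field
// back to the type the CIS API expects, e.g.:
//
//	v, _ := TransformToIBMCISDnsData("SRV", "weight", "5")    // int 5
//	v, _ = TransformToIBMCISDnsData("LOC", "altitude", "1.5") // float64 1.5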
func IndexOf(element string, data []string) int {
for k, v := range data {
if element == v {
return k
}
}
return -1 //not found.
}
func rcInstanceExists(resourceId string, resourceType string, meta interface{}) (bool, error) {
// Check to see if Resource Manager instance exists
rsConClient, err := meta.(conns.ClientSession).ResourceControllerAPI()
if err != nil {
return true, nil
}
exists := true
instance, err := rsConClient.ResourceServiceInstance().GetInstance(resourceId)
if err != nil {
if strings.Contains(err.Error(), "Object not found") ||
strings.Contains(err.Error(), "status code: 404") {
exists = false
} else {
return true, fmt.Errorf("[ERROR] Error checking resource instance exists: %s", err)
}
} else {
if strings.Contains(instance.State, "removed") {
exists = false
}
}
if exists {
return true, nil
}
// Implement when pointer to terraform.State available
// If rcInstance is now in removed state, set TF state to removed
// s := *terraform.State
// for _, r := range s.RootModule().Resources {
// if r.Type != resourceType {
// continue
// }
// if r.Primary.ID == resourceId {
// r.Primary.Set("status", "removed")
// }
// }
return false, nil
}
// Implement when pointer to terraform.State available
// func resourceInstanceExistsTf(resourceId string, resourceType string) bool {
// // Check TF state to see if Cloud resource instance has already been removed
// s := *terraform.State
// for _, r := range s.RootModule().Resources {
// if r.Type != resourceType {
// continue
// }
// if r.Primary.ID == resourceId {
// if strings.Contains(r.Primary.Attributes["status"], "removed") {
// return false
// }
// }
// }
// return true
// }
// convert CRN to be url safe
func EscapeUrlParm(urlParm string) string {
if strings.Contains(urlParm, "/") {
newUrlParm := url.PathEscape(urlParm)
return newUrlParm
}
return urlParm
}
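// Editor's note - illustrative usage, not part of the original source:
//
//	EscapeUrlParm("a/b") // "a%2Fb" (the "/" is path-escaped)
//	EscapeUrlParm("a:b") // "a:b" (returned unchanged: no "/")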
func GetLocation(instance models.ServiceInstanceV2) string {
region := instance.Crn.Region
cName := instance.Crn.CName
if cName == "bluemix" || cName == "staging" {
return region
} else {
return cName + "-" + region
}
}
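// Editor's note - illustrative, not part of the original source: for a
// public-cloud CRN (cname "bluemix") the location is just the region,
// e.g. "us-south"; for any other cname it is prefixed, e.g. a
// hypothetical cname "internal" with region "us-south" yields
// "internal-us-south".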
func GetTags(d *schema.ResourceData, meta interface{}) error {
resourceID := d.Id()
gtClient, err := meta.(conns.ClientSession).GlobalTaggingAPI()
if err != nil {
return fmt.Errorf("[ERROR] Error getting global tagging client settings: %s", err)
}
taggingResult, err := gtClient.Tags().GetTags(resourceID)
if err != nil {
return err
}
var taglist []string
for _, item := range taggingResult.Items {
taglist = append(taglist, item.Name)
}
d.Set("tags", FlattenStringList(taglist))
return nil
}
// func UpdateTags(d *schema.ResourceData, meta interface{}) error {
// resourceID := d.Id()
// gtClient, err := meta.(conns.ClientSession).GlobalTaggingAPI()
// if err != nil {
// return fmt.Errorf("[ERROR] Error getting global tagging client settings: %s", err)
// }
// oldList, newList := d.GetChange("tags")
// if oldList == nil {
// oldList = new(schema.Set)
// }
// if newList == nil {
// newList = new(schema.Set)
// }
// olds := oldList.(*schema.Set)
// news := newList.(*schema.Set)
// removeInt := olds.Difference(news).List()
// addInt := news.Difference(olds).List()
// add := make([]string, len(addInt))
// for i, v := range addInt {
// add[i] = fmt.Sprint(v)
// }
// remove := make([]string, len(removeInt))
// for i, v := range removeInt {
// remove[i] = fmt.Sprint(v)
// }
// if len(add) > 0 {
// _, err := gtClient.Tags().AttachTags(resourceID, add)
// if err != nil {
// return fmt.Errorf("[ERROR] Error updating database tags %v : %s", add, err)
// }
// }
// if len(remove) > 0 {
// _, err := gtClient.Tags().DetachTags(resourceID, remove)
// if err != nil {
// return fmt.Errorf("[ERROR] Error detaching database tags %v: %s", remove, err)
// }
// for _, v := range remove {
// _, err := gtClient.Tags().DeleteTag(v)
// if err != nil {
// return fmt.Errorf("[ERROR] Error deleting database tag %v: %s", v, err)
// }
// }
// }
// return nil
// }
func GetGlobalTagsUsingCRN(meta interface{}, resourceID, resourceType, tagType string) (*schema.Set, error) {
gtClient, err := meta.(conns.ClientSession).GlobalTaggingAPIv1()
if err != nil {
return nil, fmt.Errorf("[ERROR] Error getting global tagging client settings: %s", err)
}
userDetails, err := meta.(conns.ClientSession).BluemixUserDetails()
if err != nil {
return nil, err
}
accountID := userDetails.UserAccount
var providers []string
if strings.Contains(resourceType, "SoftLayer_") {
providers = []string{"ims"}
}
ListTagsOptions := &globaltaggingv1.ListTagsOptions{}
if resourceID != "" {
ListTagsOptions.AttachedTo = &resourceID
}
ListTagsOptions.Providers = providers
if len(tagType) > 0 {
ListTagsOptions.TagType = PtrToString(tagType)
if tagType == "service" {
ListTagsOptions.AccountID = PtrToString(accountID)
}
}
taggingResult, _, err := gtClient.ListTags(ListTagsOptions)
if err != nil {
return nil, err
}
var taglist []string
for _, item := range taggingResult.Items {
taglist = append(taglist, *item.Name)
}
log.Println("tagList: ", taglist)
return NewStringSet(ResourceIBMVPCHash, taglist), nil
}
func UpdateGlobalTagsUsingCRN(oldList, newList interface{}, meta interface{}, resourceID, resourceType, tagType string) error {
gtClient, err := meta.(conns.ClientSession).GlobalTaggingAPIv1()
if err != nil {
return fmt.Errorf("[ERROR] Error getting global tagging client settings: %s", err)
}
userDetails, err := meta.(conns.ClientSession).BluemixUserDetails()
if err != nil {
return err
}
acctID := userDetails.UserAccount
resources := []globaltaggingv1.Resource{}
r := globaltaggingv1.Resource{ResourceID: PtrToString(resourceID), ResourceType: PtrToString(resourceType)}
resources = append(resources, r)
if oldList == nil {
oldList = new(schema.Set)
}
if newList == nil {
newList = new(schema.Set)
}
olds := oldList.(*schema.Set)
news := newList.(*schema.Set)
removeInt := olds.Difference(news).List()
addInt := news.Difference(olds).List()
add := make([]string, len(addInt))
for i, v := range addInt {
add[i] = fmt.Sprint(v)
}
remove := make([]string, len(removeInt))
for i, v := range removeInt {
remove[i] = fmt.Sprint(v)
}
if strings.TrimSpace(tagType) == "" || tagType == "user" {
schematicTags := os.Getenv("IC_ENV_TAGS")
var envTags []string
if schematicTags != "" {
envTags = strings.Split(schematicTags, ",")
add = append(add, envTags...)
}
}
if len(remove) > 0 {
detachTagOptions := &globaltaggingv1.DetachTagOptions{}
detachTagOptions.Resources = resources
detachTagOptions.TagNames = remove
if len(tagType) > 0 {
detachTagOptions.TagType = PtrToString(tagType)
if tagType == "service" {
detachTagOptions.AccountID = PtrToString(acctID)
}
}
_, resp, err := gtClient.DetachTag(detachTagOptions)
if err != nil {
return fmt.Errorf("[ERROR] Error detaching database tags %v: %s\n%s", remove, err, resp)
}
for _, v := range remove {
delTagOptions := &globaltaggingv1.DeleteTagOptions{
TagName: PtrToString(v),
}
_, resp, err := gtClient.DeleteTag(delTagOptions)
if err != nil {
return fmt.Errorf("[ERROR] Error deleting database tag %v: %s\n%s", v, err, resp)
}
}
}
if len(add) > 0 {
AttachTagOptions := &globaltaggingv1.AttachTagOptions{}
AttachTagOptions.Resources = resources
AttachTagOptions.TagNames = add
if len(tagType) > 0 {
AttachTagOptions.TagType = PtrToString(tagType)
if tagType == "service" {
AttachTagOptions.AccountID = PtrToString(acctID)
}
}
_, resp, err := gtClient.AttachTag(AttachTagOptions)
if err != nil {
return fmt.Errorf("[ERROR] Error updating database tags %v : %s\n%s", add, err, resp)
}
}
return nil
}
func ResourceIBMVPCHash(v interface{}) int {
var buf bytes.Buffer
buf.WriteString(strings.ToLower(v.(string)))
return conns.String(buf.String())
}
// Use this function for attributes which only should be applied in resource creation time.
func ApplyOnce(k, o, n string, d *schema.ResourceData) bool {
if len(d.Id()) == 0 {
return false
}
return true
}
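// Editor's note - illustrative usage, not part of the original source:
// ApplyOnce matches Terraform's schema.SchemaDiffSuppressFunc signature, so
// it is typically wired up as a DiffSuppressFunc to ignore changes once the
// resource exists (d.Id() is non-empty), e.g. on a hypothetical field:
//
//	"user_data": {Type: schema.TypeString, Optional: true, DiffSuppressFunc: ApplyOnce}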
func GetTagsUsingCRN(meta interface{}, resourceCRN string) (*schema.Set, error) {
gtClient, err := meta.(conns.ClientSession).GlobalTaggingAPI()
if err != nil {
return nil, fmt.Errorf("[ERROR] Error getting global tagging client settings: %s", err)
}
taggingResult, err := gtClient.Tags().GetTags(resourceCRN)
if err != nil {
return nil, err
}
var taglist []string
for _, item := range taggingResult.Items {
taglist = append(taglist, item.Name)
}
log.Println("tagList: ", taglist)
return NewStringSet(ResourceIBMVPCHash, taglist), nil
}
func UpdateTagsUsingCRN(oldList, newList interface{}, meta interface{}, resourceCRN string) error {
gtClient, err := meta.(conns.ClientSession).GlobalTaggingAPI()
if err != nil {
return fmt.Errorf("[ERROR] Error getting global tagging client settings: %s", err)
}
if oldList == nil {
oldList = new(schema.Set)
}
if newList == nil {
newList = new(schema.Set)
}
olds := oldList.(*schema.Set)
news := newList.(*schema.Set)
removeInt := olds.Difference(news).List()
addInt := news.Difference(olds).List()
add := make([]string, len(addInt))
for i, v := range addInt {
add[i] = fmt.Sprint(v)
}
remove := make([]string, len(removeInt))
for i, v := range removeInt {
remove[i] = fmt.Sprint(v)
}
schematicTags := os.Getenv("IC_ENV_TAGS")
var envTags []string
if schematicTags != "" {
envTags = strings.Split(schematicTags, ",")
add = append(add, envTags...)
}
if len(remove) > 0 {
_, err := gtClient.Tags().DetachTags(resourceCRN, remove)
if err != nil {
return fmt.Errorf("[ERROR] Error detaching database tags %v: %s", remove, err)
}
for _, v := range remove {
_, err := gtClient.Tags().DeleteTag(v)
if err != nil {
return fmt.Errorf("[ERROR] Error deleting database tag %v: %s", v, err)
}
}
}
if len(add) > 0 {
_, err := gtClient.Tags().AttachTags(resourceCRN, add)
if err != nil {
return fmt.Errorf("[ERROR] Error updating database tags %v : %s", add, err)
}
}
return nil
}
func GetBaseController(meta interface{}) (string, error) {
userDetails, err := meta.(conns.ClientSession).BluemixUserDetails()
if err != nil {
return "", err
}
if userDetails != nil && userDetails.CloudName == "staging" {
return stageBaseController, nil
}
return prodBaseController, nil
}
func FlattenSSLCiphers(ciphers []datatypes.Network_LBaaS_SSLCipher) *schema.Set {
c := make([]string, len(ciphers))
for i, v := range ciphers {
c[i] = *v.Name
}
return NewStringSet(schema.HashString, c)
}
func ResourceTagsCustomizeDiff(diff *schema.ResourceDiff) error {
if diff.Id() != "" && diff.HasChange("tags") {
o, n := diff.GetChange("tags")
oldSet := o.(*schema.Set)
newSet := n.(*schema.Set)
removeInt := oldSet.Difference(newSet).List()
addInt := newSet.Difference(oldSet).List()
if v := os.Getenv("IC_ENV_TAGS"); v != "" {
s := strings.Split(v, ",")
if len(removeInt) == len(s) && len(addInt) == 0 {
fmt.Println("Suppresing the TAG diff ")
return diff.Clear("tags")
}
}
}
return nil
}
func ResourceLBListenerPolicyCustomizeDiff(diff *schema.ResourceDiff) error {
policyActionIntf, _ := diff.GetOk(isLBListenerPolicyAction)
policyAction := policyActionIntf.(string)
if policyAction == "forward" {
_, policyTargetIDSet := diff.GetOk(isLBListenerPolicyTargetID)
if !policyTargetIDSet && diff.NewValueKnown(isLBListenerPolicyTargetID) {
return fmt.Errorf("Load balancer listener policy: When action is forward please specify target_id")
}
} else if policyAction == "redirect" {
_, httpsStatusCodeSet := diff.GetOk(isLBListenerPolicyHTTPSRedirectStatusCode)
_, targetURLSet := diff.GetOk(isLBListenerPolicyTargetURL)
if !httpsStatusCodeSet && diff.NewValueKnown(isLBListenerPolicyHTTPSRedirectStatusCode) {
return fmt.Errorf("Load balancer listener policy: When action is redirect please specify target_http_status_code")
}
if !targetURLSet && diff.NewValueKnown(isLBListenerPolicyTargetURL) {
return fmt.Errorf("Load balancer listener policy: When action is redirect please specify target_url")
}
} else if policyAction == "https_redirect" {
_, listenerSet := diff.GetOk(isLBListenerPolicyHTTPSRedirectListener)
_, httpsStatusSet := diff.GetOk(isLBListenerPolicyHTTPSRedirectStatusCode)
if !listenerSet && diff.NewValueKnown(isLBListenerPolicyHTTPSRedirectListener) {
return fmt.Errorf("Load balancer listener policy: When action is https_redirect please specify target_https_redirect_listener")
}
if !httpsStatusSet && diff.NewValueKnown(isLBListenerPolicyHTTPSRedirectStatusCode) {
return fmt.Errorf("When action is https_redirect please specify target_https_redirect_status_code")
}
}
return nil
}
func ResourceIBMISLBPoolCookieValidate(diff *schema.ResourceDiff) error {
_, sessionPersistenceTypeIntf := diff.GetChange(isLBPoolSessPersistenceType)
_, sessionPersistenceCookieNameIntf := diff.GetChange(isLBPoolSessPersistenceAppCookieName)
sessionPersistenceType := sessionPersistenceTypeIntf.(string)
sessionPersistenceCookieName := sessionPersistenceCookieNameIntf.(string)
if sessionPersistenceType == "app_cookie" {
if sessionPersistenceCookieName == "" {
return fmt.Errorf("Load Balancer Pool: %s is required for %s 'app_cookie'", isLBPoolSessPersistenceAppCookieName, isLBPoolSessPersistenceType)
}
if strings.HasPrefix(sessionPersistenceCookieName, "IBM") {
return fmt.Errorf("Load Balancer Pool: %s starting with IBM are not allowed", isLBPoolSessPersistenceAppCookieName)
}
}
if sessionPersistenceCookieName != "" && sessionPersistenceType != "app_cookie" {
return fmt.Errorf("Load Balancer Pool: %s is only applicable for %s 'app_cookie'.", isLBPoolSessPersistenceAppCookieName, isLBPoolSessPersistenceType)
}
return nil
}
func ResourceVolumeAttachmentValidate(diff *schema.ResourceDiff) error {
if volsintf, ok := diff.GetOk("volume_attachments"); ok {
vols := volsintf.([]interface{})
for volAttIdx := range vols {
volumeid := "volume_attachments." + strconv.Itoa(volAttIdx) + "." + "volume"
volumePrototype := "volume_attachments." + strconv.Itoa(volAttIdx) + "." + "volume_prototype"
var volIDInterpolated = false
var volumeIdFound = false
if _, volumeIdFound = diff.GetOk(volumeid); !volumeIdFound {
if !diff.NewValueKnown(volumeid) {
volIDInterpolated = true
}
}
_, volPrototypeFound := diff.GetOk(volumePrototype)
if volPrototypeFound && (volumeIdFound || volIDInterpolated) {
return fmt.Errorf("InstanceTemplate - volume_attachments[%d]: Cannot provide both 'volume' and 'volume_prototype' together.", volAttIdx)
}
if !volPrototypeFound && !volumeIdFound && !volIDInterpolated {
return fmt.Errorf("InstanceTemplate - volume_attachments[%d]: Volume details missing. Provide either 'volume' or 'volume_prototype'.", volAttIdx)
}
}
}
return nil
}
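// ResourceVolumeValidate rejects shrinking a volume and enforces the
// capacity/IOPS matrix checked below. For the "custom" profile the allowed
// IOPS per capacity (GB) band is:
//   10-39 GB: 100-1000       40-79 GB: 100-2000       80-99 GB: 100-4000
//   100-499 GB: 100-6000     500-999 GB: 100-10000    1000-1999 GB: 100-20000
//   2000-3999 GB: 200-40000  4000-7999 GB: 300-40000  8000-9999 GB: 500-48000
//   10000-16000 GB: 1000-48000
// Tiered profiles cap capacity at 9600 GB (5iops-tier) and 4800 GB (10iops-tier).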
func ResourceVolumeValidate(diff *schema.ResourceDiff) error {
if diff.Id() != "" && diff.HasChange("capacity") {
o, n := diff.GetChange("capacity")
old := int64(o.(int))
new := int64(n.(int))
if new < old {
return fmt.Errorf("'%s' attribute has a constraint, it supports only expansion and can't be changed from %d to %d.", "capacity", old, new)
}
}
profile := ""
var capacity, iops int64
if profileOk, ok := diff.GetOk("profile"); ok {
profile = profileOk.(string)
}
if capacityOk, ok := diff.GetOk("capacity"); ok {
capacity = int64(capacityOk.(int))
}
if capacity == int64(0) {
capacity = int64(100)
}
if profile == "5iops-tier" && capacity > 9600 {
return fmt.Errorf("'%s' storage block supports capacity up to %d.", profile, 9600)
} else if profile == "10iops-tier" && capacity > 4800 {
return fmt.Errorf("'%s' storage block supports capacity up to %d.", profile, 4800)
}
if iopsOk, ok := diff.GetOk("iops"); ok {
iops = int64(iopsOk.(int))
}
if diff.HasChange("profile") {
oldProfile, newProfile := diff.GetChange("profile")
if oldProfile.(string) == "custom" || newProfile.(string) == "custom" {
diff.ForceNew("profile")
}
}
if profile != "custom" {
if iops != 0 && diff.NewValueKnown("iops") && diff.HasChange("iops") {
return fmt.Errorf("VolumeError : iops is applicable for only custom volume profiles")
}
} else {
if capacity == 0 {
capacity = int64(100)
}
if capacity >= 10 && capacity <= 39 {
min := int64(100)
max := int64(1000)
if !(iops >= min && iops <= max) {
return fmt.Errorf("VolumeError : allowed iops value for capacity(%d) is [%d-%d] ", capacity, min, max)
}
}
if capacity >= 40 && capacity <= 79 {
min := int64(100)
max := int64(2000)
if !(iops >= min && iops <= max) {
return fmt.Errorf("VolumeError : allowed iops value for capacity(%d) is [%d-%d] ", capacity, min, max)
}
}
if capacity >= 80 && capacity <= 99 {
min := int64(100)
max := int64(4000)
if !(iops >= min && iops <= max) {
return fmt.Errorf("VolumeError : allowed iops value for capacity(%d) is [%d-%d] ", capacity, min, max)
}
}
if capacity >= 100 && capacity <= 499 {
min := int64(100)
max := int64(6000)
if !(iops >= min && iops <= max) {
return fmt.Errorf("VolumeError : allowed iops value for capacity(%d) is [%d-%d] ", capacity, min, max)
}
}
if capacity >= 500 && capacity <= 999 {
min := int64(100)
max := int64(10000)
if !(iops >= min && iops <= max) {
return fmt.Errorf("VolumeError : allowed iops value for capacity(%d) is [%d-%d] ", capacity, min, max)
}
}
if capacity >= 1000 && capacity <= 1999 {
min := int64(100)
max := int64(20000)
if !(iops >= min && iops <= max) {
return fmt.Errorf("VolumeError : allowed iops value for capacity(%d) is [%d-%d] ", capacity, min, max)
}
}
if capacity >= 2000 && capacity <= 3999 {
min := int64(200)
max := int64(40000)
if !(iops >= min && iops <= max) {
return fmt.Errorf("VolumeError : allowed iops value for capacity(%d) is [%d-%d] ", capacity, min, max)
}
}
if capacity >= 4000 && capacity <= 7999 {
min := int64(300)
max := int64(40000)
if !(iops >= min && iops <= max) {
return fmt.Errorf("VolumeError : allowed iops value for capacity(%d) is [%d-%d] ", capacity, min, max)
}
}
if capacity >= 8000 && capacity <= 9999 {
min := int64(500)
max := int64(48000)
if !(iops >= min && iops <= max) {
return fmt.Errorf("VolumeError : allowed iops value for capacity(%d) is [%d-%d] ", capacity, min, max)
}
}
if capacity >= 10000 && capacity <= 16000 {
min := int64(1000)
max := int64(48000)
if !(iops >= min && iops <= max) {
return fmt.Errorf("VolumeError : allowed iops value for capacity(%d) is [%d-%d] ", capacity, min, max)
}
}
}
return nil
}
func ResourceRouteModeValidate(diff *schema.ResourceDiff) error {
var lbtype, lbprofile string
if typeOk, ok := diff.GetOk(isLBType); ok {
lbtype = typeOk.(string)
}
if profileOk, ok := diff.GetOk(isLBProfile); ok {
lbprofile = profileOk.(string)
}
if rmOk, ok := diff.GetOk(isLBRouteMode); ok {
routeMode := rmOk.(bool)
if routeMode && lbtype != "private" {
return fmt.Errorf("'type' must be 'private', at present public load balancers are not supported with route mode enabled.")
}
if routeMode && lbprofile != "network-fixed" {
return fmt.Errorf("'profile' must be 'network-fixed', route mode is supported by private network load balancer.")
}
}
return nil
}
func FlattenRoleData(object []iampolicymanagementv1.Role, roleType string) []map[string]string {
var roles []map[string]string
for _, item := range object {
role := make(map[string]string)
role["name"] = *item.DisplayName
role["type"] = roleType
role["description"] = *item.Description
roles = append(roles, role)
}
return roles
}
func FlattenCustomRoleData(object []iampolicymanagementv1.CustomRole, roleType string) []map[string]string {
var roles []map[string]string
for _, item := range object {
role := make(map[string]string)
role["name"] = *item.DisplayName
role["type"] = roleType
role["description"] = *item.Description
roles = append(roles, role)
}
return roles
}
func flattenActions(object []iampolicymanagementv1.Role) map[string]interface{} {
actions := map[string]interface{}{
"reader": FlattenActionbyDisplayName("Reader", object),
"manager": FlattenActionbyDisplayName("Manager", object),
"reader_plus": FlattenActionbyDisplayName("ReaderPlus", object),
"writer": FlattenActionbyDisplayName("Writer", object),
}
return actions
}
func FlattenActionbyDisplayName(displayName string, object []iampolicymanagementv1.Role) []string {
var actionIDs []string
for _, role := range object {
if *role.DisplayName == displayName {
actionIDs = role.Actions
}
}
return actionIDs
}
func flattenCatalogRef(object schematics.CatalogInfo) map[string]interface{} {
catalogRef := map[string]interface{}{
"item_id": object.ItemID,
"item_name": object.ItemName,
"item_url": object.ItemURL,
"offering_version": object.OfferingVersion,
}
return catalogRef
}
// GetNext extracts the "start" pagination token from the Href field of a list response's next-page link, via reflection.
func GetNext(next interface{}) string {
if reflect.ValueOf(next).IsNil() {
return ""
}
u, err := url.Parse(reflect.ValueOf(next).Elem().FieldByName("Href").Elem().String())
if err != nil {
return ""
}
q := u.Query()
return q.Get("start")
}
// GetNextIAM extracts the "pagetoken" pagination token from an IAM list response's next-page URL, via reflection.
func GetNextIAM(next interface{}) string {
if reflect.ValueOf(next).IsNil() {
return ""
}
u, err := url.Parse(reflect.ValueOf(next).Elem().String())
if err != nil {
return ""
}
q := u.Query()
return q.Get("pagetoken")
}
// DefaultResourceGroup returns the ID of the account's default resource group.
func DefaultResourceGroup(meta interface{}) (string, error) {
rMgtClient, err := meta.(conns.ClientSession).ResourceManagerV2API()
if err != nil {
return "", err
}
defaultGrp := true
resourceGroupList := rg.ListResourceGroupsOptions{
Default: &defaultGrp,
}
grpList, resp, err := rMgtClient.ListResourceGroups(&resourceGroupList)
if err != nil || grpList == nil || grpList.Resources == nil {
return "", fmt.Errorf("[ERROR] Error retrieving resource group: %s %s", err, resp)
}
if len(grpList.Resources) <= 0 {
return "", fmt.Errorf("[ERROR] The default resource group could not be found. Make sure you have required permissions to access the resource group")
}
return *grpList.Resources[0].ID, nil
}
func FlattenKeyPolicies(policies []kp.Policy) []map[string]interface{} {
policyMap := make([]map[string]interface{}, 0, 1)
rotationMap := make([]map[string]interface{}, 0, 1)
dualAuthMap := make([]map[string]interface{}, 0, 1)
for _, policy := range policies {
log.Println("Policy CRN Data =============>", policy.CRN)
policyCRNData := strings.Split(policy.CRN, ":")
policyInstance := map[string]interface{}{
"id": policyCRNData[9],
"crn": policy.CRN,
"created_by": policy.CreatedBy,
"creation_date": (*(policy.CreatedAt)).String(),
"updated_by": policy.UpdatedBy,
"last_update_date": (*(policy.UpdatedAt)).String(),
}
if policy.Rotation != nil {
policyInstance["interval_month"] = policy.Rotation.Interval
rotationMap = append(rotationMap, policyInstance)
} else if policy.DualAuth != nil {
policyInstance["enabled"] = *(policy.DualAuth.Enabled)
dualAuthMap = append(dualAuthMap, policyInstance)
}
}
tempMap := map[string]interface{}{
"rotation": rotationMap,
"dual_auth_delete": dualAuthMap,
}
policyMap = append(policyMap, tempMap)
return policyMap
}
func FlattenKeyIndividualPolicy(policy string, policies []kp.Policy) []map[string]interface{} {
rotationMap := make([]map[string]interface{}, 0, 1)
dualAuthMap := make([]map[string]interface{}, 0, 1)
for _, policy := range policies {
log.Println("Policy CRN Data =============>", policy.CRN)
policyCRNData := strings.Split(policy.CRN, ":")
policyInstance := map[string]interface{}{
"id": policyCRNData[9],
"crn": policy.CRN,
"created_by": policy.CreatedBy,
"creation_date": (*(policy.CreatedAt)).String(),
"updated_by": policy.UpdatedBy,
"last_update_date": (*(policy.UpdatedAt)).String(),
}
if policy.Rotation != nil {
policyInstance["interval_month"] = policy.Rotation.Interval
rotationMap = append(rotationMap, policyInstance)
} else if policy.DualAuth != nil {
policyInstance["enabled"] = *(policy.DualAuth.Enabled)
dualAuthMap = append(dualAuthMap, policyInstance)
}
}
if policy == "rotation" {
return rotationMap
} else if policy == "dual_auth_delete" {
return dualAuthMap
}
return nil
}
// IgnoreSystemLabels filters out IBM, Kubernetes and k8s system labels, keeping user labels (node-local-dns-enabled is always kept).
func IgnoreSystemLabels(labels map[string]string) map[string]string {
result := make(map[string]string)
for k, v := range labels {
if (strings.HasPrefix(k, SystemIBMLabelPrefix) ||
strings.HasPrefix(k, KubernetesLabelPrefix) ||
strings.HasPrefix(k, K8sLabelPrefix)) &&
!strings.Contains(k, "node-local-dns-enabled") {
continue
}
result[k] = v
}
return result
}
// ExpandCosConfig ..
func ExpandCosConfig(cos []interface{}) *kubernetesserviceapiv1.COSBucket {
if len(cos) == 0 || cos[0] == nil {
return &kubernetesserviceapiv1.COSBucket{}
}
in := cos[0].(map[string]interface{})
obj := &kubernetesserviceapiv1.COSBucket{
Bucket: PtrToString(in["bucket"].(string)),
Endpoint: PtrToString(in["endpoint"].(string)),
Region: PtrToString(in["region"].(string)),
}
return obj
}
// ExpandCosCredentials ..
func ExpandCosCredentials(cos []interface{}) *kubernetesserviceapiv1.COSAuthorization {
if len(cos) == 0 || cos[0] == nil {
return &kubernetesserviceapiv1.COSAuthorization{}
}
in := cos[0].(map[string]interface{})
obj := &kubernetesserviceapiv1.COSAuthorization{
AccessKeyID: PtrToString(in["access_key-id"].(string)),
SecretAccessKey: PtrToString(in["secret_access_key"].(string)),
}
return obj
}
func FlattenNlbConfigs(nlbData []containerv2.NlbVPCListConfig) []map[string]interface{} {
nlbConfigList := make([]map[string]interface{}, 0)
for _, n := range nlbData {
nlbConfig := make(map[string]interface{})
nlbConfig["secret_name"] = n.SecretName
nlbConfig["secret_status"] = n.SecretStatus
c := n.Nlb
nlbConfig["cluster"] = c.Cluster
nlbConfig["dns_type"] = c.DnsType
nlbConfig["lb_hostname"] = c.LbHostname
nlbConfig["nlb_ips"] = c.NlbIPArray
nlbConfig["nlb_sub_domain"] = c.NlbSubdomain
nlbConfig["secret_namespace"] = c.SecretNamespace
nlbConfig["type"] = c.Type
nlbConfigList = append(nlbConfigList, nlbConfig)
}
return nlbConfigList
}
// FlattenHostLabels ..
func FlattenHostLabels(hostLabels []interface{}) map[string]string {
labels := make(map[string]string)
for _, v := range hostLabels {
parts := strings.Split(v.(string), ":")
if len(parts) >= 2 { // guard against entries without a ":" separator
labels[parts[0]] = parts[1]
}
}
return labels
}
func FlattenSatelliteZones(zones *schema.Set) []string {
zoneList := make([]string, zones.Len())
for i, v := range zones.List() {
zoneList[i] = fmt.Sprint(v)
}
return zoneList
}
// error object
type ServiceErrorResponse struct {
Message string
StatusCode int
Result interface{}
}
func BeautifyError(err error, response *core.DetailedResponse) *ServiceErrorResponse {
var (
statusCode int
result interface{}
)
if response != nil {
statusCode = response.StatusCode
result = response.Result
}
return &ServiceErrorResponse{
Message: err.Error(),
StatusCode: statusCode,
Result: result,
}
}
func (response *ServiceErrorResponse) String() string {
output, err := json.MarshalIndent(response, "", " ")
if err == nil {
return fmt.Sprintf("%+v\n", string(output))
}
return fmt.Sprintf("Error : %#v", response)
}
// IAM Policy Management
func GetResourceAttribute(name string, r iampolicymanagementv1.PolicyResource) *string {
for _, a := range r.Attributes {
if *a.Name == name {
return a.Value
}
}
return core.StringPtr("")
}
func GetSubjectAttribute(name string, s iampolicymanagementv1.PolicySubject) *string {
for _, a := range s.Attributes {
if *a.Name == name {
return a.Value
}
}
return core.StringPtr("")
}
func SetResourceAttribute(name *string, value *string, r []iampolicymanagementv1.ResourceAttribute) []iampolicymanagementv1.ResourceAttribute {
for _, a := range r {
if *a.Name == *name {
a.Value = value
return r
}
}
r = append(r, iampolicymanagementv1.ResourceAttribute{
Name: name,
Value: value,
Operator: core.StringPtr("stringEquals"),
})
return r
}
func FindRoleByName(supported []iampolicymanagementv1.PolicyRole, name string) (iampolicymanagementv1.PolicyRole, error) {
for _, role := range supported {
if role.DisplayName != nil {
if *role.DisplayName == name {
role.DisplayName = nil
return role, nil
}
}
}
supportedRoles := getSupportedRolesStr(supported)
return iampolicymanagementv1.PolicyRole{}, bmxerror.New("RoleDoesnotExist",
fmt.Sprintf("%s was not found. Valid roles are %s", name, supportedRoles))
}
func getSupportedRolesStr(supported []iampolicymanagementv1.PolicyRole) string {
rolesStr := ""
for index, role := range supported {
if index != 0 {
rolesStr += ", "
}
if role.DisplayName != nil {
rolesStr += *role.DisplayName
}
}
return rolesStr
}
func GetRolesFromRoleNames(roleNames []string, roles []iampolicymanagementv1.PolicyRole) ([]iampolicymanagementv1.PolicyRole, error) {
filteredRoles := []iampolicymanagementv1.PolicyRole{}
for _, roleName := range roleNames {
role, err := FindRoleByName(roles, roleName)
if err != nil {
return []iampolicymanagementv1.PolicyRole{}, err
}
role.DisplayName = nil
filteredRoles = append(filteredRoles, role)
}
return filteredRoles, nil
}
func MapRoleListToPolicyRoles(roleList iampolicymanagementv1.RoleList) []iampolicymanagementv1.PolicyRole {
var policyRoles []iampolicymanagementv1.PolicyRole
for _, customRole := range roleList.CustomRoles {
newPolicyRole := iampolicymanagementv1.PolicyRole{
DisplayName: customRole.DisplayName,
RoleID: customRole.CRN,
}
policyRoles = append(policyRoles, newPolicyRole)
}
for _, serviceRole := range roleList.ServiceRoles {
newPolicyRole := iampolicymanagementv1.PolicyRole{
DisplayName: serviceRole.DisplayName,
RoleID: serviceRole.CRN,
}
policyRoles = append(policyRoles, newPolicyRole)
}
for _, systemRole := range roleList.SystemRoles {
newPolicyRole := iampolicymanagementv1.PolicyRole{
DisplayName: systemRole.DisplayName,
RoleID: systemRole.CRN,
}
policyRoles = append(policyRoles, newPolicyRole)
}
return policyRoles
}
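// GeneratePolicyOptions assembles a CreatePolicyOptions from the "resources"
// blocks, "resource_attributes" set and "roles" list in the Terraform config.
// When no resource attributes are supplied it defaults to a serviceType of
// "service", and role names are resolved against the IAM role catalog.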
func GeneratePolicyOptions(d *schema.ResourceData, meta interface{}) (iampolicymanagementv1.CreatePolicyOptions, error) {
var serviceName string
var resourceType string
resourceAttributes := []iampolicymanagementv1.ResourceAttribute{}
if res, ok := d.GetOk("resources"); ok {
resources := res.([]interface{})
for _, resource := range resources {
r, _ := resource.(map[string]interface{})
if r, ok := r["service"]; ok && r != nil {
serviceName = r.(string)
if r.(string) != "" {
resourceAttr := iampolicymanagementv1.ResourceAttribute{
Name: core.StringPtr("serviceName"),
Value: core.StringPtr(r.(string)),
Operator: core.StringPtr("stringEquals"),
}
resourceAttributes = append(resourceAttributes, resourceAttr)
}
}
if r, ok := r["resource_instance_id"]; ok {
if r.(string) != "" {
resourceAttr := iampolicymanagementv1.ResourceAttribute{
Name: core.StringPtr("serviceInstance"),
Value: core.StringPtr(r.(string)),
Operator: core.StringPtr("stringEquals"),
}
resourceAttributes = append(resourceAttributes, resourceAttr)
}
}
if r, ok := r["region"]; ok {
if r.(string) != "" {
resourceAttr := iampolicymanagementv1.ResourceAttribute{
Name: core.StringPtr("region"),
Value: core.StringPtr(r.(string)),
Operator: core.StringPtr("stringEquals"),
}
resourceAttributes = append(resourceAttributes, resourceAttr)
}
}
if r, ok := r["resource_type"]; ok {
if r.(string) != "" {
resourceAttr := iampolicymanagementv1.ResourceAttribute{
Name: core.StringPtr("resourceType"),
Value: core.StringPtr(r.(string)),
Operator: core.StringPtr("stringEquals"),
}
resourceAttributes = append(resourceAttributes, resourceAttr)
}
}
if r, ok := r["resource"]; ok {
if r.(string) != "" {
resourceAttr := iampolicymanagementv1.ResourceAttribute{
Name: core.StringPtr("resource"),
Value: core.StringPtr(r.(string)),
Operator: core.StringPtr("stringEquals"),
}
resourceAttributes = append(resourceAttributes, resourceAttr)
}
}
if r, ok := r["resource_group_id"]; ok {
if r.(string) != "" {
resourceAttr := iampolicymanagementv1.ResourceAttribute{
Name: core.StringPtr("resourceGroupId"),
Value: core.StringPtr(r.(string)),
Operator: core.StringPtr("stringEquals"),
}
resourceAttributes = append(resourceAttributes, resourceAttr)
}
}
if r, ok := r["service_type"]; ok && r != nil {
if r.(string) != "" {
resourceAttr := iampolicymanagementv1.ResourceAttribute{
Name: core.StringPtr("serviceType"),
Value: core.StringPtr(r.(string)),
Operator: core.StringPtr("stringEquals"),
}
resourceAttributes = append(resourceAttributes, resourceAttr)
}
}
if r, ok := r["attributes"]; ok {
for k, v := range r.(map[string]interface{}) {
resourceAttributes = SetResourceAttribute(core.StringPtr(k), core.StringPtr(v.(string)), resourceAttributes)
}
}
}
}
if r, ok := d.GetOk("resource_attributes"); ok {
for _, attribute := range r.(*schema.Set).List() {
a := attribute.(map[string]interface{})
name := a["name"].(string)
value := a["value"].(string)
operator := a["operator"].(string)
at := iampolicymanagementv1.ResourceAttribute{
Name: &name,
Value: &value,
Operator: &operator,
}
resourceAttributes = append(resourceAttributes, at)
}
}
var serviceTypeResourceAttribute iampolicymanagementv1.ResourceAttribute
if d.Get("account_management").(bool) {
serviceTypeResourceAttribute = iampolicymanagementv1.ResourceAttribute{
Name: core.StringPtr("serviceType"),
Value: core.StringPtr("platform_service"),
Operator: core.StringPtr("stringEquals"),
}
resourceAttributes = append(resourceAttributes, serviceTypeResourceAttribute)
}
if len(resourceAttributes) == 0 {
serviceTypeResourceAttribute = iampolicymanagementv1.ResourceAttribute{
Name: core.StringPtr("serviceType"),
Value: core.StringPtr("service"),
Operator: core.StringPtr("stringEquals"),
}
resourceAttributes = append(resourceAttributes, serviceTypeResourceAttribute)
}
policyResources := iampolicymanagementv1.PolicyResource{
Attributes: resourceAttributes,
}
userDetails, err := meta.(conns.ClientSession).BluemixUserDetails()
if err != nil {
return iampolicymanagementv1.CreatePolicyOptions{}, err
}
iamPolicyManagementClient, err := meta.(conns.ClientSession).IAMPolicyManagementV1API()
if err != nil {
return iampolicymanagementv1.CreatePolicyOptions{}, err
}
serviceToQuery := serviceName
if serviceName == "" && // no specific service specified
!d.Get("account_management").(bool) && // not all account management services
resourceType != "resource-group" { // not to a resource group
serviceToQuery = "alliamserviceroles"
}
listRoleOptions := &iampolicymanagementv1.ListRolesOptions{
AccountID: &userDetails.UserAccount,
ServiceName: &serviceToQuery,
}
roleList, _, err := iamPolicyManagementClient.ListRoles(listRoleOptions)
if err != nil {
return iampolicymanagementv1.CreatePolicyOptions{}, err
}
roles := MapRoleListToPolicyRoles(*roleList)
policyRoles, err := GetRolesFromRoleNames(ExpandStringList(d.Get("roles").([]interface{})), roles)
if err != nil {
return iampolicymanagementv1.CreatePolicyOptions{}, err
}
return iampolicymanagementv1.CreatePolicyOptions{Roles: policyRoles, Resources: []iampolicymanagementv1.PolicyResource{policyResources}}, nil
}
func SetTags(d *schema.ResourceData) []iampolicymanagementv1.ResourceTag {
resourceAttributes := []iampolicymanagementv1.ResourceTag{}
if r, ok := d.GetOk("resource_tags"); ok {
for _, attribute := range r.(*schema.Set).List() {
a := attribute.(map[string]interface{})
name := a["name"].(string)
value := a["value"].(string)
operator := a["operator"].(string)
tag := iampolicymanagementv1.ResourceTag{
Name: &name,
Value: &value,
Operator: &operator,
}
resourceAttributes = append(resourceAttributes, tag)
}
return resourceAttributes
}
return []iampolicymanagementv1.ResourceTag{}
}
func GetIBMUniqueId(accountID, userEmail string, meta interface{}) (string, error) {
userManagement, err := meta.(conns.ClientSession).UserManagementAPI()
if err != nil {
return "", err
}
client := userManagement.UserInvite()
res, err := client.ListUsers(accountID)
if err != nil {
return "", err
}
for _, userInfo := range res {
//handling case-sensitivity in userEmail
if strings.EqualFold(userInfo.Email, userEmail) {
return userInfo.IamID, nil
}
}
return "", fmt.Errorf("User %s is not found under account %s", userEmail, accountID)
}
func ImmutableResourceCustomizeDiff(resourceList []string, diff *schema.ResourceDiff) error {
sateLocZone := "managed_from"
for _, rName := range resourceList {
if diff.Id() != "" && diff.HasChange(rName) && rName != sateLocZone {
return fmt.Errorf("'%s' attribute is immutable and can't be changed", rName)
} else if diff.Id() != "" && diff.HasChange(rName) && rName == sateLocZone {
o, n := diff.GetChange(rName)
old := o.(string)
new := n.(string)
if len(old) >= 3 && len(new) >= 3 {
if old[0:3] != new[0:3] {
return fmt.Errorf("'%s' attribute is immutable and can't be changed from %s to %s", rName, old, new)
}
}
}
}
return nil
}
func FlattenSatelliteWorkerPoolZones(zones *schema.Set) []kubernetesserviceapiv1.SatelliteCreateWorkerPoolZone {
zoneList := make([]kubernetesserviceapiv1.SatelliteCreateWorkerPoolZone, zones.Len())
for i, v := range zones.List() {
data := v.(map[string]interface{})
if v, ok := data["id"]; ok && v.(string) != "" {
zoneList[i].ID = sl.String(v.(string))
}
}
return zoneList
}
func FlattenSatelliteWorkerPools(list []kubernetesserviceapiv1.GetWorkerPoolResponse) []map[string]interface{} {
workerPools := make([]map[string]interface{}, len(list))
for i, workerPool := range list {
l := map[string]interface{}{
"id": *workerPool.ID,
"name": *workerPool.PoolName,
"isolation": *workerPool.Isolation,
"flavour": *workerPool.Flavor,
"size_per_zone": *workerPool.WorkerCount,
"state": *workerPool.Lifecycle.ActualState,
"default_worker_pool_labels": workerPool.Labels,
"host_labels": workerPool.HostLabels,
}
zones := workerPool.Zones
zonesConfig := make([]map[string]interface{}, len(zones))
for j, zone := range zones {
z := map[string]interface{}{
"zone": *zone.ID,
"worker_count": int(*zone.WorkerCount),
}
zonesConfig[j] = z
}
l["zones"] = zonesConfig
workerPools[i] = l
}
return workerPools
}
func FlattenSatelliteHosts(hostList []kubernetesserviceapiv1.MultishiftQueueNode) []map[string]interface{} {
hosts := make([]map[string]interface{}, len(hostList))
for i, host := range hostList {
l := map[string]interface{}{
"host_id": *host.ID,
"host_name": *host.Name,
"status": *host.Health.Status,
"ip_address": *host.Assignment.IpAddress,
"cluster_name": *host.Assignment.ClusterName,
"zone": *host.Assignment.Zone,
"host_labels": *&host.Labels,
}
hosts[i] = l
}
return hosts
}
func FlattenWorkerPoolHostLabels(hostLabels map[string]string) *schema.Set {
mapped := make([]string, len(hostLabels))
idx := 0
for k, v := range hostLabels {
mapped[idx] = fmt.Sprintf("%s:%v", k, v)
idx++
}
return NewStringSet(schema.HashString, mapped)
}
// KMS Private Endpoint
func updatePrivateURL(kpURL string) (string, error) {
kmsEndpointURL := kpURL // already-private endpoints are returned unchanged
if !strings.Contains(kpURL, "private") {
kmsEndpURL := strings.SplitAfter(kpURL, "https://")
if len(kmsEndpURL) == 2 {
kmsEndpointURL = kmsEndpURL[0] + "private." + kmsEndpURL[1] + "/api/v2/"
} else {
return "", fmt.Errorf("[ERROR] Error in Kms EndPoint URL ")
}
}
return kmsEndpointURL, nil
}
func FlattenSatelliteClusterZones(list []string) []map[string]interface{} {
zones := make([]map[string]interface{}, len(list))
for i, zone := range list {
l := map[string]interface{}{
"id": zone,
}
zones[i] = l
}
return zones
}
|
[
"\"IC_ENV_TAGS\"",
"\"IC_ENV_TAGS\"",
"\"IC_ENV_TAGS\""
] |
[] |
[
"IC_ENV_TAGS"
] |
[]
|
["IC_ENV_TAGS"]
|
go
| 1 | 0 | |
server/main.go
|
package main
import (
"flag"
"io"
"log"
"net/http"
"os"
"path"
"github.com/flosch/pongo2/v4"
"github.com/gorilla/sessions"
_ "github.com/iris-contrib/pongo2-addons/v4"
"github.com/labstack/echo-contrib/session"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
)
// Renderer renders templates.
type Renderer struct {
TemplateDir string
Reload bool
TemplateCache map[string]*pongo2.Template
}
var (
debug bool
storeKey string
)
type ServerData struct {
Title string `json:"title"`
Data string `json:"data"`
}
// GetTemplate returns a template, loading it every time if reload is true.
func (r *Renderer) GetTemplate(name string, reload bool) *pongo2.Template {
filename := path.Join(r.TemplateDir, name)
if reload || r.Reload {
return pongo2.Must(pongo2.FromFile(filename))
}
return pongo2.Must(pongo2.FromCache(filename))
}
// Render renders a pongo2 template.
func (r *Renderer) Render(w io.Writer, name string, data interface{}, c echo.Context) error {
template := r.GetTemplate(name, debug)
pctx := data.(pongo2.Context)
sess, err := session.Get("session", c)
if err == nil {
pctx["session"] = sess
}
return template.ExecuteWriter(pctx, w)
}
func init() {
// db, err := sqlx.Connect("postgres", "user=postgres dbname=logbook sslmode=disable")
// if err != nil {
// log.Fatal(err)
// }
//setDB(db)
storeKey = os.Getenv("ANIGRAM_STORE_KEY") // assign the package-level key; ":=" would shadow it and leave it empty
if storeKey == "" {
log.Fatal("The ANIGRAM_STORE_KEY is not set")
}
//gob.Register(SessionUser{})
}
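// Illustrative startup (the key value is a placeholder, not a real secret):
//   ANIGRAM_STORE_KEY=some-long-random-secret go run . -debug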
func getIndex(c echo.Context) error {
return c.Render(http.StatusOK, "index.html", pongo2.Context{})
}
func getAbout(c echo.Context) error {
return echo.NewHTTPError(http.StatusNotImplemented)
}
func getNewAnimation(c echo.Context) error {
return echo.NewHTTPError(http.StatusNotImplemented)
}
func getAnimationEdit(c echo.Context) error {
return c.Render(http.StatusOK, "animation_edit.html", pongo2.Context{})
}
func getAnimation(c echo.Context) error {
s := ServerData{
Title: "Testing",
Data: "0000000000000000000000000000000000000000200000000000000220000000000000222000000000000002200000000000000220000000000000022000000000000002200000000000000220000000000000022000000000000002200000000000002222000000000000222200000000000022200000000000000000000000000000000000000000002222200000000002222222000000022220000220000000200000022200000000000002220000000000002222000000000222220000000002222220000000002200000000000000220000000000000222000022220000022222222220000000222222220000000000000000000000000000000000000000000000000000000000222000000000000222222000000002220002200000000222000022000000022000002200000000000022220000000000022220000000000000002000000000000000220000000222000222000000022200022200000002222222220000000000222220000000000000000000000000000000000000000000000000000000000000220000000000000222000000000000222000000000000222200000000000222020000000000022002000000000022200200000000002222222222000000022222222200000000000200000000000000020000000000000022200000000000002220000000000000222000000000000000000000000000000000000000000222222222000000022222222200000022200000220000002220000000000000222222222000000002222222220000000222200022200000000000002220000220000000222000022200000022200002220000002200000222220022220000002222222220000000000000000000000000000000000000000000000000000000000022222000000000222222220000000222222200000000220000000000000022000000000000002200222220000000220222222200000022220000222000002220000002200000022000002220000002220022222000000022222222000000002222222000000000022220000000000000000000000000000000000000000000000000000000000222222222200000222222222220000022222000220000000220000022000000000000022000000000000022200000000000002200000000000000220000000000000220000000000000222000000000000022200000000000022220000000000000222000000000000000000000000000000000000000000002222000000000002222220000000000222222200000000022000220000000002200022000000000222222000000000002222200000000002222022000000002200002220000000220000222000000022000022200000002222222200000000222222200000000000000000000000000000000000000000000000000000000000222222000000000022222200000000022222222000000022200002200000002220000220000000222000022000000002200002200000000222002220000000022222222000000000222220200000022000000220000002220000220000000022222222000000000222220000000000000000000000000000000000000000000000000000000000020000222200000022000222222000022200022222200000220022000022000022002000002200002200200000220000220020000022000022002000002200002200220002200000220022222220000222202222220000022220022220000002220000000000000000000000000000"}
return c.JSON(http.StatusOK, s)
}
func postAnimation(c echo.Context) error {
return echo.NewHTTPError(http.StatusNotImplemented)
}
func main() {
flag.BoolVar(&debug, "debug", false, "true to enable debug")
flag.Parse()
log.Printf("debug: %v\n", debug)
InitDB()
e := echo.New()
e.Use(middleware.Logger())
e.Use(session.Middleware(sessions.NewCookieStore([]byte(storeKey))))
e.Renderer = &Renderer{TemplateDir: "templates", Reload: debug, TemplateCache: make(map[string]*pongo2.Template)}
e.Static("/static", "static")
e.GET("/", getIndex)
e.GET("/about", getAbout)
e.GET("/animations/new", getNewAnimation)
e.GET("/animations/:uuid/edit", getAnimationEdit)
e.GET("/api/animations/:uuid", getAnimation)
e.POST("/api/animations", postAnimation)
e.Logger.Fatal(e.Start(":3000"))
}
|
[
"\"ANIGRAM_STORE_KEY\""
] |
[] |
[
"ANIGRAM_STORE_KEY"
] |
[]
|
["ANIGRAM_STORE_KEY"]
|
go
| 1 | 0 | |
android_env/components/simulators/emulator/emulator_launcher.py
|
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prepares and launches the emulator."""
import os
import subprocess
import time
from typing import Optional
from absl import logging
from android_env.components import errors
import grpc
from android_env.proto import emulator_controller_pb2
from android_env.proto import emulator_controller_pb2_grpc
from google.protobuf import empty_pb2
# Period in milliseconds to ping the Emulator gRPC server to keep the connection
# alive. If too frequent, we may get errors such as "Too many pings.", which can
# bring down the process.
_GRPC_KEEPALIVE_MS = 100000
class EmulatorLauncher():
"""Handles launching the emulator."""
def __init__(
self,
local_tmp_dir: str = '/tmp',
adb_port: Optional[int] = None,
adb_server_port: Optional[int] = None,
emulator_console_port: Optional[int] = None,
grpc_port: int = -1,
emulator_path: str = '',
android_sdk_root: str = '',
avd_name: str = '',
run_headless: bool = False,
kvm_device: str = '/dev/kvm',
gpu_mode: str = 'swiftshader_indirect',
android_avd_home: str = '',
startup_wait_time_sec: int = 300,
):
"""Installs required files locally and launches the emulator.
Args:
local_tmp_dir: Local directory for logs and maybe installing the AVD.
adb_port: ADB port for the Android device.
adb_server_port: Port of the ADB server daemon.
emulator_console_port: Port for telnet communication with the emulator.
grpc_port: Port for gRPC communication with the emulator.
emulator_path: Path to the emulator binary.
android_sdk_root: Root directory of the Android SDK.
avd_name: Name of the AVD.
run_headless: Whether to run in headless mode.
kvm_device: Path to the KVM device.
gpu_mode: GPU mode override. Supported values are listed at:
https://developer.android.com/studio/run/emulator-acceleration#accel-graphics
android_avd_home: Local directory for AVDs.
startup_wait_time_sec: Timeout for booting the emulator.
"""
self._local_tmp_dir = local_tmp_dir
self._adb_port = adb_port
self._adb_server_port = adb_server_port
self._emulator_console_port = emulator_console_port
self._emulator_path = emulator_path
self._android_sdk_root = android_sdk_root
self._avd_name = avd_name
self._run_headless = run_headless
self._kvm_device = kvm_device
self._gpu_mode = gpu_mode
self._android_avd_home = android_avd_home
self._startup_wait_time_sec = startup_wait_time_sec
self._grpc_port = grpc_port
self._emulator = None
self._emulator_output = None
self._emulator_stub = None
self._is_closed = False
def launch(self) -> None:
"""Launches the emulator."""
logging.info('Booting the emulator [%s]', self._emulator_path)
# Set necessary environment variables.
base_lib_dir = self._emulator_path[:-8] + 'lib64/'
ld_library_path = ':'.join([
base_lib_dir + 'x11/',
base_lib_dir + 'qt/lib/',
base_lib_dir + 'gles_swiftshader/',
base_lib_dir
])
extra_env_vars = {
'ANDROID_HOME': '',
'ANDROID_SDK_ROOT': self._android_sdk_root,
'ANDROID_AVD_HOME': self._android_avd_home,
'ANDROID_EMULATOR_KVM_DEVICE': self._kvm_device,
'ANDROID_ADB_SERVER_PORT': str(self._adb_server_port),
'LD_LIBRARY_PATH': ld_library_path,
'QT_DEBUG_PLUGINS': '1',
'QT_XKB_CONFIG_ROOT': str(self._emulator_path[:-8] + 'qt_config/'),
}
logging.info('extra_env_vars: %s', str(extra_env_vars))
env_vars = dict(os.environ).copy()
env_vars.update(extra_env_vars)
# Compile command.
grpc_port = ['-grpc', str(self._grpc_port)] if self._grpc_port >= 0 else []
run_headless = ['-no-skin', '-no-window'] if self._run_headless else []
ports = ['-ports', '%s,%s' % (self._emulator_console_port, self._adb_port)]
command = [
self._emulator_path,
'-no-snapshot',
'-gpu', self._gpu_mode,
'-no-audio',
'-verbose',
'-avd', self._avd_name,
] + grpc_port + run_headless + ports
logging.info('Emulator launch command: %s', ' '.join(command))
# Prepare logfile.
emulator_logfile = os.path.join(self._local_tmp_dir, 'emulator_output')
self._emulator_output = open(emulator_logfile, 'wb')
# Spawn the emulator process.
self._emulator = subprocess.Popen(
command,
env=env_vars,
stdout=self._emulator_output,
stderr=self._emulator_output)
self._emulator_stub = EmulatorLauncher.create_emulator_stub(self._grpc_port)
# Wait for the emulator to boot.
start_time = time.time()
deadline = start_time + self._startup_wait_time_sec
success = False
while time.time() < deadline:
emu_status = self._emulator_stub.getStatus(empty_pb2.Empty())
logging.info('Waiting for emulator to start. Emulator uptime: %rms',
emu_status.uptime)
if emu_status.booted:
success = True
break
time.sleep(5.0)
elapsed_time = time.time() - start_time
if not success:
raise errors.SimulatorCrashError(
'The emulator failed to boot after %r seconds' %
self._startup_wait_time_sec)
logging.info('Done booting the emulator (in %f seconds).', elapsed_time)
def restart(self) -> None:
logging.info('Restarting the emulator...')
self._kill_emulator_process()
self.launch()
logging.info('Done restarting the emulator.')
@classmethod
def create_emulator_stub(
cls,
grpc_port: int,
use_async: bool = False,
) -> emulator_controller_pb2_grpc.EmulatorControllerStub:
"""Returns a stub to the EmulatorController service."""
logging.info('Creating gRPC channel to the emulator on port %r', grpc_port)
port = f'localhost:{grpc_port}'
options = [('grpc.max_send_message_length', -1),
('grpc.max_receive_message_length', -1),
('grpc.keepalive_time_ms', _GRPC_KEEPALIVE_MS)]
creds = grpc.local_channel_credentials()
if use_async:
channel = grpc.aio.secure_channel(port, creds, options=options)
else:
channel = grpc.secure_channel(port, creds, options=options)
grpc.channel_ready_future(channel).result() # Wait for channel to be ready.
logging.info('Added gRPC channel for the Emulator on port %s', port)
return emulator_controller_pb2_grpc.EmulatorControllerStub(channel)
def get_emulator_stub(
self) -> emulator_controller_pb2_grpc.EmulatorControllerStub:
"""Returns the EmulatorController stub for the launched emulator."""
return self._emulator_stub
def _kill_emulator_process(self) -> None:
"""Shuts down the emulator process."""
if self._emulator:
logging.info('Killing the emulator process...')
self._emulator_stub.setVmState(
emulator_controller_pb2.VmRunState(
state=emulator_controller_pb2.VmRunState.RunState.SHUTDOWN))
logging.info('Will wait 30s for it to finish gracefully...')
try:
self._emulator.wait(timeout=30.0)
except subprocess.TimeoutExpired:
logging.exception(
'The emulator process did not finish after 30s. '
'returncode: %s. Will now try to kill() it.',
self._emulator.returncode)
self._emulator.kill()
self._emulator = None
self._emulator_output.close()
logging.info('Done killing the emulator process.')
def close(self):
"""Clean up launcher files and processes."""
if not self._is_closed:
self._kill_emulator_process()
self._is_closed = True
def __del__(self):
self.close()
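# Illustrative usage sketch (paths, ports and the AVD name below are
# assumptions for the example, not defaults shipped with this module):
#   launcher = EmulatorLauncher(
#       emulator_path='/opt/android/emulator/emulator',
#       android_sdk_root='/opt/android',
#       android_avd_home='/opt/android/avd',
#       avd_name='my_avd',
#       adb_port=5555, adb_server_port=5037,
#       emulator_console_port=5554, grpc_port=8554,
#       run_headless=True)
#   launcher.launch()
#   stub = launcher.get_emulator_stub()
#   launcher.close()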
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
python/paddle/fluid/__init__.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
# import all class inside framework into fluid module
import framework
from framework import *
# import all class inside executor into fluid module
import executor
from executor import *
import io
import evaluator
import initializer
import layers
import nets
import optimizer
import backward
import regularizer
import average
import metrics
from param_attr import ParamAttr, WeightNormParamAttr
from data_feeder import DataFeeder
from core import LoDTensor, CPUPlace, CUDAPlace, CUDAPinnedPlace
from distribute_transpiler import DistributeTranspiler
from distribute_transpiler_simple import SimpleDistributeTranspiler
from concurrency import (Go, make_channel, channel_send, channel_recv,
channel_close, Select)
import clip
from memory_optimization_transpiler import memory_optimize, release_memory
import profiler
import unique_name
import recordio_writer
from parallel_executor import ParallelExecutor
Tensor = LoDTensor
__all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + [
'io',
'initializer',
'layers',
'nets',
'optimizer',
'learning_rate_decay',
'backward',
'regularizer',
'LoDTensor',
'CPUPlace',
'CUDAPlace',
'CUDAPinnedPlace',
'Tensor',
'ParamAttr',
'WeightNormParamAttr',
'DataFeeder',
'clip',
'SimpleDistributeTranspiler',
'DistributeTranspiler',
'memory_optimize',
'release_memory',
'profiler',
'unique_name',
'recordio_writer',
'ParallelExecutor',
]
def __bootstrap__():
"""
Enable reading gflags from environment variables.
Returns:
None
"""
import sys
import core
import os
in_test = 'unittest' in sys.modules
try:
num_threads = int(os.getenv('OMP_NUM_THREADS', '1'))
except ValueError:
num_threads = 1
if num_threads > 1:
print(
'WARNING: OMP_NUM_THREADS set to {0}, not 1. The computation '
'speed will not be optimized if you use data parallel. It will '
'fail if this PaddlePaddle binary is compiled with OpenBlas since'
' OpenBlas does not support multi-threads.'.format(num_threads),
file=sys.stderr)
print('PLEASE USE OMP_NUM_THREADS WISELY.', file=sys.stderr)
os.environ['OMP_NUM_THREADS'] = str(num_threads)
read_env_flags = [
'use_pinned_memory', 'check_nan_inf', 'benchmark', 'warpctc_dir'
]
if core.is_compiled_with_cuda():
read_env_flags += ['fraction_of_gpu_memory_to_use']
core.init_gflags([sys.argv[0]] +
["--tryfromenv=" + ",".join(read_env_flags)])
core.init_glog(sys.argv[0])
# don't init_p2p when in unittest to save time.
core.init_devices(not in_test)
# TODO(panyx0718): Avoid doing complex initialization logic in __init__.py.
# Consider paddle.init(args) or paddle.main(args)
layers.monkey_patch_variable()
__bootstrap__()
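# Illustrative environment setup read by __bootstrap__ (flag names come from
# read_env_flags above; the values are examples only):
#   OMP_NUM_THREADS=1 FLAGS_use_pinned_memory=true python train.py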
|
[] |
[] |
[
"OMP_NUM_THREADS"
] |
[]
|
["OMP_NUM_THREADS"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"fmt"
"os"
"os/exec"
"strings"
"github.com/dihedron/plugins/log"
"github.com/dihedron/plugins/shared"
"github.com/hashicorp/go-plugin"
"go.uber.org/zap"
)
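// Illustrative invocation (the plugin binary names are placeholders; KV_PLUGIN
// must point at a built plugin executable):
//   KV_PLUGIN=./kv-plugin-grpc go run . put hello world
//   KV_PLUGIN=./kv-plugin-grpc go run . get hello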
func main() {
zap.L().Sync()
logger := log.NewHCLogAdapter(nil)
// We don't want to see the plugin logs.
// log.SetOutput(ioutil.Discard)
// We're a host. Start by launching the plugin process.
client := plugin.NewClient(&plugin.ClientConfig{
Logger: logger,
HandshakeConfig: shared.Handshake,
Plugins: shared.PluginMap,
Cmd: exec.Command("sh", "-c", os.Getenv("KV_PLUGIN")),
AllowedProtocols: []plugin.Protocol{
plugin.ProtocolNetRPC, plugin.ProtocolGRPC},
})
defer client.Kill()
// Connect via RPC
rpcClient, err := client.Client()
if err != nil {
fmt.Println("Error:", err.Error())
os.Exit(1)
}
// Request the plugin
var raw interface{}
if strings.Contains(os.Getenv("KV_PLUGIN"), "grpc") {
raw, err = rpcClient.Dispense("kv_grpc")
} else if strings.Contains(os.Getenv("KV_PLUGIN"), "netrpc") {
raw, err = rpcClient.Dispense("kv_netrpc")
}
if err != nil {
fmt.Println("Error:", err.Error())
os.Exit(1)
}
// We should have a KV store now! This feels like a normal interface
// implementation but is in fact over an RPC connection.
kv := raw.(shared.KV)
os.Args = os.Args[1:]
if len(os.Args) == 0 {
fmt.Println("Usage: get <key> | put <key> <value>")
os.Exit(1)
}
switch os.Args[0] {
case "get":
result, err := kv.Get(os.Args[1])
if err != nil {
fmt.Println("Error:", err.Error())
os.Exit(1)
}
fmt.Println(string(result))
case "put":
err := kv.Put(os.Args[1], []byte(os.Args[2]))
if err != nil {
fmt.Println("Error:", err.Error())
os.Exit(1)
}
default:
fmt.Printf("Please only use 'get' or 'put', given: %q", os.Args[0])
os.Exit(1)
}
os.Exit(0)
}
|
[
"\"KV_PLUGIN\"",
"\"KV_PLUGIN\"",
"\"KV_PLUGIN\""
] |
[] |
[
"KV_PLUGIN"
] |
[]
|
["KV_PLUGIN"]
|
go
| 1 | 0 | |
src/cmd/objdump/objdump_test.go
|
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"flag"
"go/build"
"internal/testenv"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
)
func buildObjdump(t *testing.T) (tmp, exe string) {
testenv.MustHaveGoBuild(t)
tmp, err := ioutil.TempDir("", "TestObjDump")
if err != nil {
t.Fatal("TempDir failed: ", err)
}
exe = filepath.Join(tmp, "testobjdump.exe")
out, err := exec.Command(testenv.GoToolPath(t), "build", "-o", exe, "cmd/objdump").CombinedOutput()
if err != nil {
os.RemoveAll(tmp)
t.Fatalf("go build -o %v cmd/objdump: %v\n%s", exe, err, string(out))
}
return
}
var x86Need = []string{
"fmthello.go:6",
"TEXT main.main(SB)",
"JMP main.main(SB)",
"CALL fmt.Println(SB)",
"RET",
}
var armNeed = []string{
"fmthello.go:6",
"TEXT main.main(SB)",
//"B.LS main.main(SB)", // TODO(rsc): restore; golang.org/issue/9021
"BL fmt.Println(SB)",
"RET",
}
var ppcNeed = []string{
"fmthello.go:6",
"TEXT main.main(SB)",
"BR main.main(SB)",
"BL fmt.Println(SB)",
"RET",
}
var target = flag.String("target", "", "test disassembly of `goos/goarch` binary")
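// Illustrative cross-target run (the flag value is an example):
//   go test cmd/objdump -run TestDisasm -args -target=linux/arm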
// objdump is fully cross platform: it can handle binaries
// from any known operating system and architecture.
// We could in principle add binaries to testdata and check
// all the supported systems during this test. However, the
// binaries would be about 1 MB each, and we don't want to
// add that much junk to the hg repository. Instead, build a
// binary for the current system (only) and test that objdump
// can handle that one.
func testDisasm(t *testing.T, flags ...string) {
tmp, exe := buildObjdump(t)
defer os.RemoveAll(tmp)
goarch := runtime.GOARCH
if *target != "" {
f := strings.Split(*target, "/")
if len(f) != 2 {
t.Fatalf("-target argument must be goos/goarch")
}
defer os.Setenv("GOOS", os.Getenv("GOOS"))
defer os.Setenv("GOARCH", os.Getenv("GOARCH"))
os.Setenv("GOOS", f[0])
os.Setenv("GOARCH", f[1])
goarch = f[1]
}
hello := filepath.Join(tmp, "hello.exe")
args := []string{"build", "-o", hello}
args = append(args, flags...)
args = append(args, "testdata/fmthello.go")
out, err := exec.Command(testenv.GoToolPath(t), args...).CombinedOutput()
if err != nil {
t.Fatalf("go build fmthello.go: %v\n%s", err, out)
}
need := []string{
"fmthello.go:6",
"TEXT main.main(SB)",
}
switch goarch {
case "amd64", "386":
need = append(need, x86Need...)
case "arm":
need = append(need, armNeed...)
case "ppc64", "ppc64le":
need = append(need, ppcNeed...)
}
out, err = exec.Command(exe, "-s", "main.main", hello).CombinedOutput()
if err != nil {
t.Fatalf("objdump fmthello.exe: %v\n%s", err, out)
}
text := string(out)
ok := true
for _, s := range need {
if !strings.Contains(text, s) {
t.Errorf("disassembly missing '%s'", s)
ok = false
}
}
if !ok {
t.Logf("full disassembly:\n%s", text)
}
}
func TestDisasm(t *testing.T) {
switch runtime.GOARCH {
case "arm64":
t.Skipf("skipping on %s, issue 10106", runtime.GOARCH)
case "mips64", "mips64le":
t.Skipf("skipping on %s, issue 12559", runtime.GOARCH)
case "s390x":
t.Skipf("skipping on %s, issue 15255", runtime.GOARCH)
}
testDisasm(t)
}
func TestDisasmExtld(t *testing.T) {
switch runtime.GOOS {
case "plan9", "windows":
t.Skipf("skipping on %s", runtime.GOOS)
}
switch runtime.GOARCH {
case "ppc64", "ppc64le":
t.Skipf("skipping on %s, no support for external linking, issue 9038", runtime.GOARCH)
case "arm64":
t.Skipf("skipping on %s, issue 10106", runtime.GOARCH)
case "mips64", "mips64le":
t.Skipf("skipping on %s, issue 12559 and 12560", runtime.GOARCH)
case "s390x":
t.Skipf("skipping on %s, issue 15255", runtime.GOARCH)
}
// TODO(jsing): Reenable once openbsd/arm has external linking support.
if runtime.GOOS == "openbsd" && runtime.GOARCH == "arm" {
t.Skip("skipping on openbsd/arm, no support for external linking, issue 10619")
}
if !build.Default.CgoEnabled {
t.Skip("skipping because cgo is not enabled")
}
testDisasm(t, "-ldflags=-linkmode=external")
}
|
[
"\"GOOS\"",
"\"GOARCH\""
] |
[] |
[
"GOARCH",
"GOOS"
] |
[]
|
["GOARCH", "GOOS"]
|
go
| 2 | 0 | |
first/settings.py
|
from pathlib import Path
import os
from decouple import config
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
SECRET_KEY = config("SECRET_KEY")
DEBUG = config('DEBUG', cast=bool)
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
'whitenoise.runserver_nostatic',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
'users.apps.UsersConfig',
'crispy_forms',
'ibm.apps.IbmConfig',
'storages',
"corsheaders",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
"corsheaders.middleware.CorsMiddleware",
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'first.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'first.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('POSTGRES_USER'),
'USER': config('POSTGRES_USER'),
'PASSWORD' : config('POSTGRES_PASSWORD'),
'HOST' : config('POSTGRES_HOST'),
'PORT' : config('POSTGRES_PORT')
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Tehran'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = '/vol/web/static'
# STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'blog-home'
LOGIN_URL = 'login'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# IBM text to speech
IBM_TEXT_TO_SPEECH_API_KEY = config("IBM_TEXT_TO_SPEECH_API_KEY")
IBM_TEXT_TO_SPEECH_URL = config("IBM_TEXT_TO_SPEECH_URL")
READER_VOICE = config("READER_VOICE")
# IBM NLU
IBM_NLU_API_KEY = config("IBM_NLU_API_KEY")
IBM_NLU_URL = config("IBM_NLU_URL")
# ARVAN
ARVAN_STORAGE_ENDPOINT_URL = config("ARVAN_STORAGE_ENDPOINT_URL")
ARVAN_STORAGE_ACCESS_KEY_ID = config("ARVAN_STORAGE_ACCESS_KEY_ID")
ARVAN_STORAGE_SECRET_ACCESS_KEY = config("ARVAN_STORAGE_SECRET_ACCESS_KEY")
ARVAN_STORAGE_BUCKET_NAME = config("ARVAN_STORAGE_BUCKET_NAME")
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
AWS_S3_ACCESS_KEY_ID = config("ARVAN_STORAGE_ACCESS_KEY_ID")
AWS_S3_SECRET_ACCESS_KEY = config("ARVAN_STORAGE_SECRET_ACCESS_KEY")
AWS_STORAGE_BUCKET_NAME = config("ARVAN_STORAGE_BUCKET_NAME")
AWS_S3_ENDPOINT_URL = config("ARVAN_STORAGE_ENDPOINT_URL")
AWS_DEFAULT_ACL = 'public-read'
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# EMAIL_HOST = 'smtp.gmail.com'
# EMAIL_PORT = 587
# EMAIL_USE_TLS = True
# EMAIL_HOST_USER = os.environ.get('EMAIL_USER')
# EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASS')
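# Illustrative .env consumed via decouple.config above (all values are
# placeholders):
#   SECRET_KEY=changeme
#   DEBUG=False
#   POSTGRES_USER=app
#   POSTGRES_PASSWORD=secret
#   POSTGRES_HOST=db
#   POSTGRES_PORT=5432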
|
[] |
[] |
[
"EMAIL_PASS",
"EMAIL_USER"
] |
[]
|
["EMAIL_PASS", "EMAIL_USER"]
|
python
| 2 | 0 | |
vendor/github.com/argoproj/argo-cd/util/config/exec.go
|
package config
import (
"os"
"time"
"github.com/argoproj/pkg/exec"
)
var timeout time.Duration
func init() {
initTimeout()
}
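// ARGOCD_EXEC_TIMEOUT accepts any time.ParseDuration string, e.g. "90s" or
// "2m"; when unset or invalid, the 90-second default below applies.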
func initTimeout() {
var err error
timeout, err = time.ParseDuration(os.Getenv("ARGOCD_EXEC_TIMEOUT"))
if err != nil {
timeout = 90 * time.Second
}
}
func CmdOpts() exec.CmdOpts {
return exec.CmdOpts{Timeout: timeout}
}
|
[
"\"ARGOCD_EXEC_TIMEOUT\""
] |
[] |
[
"ARGOCD_EXEC_TIMEOUT"
] |
[]
|
["ARGOCD_EXEC_TIMEOUT"]
|
go
| 1 | 0 | |
amogus/challenge/src/player_voter.py
|
#!/usr/bin/env python3
import asyncio
import argparse
import logging
import socket
import binascii
import time
import json
import aioconsole
import codecs
import sys
import struct
import os
from binascii import hexlify
from construct import *
from ctypes import sizeof, c_int
FLAG = os.getenv("FLAG", "flag{this_is_very_suspicious}")
INITIAL_KEYS = [b"", b"", b"05f1e0f77ce8fbb75532e8023056e4e6"]
MESSAGE_HEADER_LEN = sizeof(c_int) + 32 + sizeof(c_int)
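# Wire format (inferred from GenericMessageHeader below): a little-endian u32
# directive, a 32-byte key, a little-endian u32 payload length, then `mlen`
# payload bytes.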
SERVER_PORT = 31337
VOTER_NUM = 2
LOG_LEVEL = logging.ERROR
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=LOG_LEVEL)
logger = logging.getLogger(__name__)
def pwfl(out):
if type(out) == bytes:
out = out.decode()
sys.stdout.write(out+'\n')
sys.stdout.flush()
async def handle_input(transport):
while True:
line = await aioconsole.ainput("")
try:
line = line.strip()
line = codecs.decode(line, "hex")
except binascii.Error as e:
if str(e) == "decoding with 'hex' codec failed (Error: Odd-length string)":
sys.stdout.write("You must provide an even length string...")
sys.stdout.flush()
elif str(e) == "decoding with 'hex' codec failed (Error: Non-hexadecimal digit found)":
sys.stdout.write("Non-hex digit found...")
sys.stdout.flush()
continue
except Exception as e:
sys.stdout.write(f"Woah that's not supposed to happen. Please tell an admin: {str(e)}\n")
sys.stdout.write(f"Exiting...")
sys.stdout.flush()
exit(-1)
transport.write(line)
class VoterClientProtocol(asyncio.Protocol):
def __init__(self, on_con_lost, voter_num, loop):
self.voter = Voter(voter_num)
self.transport = None
self.on_con_lost = on_con_lost
self.loop = loop
def connection_made(self, transport):
self.voter.initialize_transport(transport) # Initialize transport object in voter object
peername = transport.get_extra_info('peername') # Get the IP and port we just connected to
logging.debug(f"Made new connection {peername[0]} : {peername[1]}\n")
self.transport = transport # Initialize transport object so we can send stuff outside this function
self.loop.create_task(handle_input(transport))
return
def data_received(self, data):
while len(data) > 0:
if len(data) >= MESSAGE_HEADER_LEN:
try:
msgHeader = self.voter.GenericMessageHeader.parse(data[0:MESSAGE_HEADER_LEN])
except StreamError as e:
logging.warning(f"Error while trying to parse message from interface: {str(e)}")
return
logger.debug(f"len: {len(data)} | Data: {data.hex()}\n")
full_msg = data[0:MESSAGE_HEADER_LEN+msgHeader.mlen]
header = data[0:MESSAGE_HEADER_LEN]
msg = data[MESSAGE_HEADER_LEN:MESSAGE_HEADER_LEN+msgHeader.mlen]
data = data[MESSAGE_HEADER_LEN+msgHeader.mlen:]
logger.debug(f"RECV from interface: [directive:{msgHeader.directive}][key:{hexlify(msgHeader.key)}][mlen:{msgHeader.mlen}] msg:{msg.hex()} | data: {data.hex()}")
self.loop.create_task((self.voter.handle_msg(msgHeader, header, msg, full_msg)))
logger.debug(f"End: {len(data)}")
else:
logger.debug(f"Received too small of a message: {hexlify(data)}...")
def connection_lost(self, exc):
logger.info('The server closed the connection')
self.on_con_lost.set_result(True)
class Voter():
# Directive Constants
POSE_QUESTION = 0
VOTER_VOTE = 1
MSG_RESPONSE = 2
VOTER_LOG_CACHE = 3
VOTER_LOG_SAVE = 4
VOTER_READY = 5
INVALID_DIRECTIVE = 99
MSG_SUCCESS = 1
def __init__(self, p_num):
self.transport = None
self.answers_file = None
self.answer = [0.0, 0.0, 0.0, 0.0]
self.last_commit = b""
self.last_answer = b""
self.count = 0
self.answers = None
self.lock = asyncio.Lock()
with open("answer_index.txt", "r") as f:
self.answers = json.load(f)
self.GenericMessageHeader = Struct(
'directive' / Int32ul,
'key' / Bytes(32),
'mlen' / Int32ul
)
def initialize_transport(self, t):
self.transport = t
# Determine what the interface is asking for and use its message to provide it the information it needs
async def handle_msg(self, msgHeader, header, msg, full_msg):
logger.debug(f"Voter_PLAYER -- Got directive: {hex(msgHeader.directive)} | msg: {hexlify(msg)} | data len: {msgHeader.mlen}")
if msgHeader.directive == Voter.POSE_QUESTION and msgHeader.key == b"\xFF"*32:
if(self.last_commit != self.last_answer):
logger.debug(f"Last commit: {self.last_commit} | last_answer: {self.last_answer}")
self.count += 1
else:
self.count = 0
if(self.count > 5):
pwfl("\n\nOkay where the heck are we going? Guys?")
pwfl(FLAG)
time.sleep(1)
exit(0)
key = msg.hex()
if(key in self.answers):
self.last_answer = struct.pack("<dddd", *self.answers[key])
else:
self.last_answer = struct.pack("<dddd", *[0.992, 0.123, 0.0, 0.0])
if msgHeader.directive == Voter.VOTER_VOTE:
if(msgHeader.key == b"\xFF"*32): # Got confirmation of commit
self.last_commit = msg
sys.stdout.write(full_msg.hex() + "\n")
sys.stdout.flush()
return
async def main():
parser = argparse.ArgumentParser(description="Sping up an instance of a voter for the Among Us challenge")
parser.add_argument('-i',
'--identity',
metavar="1234",
type=int,
help="Voter number",
default=2)
parser.add_argument('-p',
'--port',
metavar='65536',
default=31337,
type=int,
help='Port to connect to interface on')
args = parser.parse_args()
loop = asyncio.get_running_loop()
on_con_lost = loop.create_future()
while True:
try:
transport, protocol = await loop.create_connection(
lambda: VoterClientProtocol(on_con_lost, args.identity, loop),
'localhost', SERVER_PORT)
break
except socket.error as e:
logging.warning(f"Interface is not up... got {str(e)}")
logging.warning("Trying again in 3 seconds...")
time.sleep(3)
key = INITIAL_KEYS[2]
pwfl("************Among Us************")
pwfl(f"Your key: {key.decode()}")
try:
await on_con_lost
except KeyboardInterrupt:
transport.close()
finally:
transport.close()
if __name__ == "__main__":
asyncio.run(main())
|
[] |
[] |
[
"FLAG"
] |
[]
|
["FLAG"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"fmt"
"log"
"net/http"
"net/url"
"os"
"github.com/gin-gonic/gin"
"github.com/line/line-bot-sdk-go/linebot"
)
func main() {
port := os.Getenv("PORT")
if port == "" {
log.Fatal("$PORT must be set")
}
router := gin.New()
router.Use(gin.Logger())
router.LoadHTMLGlob("templates/*.tmpl.html")
router.Static("/static", "static")
router.GET("/", func(c *gin.Context) {
c.HTML(http.StatusOK, "index.tmpl.html", nil)
})
router.POST("/callback", func(c *gin.Context) {
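// Send LINE API traffic through the proxy configured in FIXIE_URL
// (e.g. a static-egress-IP proxy) rather than connecting directly.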
proxyURL, err := url.Parse(os.Getenv("FIXIE_URL"))
if err != nil {
fmt.Println(err)
return
}
client := &http.Client{
Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)},
}
bot, err := linebot.New(os.Getenv("CHANNEL_SECRET"), os.Getenv("CHANNEL_ACCESS_TOKEN"), linebot.WithHTTPClient(client))
if err != nil {
fmt.Println(err)
return
}
events, err := bot.ParseRequest(c.Request)
if err != nil {
if err == linebot.ErrInvalidSignature {
fmt.Println(err)
}
return
}
for _, event := range events {
if event.Type == linebot.EventTypeMessage {
switch message := event.Message.(type) {
case *linebot.TextMessage:
if _, err = bot.ReplyMessage(event.ReplyToken, linebot.NewTextMessage(message.Text)).Do(); err != nil {
log.Print(err)
}
}
}
// content := event.Context()
// if content != nil && content.IsMessage && content.ContentType == linebot.ContentTypeText {
// text, err := content.TextContent()
// res, err := bot.SendText([]string{content.Form}, "OK "+text.Text)
// if err != nil {
// fmt.Println(res)
// }
// }
}
})
router.Run(":" + port)
}
|
[
"\"PORT\"",
"\"FIXIE_URL\"",
"\"CHANNEL_SECRET\"",
"\"CHANNEL_ACCESS_TOKEN\""
] |
[] |
[
"PORT",
"CHANNEL_ACCESS_TOKEN",
"FIXIE_URL",
"CHANNEL_SECRET"
] |
[]
|
["PORT", "CHANNEL_ACCESS_TOKEN", "FIXIE_URL", "CHANNEL_SECRET"]
|
go
| 4 | 0 | |
l2geth/cmd/utils/customflags.go
|
// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"encoding"
"errors"
"flag"
"math/big"
"os"
"os/user"
"path"
"strings"
"github.com/ethereum-optimism/optimism/l2geth/common/math"
"gopkg.in/urfave/cli.v1"
)
// Custom type which is registered in the flags library which cli uses for
// argument parsing. This allows us to expand Value to an absolute path when
// the argument is parsed
type DirectoryString string
func (s *DirectoryString) String() string {
return string(*s)
}
func (s *DirectoryString) Set(value string) error {
*s = DirectoryString(expandPath(value))
return nil
}
// Custom cli.Flag type which expands the received string to an absolute path.
// e.g. ~/.ethereum -> /home/username/.ethereum
type DirectoryFlag struct {
Name string
Value DirectoryString
Usage string
EnvVar string
}
func (f DirectoryFlag) String() string {
return cli.FlagStringer(f)
}
// called by cli library, grabs variable from environment (if in env)
// and adds variable to flag set for parsing.
func (f DirectoryFlag) Apply(set *flag.FlagSet) {
eachName(f.Name, func(name string) {
set.Var(&f.Value, name, f.Usage)
})
}
func (f DirectoryFlag) GetName() string {
return f.Name
}
func (f *DirectoryFlag) Set(value string) {
f.Value.Set(value)
}
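// eachName splits a comma-separated flag name (e.g. "datadir, d") and
// invokes fn once per trimmed alias.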
func eachName(longName string, fn func(string)) {
parts := strings.Split(longName, ",")
for _, name := range parts {
name = strings.Trim(name, " ")
fn(name)
}
}
type TextMarshaler interface {
encoding.TextMarshaler
encoding.TextUnmarshaler
}
// textMarshalerVal turns a TextMarshaler into a flag.Value
type textMarshalerVal struct {
v TextMarshaler
}
func (v textMarshalerVal) String() string {
if v.v == nil {
return ""
}
text, _ := v.v.MarshalText()
return string(text)
}
func (v textMarshalerVal) Set(s string) error {
return v.v.UnmarshalText([]byte(s))
}
// TextMarshalerFlag wraps a TextMarshaler value.
type TextMarshalerFlag struct {
Name string
Value TextMarshaler
Usage string
EnvVar string
}
func (f TextMarshalerFlag) GetName() string {
return f.Name
}
func (f TextMarshalerFlag) String() string {
return cli.FlagStringer(f)
}
func (f TextMarshalerFlag) Apply(set *flag.FlagSet) {
eachName(f.Name, func(name string) {
set.Var(textMarshalerVal{f.Value}, name, f.Usage)
})
}
// GlobalTextMarshaler returns the value of a TextMarshalerFlag from the global flag set.
func GlobalTextMarshaler(ctx *cli.Context, name string) TextMarshaler {
val := ctx.GlobalGeneric(name)
if val == nil {
return nil
}
return val.(textMarshalerVal).v
}
// BigFlag is a command line flag that accepts 256 bit big integers in decimal or
// hexadecimal syntax.
type BigFlag struct {
Name string
Value *big.Int
Usage string
EnvVar string
}
// bigValue turns *big.Int into a flag.Value
type bigValue big.Int
func (b *bigValue) String() string {
if b == nil {
return ""
}
return (*big.Int)(b).String()
}
func (b *bigValue) Set(s string) error {
num, ok := math.ParseBig256(s)
if !ok {
return errors.New("invalid integer syntax")
}
*b = (bigValue)(*num)
return nil
}
func (f BigFlag) GetName() string {
return f.Name
}
func (f BigFlag) String() string {
return cli.FlagStringer(f)
}
func (f BigFlag) Apply(set *flag.FlagSet) {
eachName(f.Name, func(name string) {
set.Var((*bigValue)(f.Value), name, f.Usage)
})
}
// GlobalBig returns the value of a BigFlag from the global flag set.
func GlobalBig(ctx *cli.Context, name string) *big.Int {
val := ctx.GlobalGeneric(name)
if val == nil {
return nil
}
return (*big.Int)(val.(*bigValue))
}
// Expands a file path
// 1. replace tilde with users home dir
// 2. expands embedded environment variables
// 3. cleans the path, e.g. /a/b/../c -> /a/c
// Note, it has limitations, e.g. ~someuser/tmp will not be expanded
func expandPath(p string) string {
if strings.HasPrefix(p, "~/") || strings.HasPrefix(p, "~\\") {
if home := homeDir(); home != "" {
p = home + p[1:]
}
}
return path.Clean(os.ExpandEnv(p))
}
func homeDir() string {
if home := os.Getenv("HOME"); home != "" {
return home
}
if usr, err := user.Current(); err == nil {
return usr.HomeDir
}
return ""
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
forwarder/forwarder.go
|
package main
import (
"flag"
"os"
"time"
"github.com/golang/glog"
clversioned "github.com/mkimuram/k8s-ext-connector/pkg/client/clientset/versioned"
clv1alpha1 "github.com/mkimuram/k8s-ext-connector/pkg/client/clientset/versioned/typed/submariner/v1alpha1"
sbinformers "github.com/mkimuram/k8s-ext-connector/pkg/client/informers/externalversions"
"github.com/mkimuram/k8s-ext-connector/pkg/forwarder"
"github.com/mkimuram/k8s-ext-connector/pkg/util"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
)
var (
namespace string
name string
fwd *util.Controller
)
func init() {
flag.Set("logtostderr", "true")
flag.Set("stderrthreshold", "INFO")
flag.Parse()
namespace = os.Getenv("FORWARDER_NAMESPACE")
name = os.Getenv("FORWARDER_NAME")
if namespace == "" || name == "" {
glog.Fatalf("FORWARDER_NAMESPACE and FORWARDER_NAME need to be defined as environment variables")
}
// create in-cluster config
config, err := rest.InClusterConfig()
if err != nil {
glog.Fatalf("Failed to build config: %v", err)
}
// create clientset
cl, err := clv1alpha1.NewForConfig(config)
if err != nil {
glog.Fatalf("Failed to create client: %v", err)
}
// create versioned clientset
vcl, err := clversioned.NewForConfig(config)
if err != nil {
glog.Fatalf("Failed to create versioned client: %v", err)
}
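// Watch Forwarder objects (resync every 30s) and reconcile the one
// this pod is responsible for, identified by namespace/name.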
informerFactory := sbinformers.NewSharedInformerFactory(vcl, time.Second*30)
informer := informerFactory.Submariner().V1alpha1().Forwarders().Informer()
reconciler := forwarder.NewReconciler(cl, namespace, name)
fwd = util.NewController(cl, informerFactory, informer, reconciler)
}
func main() {
fwd.Run()
}
|
[
"\"FORWARDER_NAMESPACE\"",
"\"FORWARDER_NAME\""
] |
[] |
[
"FORWARDER_NAMESPACE",
"FORWARDER_NAME"
] |
[]
|
["FORWARDER_NAMESPACE", "FORWARDER_NAME"]
|
go
| 2 | 0 | |
carwings.go
|
package carwings
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"math"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strconv"
"strings"
"time"
//lint:ignore SA1019 Blowfish is terrible, but that's what the Nissan API uses
"golang.org/x/crypto/blowfish"
)
const (
initialAppStrings = "9s5rfKVuMrT03RtzajWNcA"
)
var (
// ErrNotLoggedIn is returned whenever an operation is run and
// the user has not yet logged in.
ErrNotLoggedIn = errors.New("not logged in")
// ErrUpdateFailed indicates an error talking to the Carwings
// service when fetching updated vehicle data.
ErrUpdateFailed = errors.New("failed to retrieve updated info from vehicle")
// ErrBatteryStatusUnavailable is returned from the
// BatteryStatus method when no data is available.
ErrBatteryStatusUnavailable = errors.New("battery status unavailable")
// ErrVehicleInfoUnavailable is returned when vehicle information is
// not available when logging in.
ErrVehicleInfoUnavailable = errors.New("vehicle info unavailable")
// Debug indicates whether to log HTTP requests and responses to stderr
Debug = false
// Default URL for connecting to Carwings service. This is
// changed by Nissan from time to time, so it's helpful to
// have it be configurable.
BaseURL = "https://gdcportalgw.its-mo.com/api_v200413_NE/gdc/"
// Http client used for api requests
Client = http.DefaultClient
)
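// pkcs5Padding pads data to a whole number of cipher blocks; every padding
// byte stores the pad length (PKCS#5/PKCS#7 style).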
func pkcs5Padding(data []byte, blocksize int) []byte {
padLen := blocksize - (len(data) % blocksize)
padding := bytes.Repeat([]byte{byte(padLen)}, padLen)
return append(data, padding...)
}
// Pads the source, does ECB Blowfish encryption on it, and returns a
// base64-encoded string.
func encrypt(s, key string) (string, error) {
cipher, err := blowfish.NewCipher([]byte(key))
if err != nil {
return "", err
}
src := []byte(s)
src = pkcs5Padding(src, cipher.BlockSize())
dst := make([]byte, len(src))
pos := 0
for pos < len(src) {
cipher.Encrypt(dst[pos:], src[pos:])
pos += cipher.BlockSize()
}
return base64.StdEncoding.EncodeToString(dst), nil
}
// MetersToMiles converts Carwings distances (in meters) to miles.
func MetersToMiles(meters int) int {
const MilesPerMeter = 0.000621371
return int(float64(meters) * MilesPerMeter)
}
const (
RegionUSA = "NNA"
RegionEurope = "NE"
RegionCanada = "NCI"
RegionAustralia = "NMA"
RegionJapan = "NML"
)
// Session defines one or more connections to the Carwings service
type Session struct {
// Region is one of the predefined region codes where this car operates.
Region string
// Filename is an optional file to load and save an existing session to.
Filename string
username string
encpw string
VIN string
customSessionID string
tz string
loc *time.Location
cabinTemp int
}
// ClimateStatus contains information about the vehicle's climate
// control (AC or heater) status.
type ClimateStatus struct {
// Date and time this status was retrieved from the vehicle.
LastOperationTime time.Time
// The current climate control operation status.
Running bool
// Current plugged-in state
PluginState PluginState
// The amount of time the climate control system will run
// while on battery power, in seconds.
BatteryDuration int
// The amount of time the climate control system will run
// while plugged in, in seconds.
PluggedDuration int
// The climate preset temperature unit, F or C
TemperatureUnit string
// The climate preset temperature value
Temperature int
// Time the AC was stopped, or is scheduled to stop
ACStopTime time.Time
// Estimated cruising range with climate control on, in
// meters.
CruisingRangeACOn int
// Estimated cruising range with climate control off, in
// meters.
CruisingRangeACOff int
}
// BatteryStatus contains information about the vehicle's state of
// charge, current plugged-in state, charging status, and the time to
// charge the battery to full.
type BatteryStatus struct {
// Date and time this battery status was retrieved from the
// vehicle.
Timestamp time.Time
// Total capacity of the battery. Units unknown.
Capacity int
// Remaining battery level. Units unknown, but same as Capacity.
Remaining int
// Remaining battery level in Watt Hours.
RemainingWH int
// Current state of charge. In percent, should be roughly
// equivalent to Remaining / Capacity * 100.
StateOfCharge int // percent
// Estimated cruising range with climate control on, in
// meters.
CruisingRangeACOn int
// Estimated cruising range with climate control off, in
// meters.
CruisingRangeACOff int
// Current plugged-in state
PluginState PluginState
// Current charging status
ChargingStatus ChargingStatus
// Amount of time remaining until battery is fully charged,
// using different possible charging methods.
TimeToFull TimeToFull
}
// TimeToFull contains information about how long it will take to
// charge the battery to full via different charging methods.
type TimeToFull struct {
// Time to fully charge the battery using a 1.4 kW Level 1
// (120V 12A) trickle charge.
Level1 time.Duration
// Time to fully charge the battery using a 3.3 kW Level 2
// (240V ~15A) charge.
Level2 time.Duration
// Time to fully charge the battery using a 6.6 kW Level 2
// (240V ~30A) charge.
Level2At6kW time.Duration
}
// VehicleLocation indicates the vehicle's current location.
type VehicleLocation struct {
// Timestamp of the last time vehicle location was updated.
Timestamp time.Time
// Latitude of the vehicle
Latitude string
// Longitude of the vehicle
Longitude string
}
// PluginState indicates whether and how the vehicle is plugged in.
// It is separate from ChargingStatus, because the vehicle can be
// plugged in but not actively charging.
type PluginState string
const (
// Not connected to a charger
NotConnected = PluginState("NOT_CONNECTED")
// Connected to a normal J1772 Level 1 or 2 charger
Connected = PluginState("CONNECTED")
// Connected to a high voltage DC quick charger (ChaDeMo)
QCConnected = PluginState("QC_CONNECTED")
// Invalid state, when updating data from the vehicle fails.
InvalidPluginState = PluginState("INVALID")
)
func (ps PluginState) String() string {
switch ps {
case NotConnected:
return "not connected"
case Connected:
return "connected"
case QCConnected:
return "connected to quick charger"
case InvalidPluginState:
return "invalid"
default:
return string(ps)
}
}
// ChargingStatus indicates whether and how the vehicle is charging.
type ChargingStatus string
const (
// Not charging
NotCharging = ChargingStatus("NOT_CHARGING")
// Normal charging from a Level 1 or 2 EVSE
NormalCharging = ChargingStatus("NORMAL_CHARGING")
// Rapidly charging from a ChaDeMo DC quick charger
RapidlyCharging = ChargingStatus("RAPIDLY_CHARGING")
// Invalid state, when updating data from the vehicle fails.
InvalidChargingStatus = ChargingStatus("INVALID")
)
func (cs ChargingStatus) String() string {
switch cs {
case NotCharging:
return "not charging"
case NormalCharging:
return "charging"
case RapidlyCharging:
return "rapidly charging"
case InvalidChargingStatus:
return "invalid"
default:
return string(cs)
}
}
// OperationResult
const (
start = "START"
electricWaveAbnormal = "ELECTRIC_WAVE_ABNORMAL"
)
type cwTime time.Time
func (cwt *cwTime) UnmarshalJSON(data []byte) error {
if data == nil || string(data) == `""` {
return nil
}
// Carwings uses at least five different date formats! 🙄🙄🙄
t, err := time.Parse(`"2006\/01\/02 15:04"`, string(data))
if err == nil {
*cwt = cwTime(t)
return nil
}
t, err = time.Parse(`"2006-01-02 15:04:05"`, string(data))
if err == nil {
*cwt = cwTime(t)
return nil
}
// Also e.g. "UserVehicleBoundTime": "2018-08-04T15:08:33Z"
t, err = time.Parse(`"2006-01-02T15:04:05Z"`, string(data))
if err == nil {
*cwt = cwTime(t)
return nil
}
// Also e.g. "GpsDatetime": "2018-08-05T10:18:47" in monthly statistics response
t, err = time.Parse(`"2006-01-02T15:04:05"`, string(data))
if err == nil {
*cwt = cwTime(t)
return nil
}
// Also e.g. "LastScheduledTime": "2018-08-04T15:08:33Z" in ClimateControlSchedule response
t, err = time.Parse(`"Jan _2, 2006 03:04 PM"`, string(data))
if err == nil {
*cwt = cwTime(t)
return nil
}
return fmt.Errorf("cannot parse %q as carwings time", string(data))
}
// FixLocation alters the location associated with the time, without changing
// the value. This is needed since all times are parsed as if they were UTC
// when in fact some of them are in the timezone specified in the session.
func (cwt cwTime) FixLocation(location *time.Location) cwTime {
t := time.Time(cwt)
return cwTime(time.Date(
t.Year(),
t.Month(),
t.Day(),
t.Hour(),
t.Minute(),
t.Second(),
t.Nanosecond(),
location,
))
}
type response interface {
Status() int
ErrorMessage() string
}
type baseResponse struct {
StatusCode json.RawMessage `json:"status"`
Message string `json:"message"`
}
func (r *baseResponse) Status() int {
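// The API sometimes encodes the status as a bare number and sometimes
// as a quoted string, so strip surrounding quotes before converting.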
s := r.StatusCode
if s[0] == '"' {
s = s[1 : len(s)-1]
}
v, _ := strconv.Atoi(string(s))
return v
}
func (r *baseResponse) ErrorMessage() string {
return r.Message
}
func apiRequest(endpoint string, params url.Values, target response) error {
req, err := http.NewRequest("POST", BaseURL+endpoint, strings.NewReader(params.Encode()))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.Header.Set("User-Agent", "")
if Debug {
body, err := httputil.DumpRequestOut(req, true)
if err != nil {
panic(err)
}
fmt.Fprintln(os.Stderr, string(body))
fmt.Fprintln(os.Stderr)
}
resp, err := Client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if Debug {
body, err := httputil.DumpResponse(resp, true)
if err != nil {
panic(err)
}
fmt.Fprintln(os.Stderr, string(body))
fmt.Fprintln(os.Stderr)
}
dec := json.NewDecoder(resp.Body)
if err := dec.Decode(target); err != nil {
return err
}
switch s := target.Status(); s {
case http.StatusOK:
return nil
case http.StatusUnauthorized, http.StatusRequestTimeout:
return ErrNotLoggedIn
default:
if e := target.ErrorMessage(); e != "" {
return fmt.Errorf("received status code %d (%s)", s, e)
}
return fmt.Errorf("received status code %d", s)
}
}
// Connect establishes a new authenticated Session with the Carwings
// service.
func (s *Session) Connect(username, password string) error {
params := url.Values{}
params.Set("initial_app_str", initialAppStrings)
var initResp struct {
baseResponse
Baseprm string `json:"baseprm"`
}
if err := apiRequest("InitialApp_v2.php", params, &initResp); err != nil {
return err
}
encpw, err := encrypt(password, initResp.Baseprm)
if err != nil {
return err
}
s.username = username
s.encpw = encpw
if s.Filename != "" {
if err := s.load(); err == nil {
return nil
} else if Debug {
fmt.Fprintf(os.Stderr, "Error loading session from %s: %v\n", s.Filename, err)
}
}
return s.Login()
}
func (s *Session) Login() error {
params := url.Values{}
params.Set("initial_app_str", initialAppStrings)
params.Set("UserId", s.username)
params.Set("Password", s.encpw)
params.Set("RegionCode", s.Region)
// Not a comprehensive representation, just what we need
type vehicleInfo struct {
VIN string `json:"vin"`
CustomSessionID string `json:"custom_sessionid"`
}
var loginResp struct {
baseResponse
// OMG this API... one of these three will be populated.
VehicleInfos []vehicleInfo `json:"vehicleInfo"`
VehicleInfoList struct {
VehicleInfos []vehicleInfo `json:"vehicleInfo"`
} `json:"vehicleInfoList"`
VehicleInfo vehicleInfo `json:"VehicleInfo"`
CustomerInfo struct {
Timezone string
VehicleInfo vehicleInfo `json:"VehicleInfo"`
}
}
if err := apiRequest("UserLoginRequest.php", params, &loginResp); err != nil {
return err
}
var vi vehicleInfo
switch {
case len(loginResp.VehicleInfos) > 0:
vi = loginResp.VehicleInfos[0]
case len(loginResp.VehicleInfoList.VehicleInfos) > 0:
vi = loginResp.VehicleInfoList.VehicleInfos[0]
case len(loginResp.CustomerInfo.VehicleInfo.VIN) > 0:
vi = loginResp.CustomerInfo.VehicleInfo
default:
vi = loginResp.VehicleInfo
}
if vi.VIN == "" {
return ErrVehicleInfoUnavailable
}
s.customSessionID = vi.CustomSessionID
s.VIN = vi.VIN
s.tz = loginResp.CustomerInfo.Timezone
loc, err := time.LoadLocation(loginResp.CustomerInfo.Timezone)
if err != nil {
loc = time.UTC
}
s.loc = loc
if s.Filename != "" {
return s.save()
}
return nil
}
func (s *Session) load() error {
if s.Filename[0] == '~' {
s.Filename = os.Getenv("HOME") + s.Filename[1:]
}
f, err := os.Open(s.Filename)
if err != nil {
return err
}
defer f.Close()
m := map[string]string{}
if err := json.NewDecoder(f).Decode(&m); err != nil {
return err
}
s.VIN = m["vin"]
s.customSessionID = m["customSessionID"]
s.tz = m["tz"]
loc, err := time.LoadLocation(m["tz"])
if err != nil {
loc = time.UTC
}
s.loc = loc
return nil
}
func (s *Session) save() error {
if s.Filename[0] == '~' {
s.Filename = os.Getenv("HOME") + s.Filename[1:]
}
f, err := os.OpenFile(s.Filename, os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
return err
}
m := map[string]string{
"vin": s.VIN,
"customSessionID": s.customSessionID,
"tz": s.tz,
}
if err := json.NewEncoder(f).Encode(m); err != nil {
f.Close()
os.Remove(s.Filename)
return err
}
return f.Close()
}
func (s *Session) apiRequest(endpoint string, params url.Values, target response) error {
params = s.setCommonParams(params)
err := apiRequest(endpoint, params, target)
if err == ErrNotLoggedIn {
if err := s.Login(); err != nil {
return err
}
params = s.setCommonParams(params)
return apiRequest(endpoint, params, target)
}
return err
}
func (s *Session) setCommonParams(params url.Values) url.Values {
if params == nil {
params = url.Values{}
}
params.Set("RegionCode", s.Region)
params.Set("VIN", s.VIN)
params.Set("custom_sessionid", s.customSessionID)
params.Set("tz", s.tz)
return params
}
// UpdateStatus asks the Carwings service to request an update from
// the vehicle. This is an asynchronous operation: it returns a
// "result key" that must be used to poll for status with the
// CheckUpdate method.
func (s *Session) UpdateStatus() (string, error) {
var resp struct {
baseResponse
ResultKey string `json:"resultKey"`
}
if err := s.apiRequest("BatteryStatusCheckRequest.php", nil, &resp); err != nil {
return "", err
}
return resp.ResultKey, nil
}
// CheckUpdate returns whether the update corresponding to the
// provided result key has finished.
func (s *Session) CheckUpdate(resultKey string) (bool, error) {
params := url.Values{}
params.Set("resultKey", resultKey)
var resp struct {
baseResponse
ResponseFlag int `json:"responseFlag,string"`
OperationResult string `json:"operationResult"`
}
if err := s.apiRequest("BatteryStatusCheckResultRequest.php", params, &resp); err != nil {
return false, err
}
var err error
if resp.OperationResult == electricWaveAbnormal {
err = ErrUpdateFailed
}
return resp.ResponseFlag == 1, err
}
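// A minimal polling sketch (illustrative only; "sess" is a placeholder
// *Session and the 5-second interval is an arbitrary choice): request an
// update, then poll CheckUpdate until the vehicle responds.
//
//	key, err := sess.UpdateStatus()
//	if err != nil {
//		return err
//	}
//	for done := false; !done; {
//		time.Sleep(5 * time.Second)
//		if done, err = sess.CheckUpdate(key); err != nil {
//			return err
//		}
//	}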
// BatteryStatus returns the most recent battery status from the
// Carwings service. Note that this data is not real-time: it is
// cached from the last time the vehicle data was updated. Use the
// UpdateStatus method to refresh vehicle data.
func (s *Session) BatteryStatus() (BatteryStatus, error) {
type batteryStatusRecord struct {
BatteryStatus struct {
BatteryChargingStatus string
BatteryCapacity int `json:",string"`
BatteryRemainingAmount string
BatteryRemainingAmountWH string
BatteryRemainingAmountKWH string
SOC struct {
Value int `json:",string"`
}
}
PluginState string
CruisingRangeAcOn json.Number `json:",string"`
CruisingRangeAcOff json.Number `json:",string"`
TimeRequiredToFull struct {
HourRequiredToFull int `json:",string"`
MinutesRequiredToFull int `json:",string"`
}
TimeRequiredToFull200 struct {
HourRequiredToFull int `json:",string"`
MinutesRequiredToFull int `json:",string"`
}
TimeRequiredToFull200_6kW struct {
HourRequiredToFull int `json:",string"`
MinutesRequiredToFull int `json:",string"`
}
NotificationDateAndTime cwTime
}
var resp struct {
baseResponse
BatteryStatusRecords json.RawMessage
}
if err := s.apiRequest("BatteryStatusRecordsRequest.php", nil, &resp); err != nil {
return BatteryStatus{}, err
}
if string(resp.BatteryStatusRecords) == "[]" {
return BatteryStatus{}, ErrBatteryStatusUnavailable
}
var batrec batteryStatusRecord
if err := json.Unmarshal(resp.BatteryStatusRecords, &batrec); err != nil {
return BatteryStatus{}, err
}
remaining, _ := strconv.Atoi(batrec.BatteryStatus.BatteryRemainingAmount)
remainingWH, _ := strconv.Atoi(batrec.BatteryStatus.BatteryRemainingAmountWH)
acOn, _ := batrec.CruisingRangeAcOn.Float64()
acOff, _ := batrec.CruisingRangeAcOff.Float64()
soc := batrec.BatteryStatus.SOC.Value
batteryLevelCapacity := float64(batrec.BatteryStatus.BatteryCapacity)
if soc == 0 {
if batteryLevelCapacity >= 0.0 && batteryLevelCapacity <= 12.0 {
// Leaf using 12th bar system; present as 12ths; 5/12 etc.
// batteryLevelCapacity can be lower than 12 because of degradation
// we explicitly use 12 instead of batteryLevelCapacity
batteryLevelCapacity = 12.0
}
soc = int(math.Round(float64(remaining) / batteryLevelCapacity * 100))
}
bs := BatteryStatus{
Timestamp: time.Time(batrec.NotificationDateAndTime).In(s.loc),
Capacity: int(batteryLevelCapacity),
Remaining: remaining,
RemainingWH: remainingWH,
StateOfCharge: soc,
CruisingRangeACOn: int(acOn),
CruisingRangeACOff: int(acOff),
PluginState: PluginState(batrec.PluginState),
ChargingStatus: ChargingStatus(batrec.BatteryStatus.BatteryChargingStatus),
TimeToFull: TimeToFull{
Level1: time.Duration(batrec.TimeRequiredToFull.HourRequiredToFull)*time.Hour + time.Duration(batrec.TimeRequiredToFull.MinutesRequiredToFull)*time.Minute,
Level2: time.Duration(batrec.TimeRequiredToFull200.HourRequiredToFull)*time.Hour + time.Duration(batrec.TimeRequiredToFull200.MinutesRequiredToFull)*time.Minute,
Level2At6kW: time.Duration(batrec.TimeRequiredToFull200_6kW.HourRequiredToFull)*time.Hour + time.Duration(batrec.TimeRequiredToFull200_6kW.MinutesRequiredToFull)*time.Minute,
},
}
return bs, nil
}
// ClimateControlStatus returns the most recent climate control status
// from the Carwings service.
func (s *Session) ClimateControlStatus() (ClimateStatus, error) {
type remoteACRecords struct {
OperationResult string
OperationDateAndTime cwTime
RemoteACOperation string
ACStartStopDateAndTime cwTime
ACStartStopURL string
CruisingRangeAcOn json.Number `json:",string"`
CruisingRangeAcOff json.Number `json:",string"`
PluginState string
ACDurationBatterySec int `json:",string"`
ACDurationPluggedSec int `json:",string"`
PreAC_unit string
PreAC_temp int `json:",string"`
}
var resp struct {
baseResponse
RemoteACRecords json.RawMessage
}
if err := s.apiRequest("RemoteACRecordsRequest.php", nil, &resp); err != nil {
return ClimateStatus{}, err
}
// Sometimes the RemoteACRecords field is an empty array
// instead of a struct value. This API... ¯\_(ツ)_/¯
if string(resp.RemoteACRecords) == "[]" {
return ClimateStatus{}, errors.New("climate status not available")
}
var racr remoteACRecords
if err := json.Unmarshal(resp.RemoteACRecords, &racr); err != nil {
return ClimateStatus{}, err
}
acOn, _ := racr.CruisingRangeAcOn.Float64()
acOff, _ := racr.CruisingRangeAcOff.Float64()
running := racr.RemoteACOperation == "START"
acStopTime := time.Time(racr.ACStartStopDateAndTime).In(s.loc)
if running {
if NotConnected == PluginState(racr.PluginState) {
acStopTime = acStopTime.Add(time.Second * time.Duration(racr.ACDurationBatterySec))
} else {
acStopTime = acStopTime.Add(time.Second * time.Duration(racr.ACDurationPluggedSec))
}
}
cs := ClimateStatus{
LastOperationTime: time.Time(racr.OperationDateAndTime.FixLocation(s.loc)),
Running: running,
PluginState: PluginState(racr.PluginState),
BatteryDuration: racr.ACDurationBatterySec,
PluggedDuration: racr.ACDurationPluggedSec,
TemperatureUnit: racr.PreAC_unit,
Temperature: racr.PreAC_temp,
ACStopTime: acStopTime,
CruisingRangeACOn: int(acOn),
CruisingRangeACOff: int(acOff),
}
return cs, nil
}
// ClimateOffRequest sends a request to turn off the climate control
// system. This is an asynchronous operation: it returns a "result
// key" that can be used to poll for status with the
// CheckClimateOffRequest method.
func (s *Session) ClimateOffRequest() (string, error) {
var resp struct {
baseResponse
ResultKey string `json:"resultKey"`
}
if err := s.apiRequest("ACRemoteOffRequest.php", nil, &resp); err != nil {
return "", err
}
return resp.ResultKey, nil
}
// CheckClimateOffRequest returns whether the ClimateOffRequest has
// finished.
func (s *Session) CheckClimateOffRequest(resultKey string) (bool, error) {
var resp struct {
baseResponse
ResponseFlag int `json:"responseFlag,string"` // 0 or 1
OperationResult string `json:"operationResult"`
TimeStamp cwTime `json:"timeStamp"`
HVACStatus string `json:"hvacStatus"`
}
params := url.Values{}
params.Set("resultKey", resultKey)
if err := s.apiRequest("ACRemoteOffResult.php", params, &resp); err != nil {
return false, err
}
return resp.ResponseFlag == 1, nil
}
// ClimateOnRequest sends a request to turn on the climate control
// system. This is an asynchronous operation: it returns a "result
// key" that can be used to poll for status with the
// CheckClimateOnRequest method.
func (s *Session) ClimateOnRequest() (string, error) {
var resp struct {
baseResponse
ResultKey string `json:"resultKey"`
}
if err := s.apiRequest("ACRemoteRequest.php", nil, &resp); err != nil {
return "", err
}
return resp.ResultKey, nil
}
// CheckClimateOnRequest returns whether the ClimateOnRequest has
// finished.
func (s *Session) CheckClimateOnRequest(resultKey string) (bool, error) {
var resp struct {
baseResponse
ResponseFlag int `json:"responseFlag,string"` // 0 or 1
OperationResult string `json:"operationResult"`
ACContinueTime string `json:"acContinueTime"`
TimeStamp cwTime `json:"timeStamp"`
HVACStatus string `json:"hvacStatus"`
}
params := url.Values{}
params.Set("resultKey", resultKey)
if err := s.apiRequest("ACRemoteResult.php", params, &resp); err != nil {
return false, err
}
return resp.ResponseFlag == 1, nil
}
// ChargingRequest begins charging a plugged-in vehicle.
func (s *Session) ChargingRequest() error {
var resp struct {
baseResponse
}
params := url.Values{}
params.Set("ExecuteTime", time.Now().In(s.loc).Format("2006-01-02"))
if err := s.apiRequest("BatteryRemoteChargingRequest.php", params, &resp); err != nil {
return err
}
return nil
}
// CabinTempRequest sends a request to get the cabin temperature. This is an
// asynchronous operation: it returns a "result key" that can be used
// to poll for status with the CheckCabinTempRequest method.
func (s *Session) CabinTempRequest() (string, error) {
var resp struct {
baseResponse
ResultKey string `json:"resultKey"`
}
if err := s.apiRequest("GetInteriorTemperatureRequestForNsp.php", nil, &resp); err != nil {
return "", err
}
return resp.ResultKey, nil
}
// CheckCabinTempRequest returns whether the CabinTempRequest has finished.
func (s *Session) CheckCabinTempRequest(resultKey string) (bool, error) {
var resp struct {
baseResponse
ResponseFlag int `json:"responseFlag,string"` // 0 or 1
Temperature int `json:"Inc_temp"`
}
params := url.Values{}
params.Set("resultKey", resultKey)
if err := s.apiRequest("GetInteriorTemperatureResultForNsp.php", params, &resp); err != nil {
return false, err
}
s.cabinTemp = resp.Temperature
return resp.ResponseFlag == 1, nil
}
// GetCabinTemp returns the latest cached cabin temperature result.
func (s *Session) GetCabinTemp() int {
return s.cabinTemp
}
// TripDetail holds the details of each trip. All of the parsed detail is
// used in both the response and the MonthlyStatistics.
type TripDetail struct {
// "PriceSimulatorDetailInfoTrip": [
// {
// "TripId": "1",
// "PowerConsumptTotal": "2461.12",
// "PowerConsumptMoter": "3812.22",
// "PowerConsumptMinus": "1351.1",
// "TravelDistance": "17841",
// "ElectricMileage": "13.8",
// "CO2Reduction": "3",
// "MapDisplayFlg": "NONACTIVE",
// "GpsDatetime": "2018-08-05T10:18:47"
// },
TripId int `json:",string"`
PowerConsumedTotal float64 `json:"PowerConsumptTotal,string"`
PowerConsumedMotor float64 `json:"PowerConsumptMoter,string"`
PowerRegenerated float64 `json:"PowerConsumptMinus,string"`
Meters int `json:"TravelDistance,string"`
Efficiency float64 `json:"ElectricMileage,string"`
CO2Reduction int `json:",string"`
MapDisplayFlag string `json:"MapDisplayFlg"`
GPSDateTime cwTime `json:"GpsDatetime"`
Started time.Time `json:",omitempty"`
}
// DateDetail is the detail for a single date
type DateDetail struct {
TargetDate string
Trips []TripDetail
}
// MonthlyTotals holds the various totals of things for the whole month
type MonthlyTotals struct {
Trips int `json:"TotalNumberOfTrips,string"`
PowerConsumed float64 `json:"TotalPowerConsumptTotal,string"`
PowerConsumedMotor float64 `json:"TotalPowerConsumptMoter,string"`
PowerRegenerated float64 `json:"TotalPowerConsumptMinus,string"`
MetersTravelled int `json:"TotalTravelDistance,string"`
Efficiency float64 `json:"TotalElectricMileage,string"`
CO2Reduction int `json:"TotalCO2Reductiont,string"`
}
// MonthlyStatistics is the structure returned which includes
// all of the trips and all of the totals as well as the electricity rate
// information that has been supplied to CarWings.
type MonthlyStatistics struct {
EfficiencyScale string
ElectricityRate float64
ElectricityBill float64
Dates []DateDetail
Total MonthlyTotals
}
// GetMonthlyStatistics gets the statistics for a particular month
func (s *Session) GetMonthlyStatistics(month time.Time) (MonthlyStatistics, error) {
// {
// "status": 200,
// "PriceSimulatorDetailInfoResponsePersonalData": {
// "TargetMonth": "201808",
// "TotalPowerConsumptTotal": "55.88882",
// "TotalPowerConsumptMoter": "71.44184",
// "TotalPowerConsumptMinus": "15.55302",
// "ElectricPrice": "0.15",
// "ElectricBill": "8.3833230",
// "ElectricCostScale": "kWh/100km",
// "MainRateFlg": "COUNTRY",
// "ExistFlg": "EXIST",
// "PriceSimulatorDetailInfoDateList": {
// "PriceSimulatorDetailInfoDate": [
// {
// "TargetDate": "2018-08-05",
// "PriceSimulatorDetailInfoTripList": {
// "PriceSimulatorDetailInfoTrip": [
// {
// "TripId": "1",
// "PowerConsumptTotal": "2461.12",
// "PowerConsumptMoter": "3812.22",
// "PowerConsumptMinus": "1351.1",
// "TravelDistance": "17841",
// "ElectricMileage": "13.8",
// "CO2Reduction": "3",
// "MapDisplayFlg": "NONACTIVE",
// "GpsDatetime": "2018-08-05T10:18:47"
// },
// { ... repeats for each trip ... }
// ]
// },
// "DisplayDate": "Aug 05"
// },
// { ... repeats for each day ... }
// ]
// },
// "PriceSimulatorTotalInfo": {
// "TotalNumberOfTrips": "23",
// "TotalPowerConsumptTotal": "55.88882",
// "TotalPowerConsumptMoter": "71.44184",
// "TotalPowerConsumptMinus": "15.55302",
// "TotalTravelDistance": "416252",
// "TotalElectricMileage": "0.0134",
// "TotalCO2Reductiont": "72"
// },
// "DisplayMonth": "Aug/2018"
// }
// }
type detailInfoDate struct {
// "PriceSimulatorDetailInfoDateList": {
// "PriceSimulatorDetailInfoDate": [
// {
// "TargetDate": "2018-08-05",
// "PriceSimulatorDetailInfoTripList": {
// "PriceSimulatorDetailInfoTrip": [
TargetDate string
// DisplayDate string // ignored
Trips struct {
List []TripDetail `json:"PriceSimulatorDetailInfoTrip"`
} `json:"PriceSimulatorDetailInfoTripList"`
}
var resp struct {
baseResponse
Data struct {
TargetMonth string
// The following three fields are ignored because they also appear in the totals
// - TotalPowerConsumptTotal
// - TotalPowerConsumptMoter
// - TotalPowerConsumptMinus
ElectricPrice float64 `json:",string"`
ElectricBill float64 `json:",string"`
ElectricCostScale string
// The following two fields are ignored because their meaning is unclear
// - MainRateFlg
// - ExistFlg
Detail struct {
RawList json.RawMessage `json:"PriceSimulatorDetailInfoDate"`
List []detailInfoDate `json:"-"`
} `json:"PriceSimulatorDetailInfoDateList"`
Total MonthlyTotals `json:"PriceSimulatorTotalInfo"`
} `json:"PriceSimulatorDetailInfoResponsePersonalData"`
// DisplayMonth string // ignored
}
ms := MonthlyStatistics{}
params := url.Values{}
params.Set("TargetMonth", month.In(s.loc).Format("200601"))
if err := s.apiRequest("PriceSimulatorDetailInfoRequest.php", params, &resp); err != nil {
return ms, err
}
// This field is an empty string instead of an object if there's no data.
if string(resp.Data.Detail.RawList) != `""` {
err := json.Unmarshal(resp.Data.Detail.RawList, &resp.Data.Detail.List)
if err != nil {
return ms, err
}
}
ms.EfficiencyScale = resp.Data.ElectricCostScale
ms.ElectricityRate = resp.Data.ElectricPrice
ms.ElectricityBill = resp.Data.ElectricBill
ms.Total = resp.Data.Total
ms.Dates = make([]DateDetail, 0, 31)
for i := 0; i < len(resp.Data.Detail.List); i++ {
trips := make([]TripDetail, 0, 10)
for j := 0; j < len(resp.Data.Detail.List[i].Trips.List); j++ {
trip := resp.Data.Detail.List[i].Trips.List[j]
trip.Started = time.Time(trip.GPSDateTime)
trips = append(trips, trip)
}
ms.Dates = append(ms.Dates, DateDetail{
TargetDate: resp.Data.Detail.List[i].TargetDate,
Trips: trips,
})
}
return ms, nil
}
// DailyStatistics holds the statistics for a day
type DailyStatistics struct {
TargetDate time.Time
EfficiencyScale string
Efficiency float64 `json:",string"`
EfficiencyLevel int `json:",string"`
PowerConsumedMotor float64 `json:",string"`
PowerConsumedMotorLevel int `json:",string"`
PowerRegeneration float64 `json:",string"`
PowerRegenerationLevel int `json:",string"`
PowerConsumedAUX float64 `json:",string"`
PowerConsumedAUXLevel int `json:",string"`
}
// GetDailyStatistics returns the statistics for today. The day parameter is
// currently ignored (see the TODO below about the TargetDate parameter).
func (s *Session) GetDailyStatistics(day time.Time) (DailyStatistics, error) {
// {
// "status": 200,
// "DriveAnalysisBasicScreenResponsePersonalData": {
// "DateSummary": {
// "TargetDate": "2018-08-12",
// "ElectricMileage": "11.9",
// "ElectricMileageLevel": "5",
// "PowerConsumptMoter": "140.5",
// "PowerConsumptMoterLevel": "5",
// "PowerConsumptMinus": "29.3",
// "PowerConsumptMinusLevel": "2",
// "PowerConsumptAUX": "7.4",
// "PowerConsumptAUXLevel": "5",
// "DisplayDate": "Aug 12, 18"
// },
// "ElectricCostScale": "kWh/100km"
// },
// "AdviceList": {
// "Advice": {
// "title": "Drive Tip:",
// "body": "Use remote climate control or timer so that the cabin will be at a comfortable temperature before starting. This allows the car to save energy whilst being driven."
// }
// }
// }
var resp struct {
baseResponse
Data struct {
Stats struct {
TargetDate string
ElectricMileage float64 `json:",string"`
ElectricMileageLevel int `json:",string"`
PowerConsumptMoter float64 `json:",string"`
PowerConsumptMoterLevel int `json:",string"`
PowerConsumptMinus float64 `json:",string"`
PowerConsumptMinusLevel int `json:",string"`
PowerConsumptAUX float64 `json:",string"`
PowerConsumptAUXLevel int `json:",string"`
} `json:"DateSummary"`
ElectricCostScale string
} `json:"DriveAnalysisBasicScreenResponsePersonalData"`
}
ds := DailyStatistics{}
params := url.Values{}
// TODO: There's a bug getting stats for any day other than today: we have guessed the
// TODO: name of the `TargetDate` parameter wrong :-(
// TODO: It isn't `TargetDate` or `DetailTargetDate`
// On the other hand, we can get/calculate all of this (and more) from the daily records in the
// MonthlyStatistics response, so maybe it's silly to do it this way?
// params.Set("DetailTargetDate", day.In(s.loc).Format("2006-01-02"))
if err := s.apiRequest("DriveAnalysisBasicScreenRequestEx.php", params, &resp); err != nil {
return ds, err
}
if resp.Data.Stats.TargetDate == "" {
return ds, errors.New("daily driving statistics not available")
}
ds.TargetDate, _ = time.ParseInLocation("2006-01-02", resp.Data.Stats.TargetDate, s.loc)
ds.EfficiencyScale = resp.Data.ElectricCostScale
ds.Efficiency = resp.Data.Stats.ElectricMileage
ds.EfficiencyLevel = resp.Data.Stats.ElectricMileageLevel
ds.PowerConsumedMotor = resp.Data.Stats.PowerConsumptMoter
ds.PowerConsumedMotorLevel = resp.Data.Stats.PowerConsumptMoterLevel
ds.PowerRegeneration = resp.Data.Stats.PowerConsumptMinus
ds.PowerRegenerationLevel = resp.Data.Stats.PowerConsumptMinusLevel
ds.PowerConsumedAUX = resp.Data.Stats.PowerConsumptAUX
ds.PowerConsumedAUXLevel = resp.Data.Stats.PowerConsumptAUXLevel
return ds, nil
}
|
[
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
cli/commands/edit/edit.go
|
package edit
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"reflect"
"strings"
corev2 "github.com/sensu/sensu-go/api/core/v2"
"github.com/sensu/sensu-go/cli"
"github.com/sensu/sensu-go/cli/client/config"
"github.com/sensu/sensu-go/cli/commands/create"
"github.com/sensu/sensu-go/cli/commands/dump"
"github.com/sensu/sensu-go/cli/commands/helpers"
"github.com/sensu/sensu-go/types"
"github.com/spf13/cobra"
)
// vi is the default editor!
const defaultEditor = "vi"
func extension(format string) string {
switch format {
case config.FormatJSON, config.FormatWrappedJSON:
return "json"
default:
return "yaml"
}
}
type lifter interface {
Lift() types.Resource
}
type namespaceFormat interface {
Namespace() string
Format() string
}
type client interface {
Get(string, interface{}) error
}
func dumpResource(client client, cfg namespaceFormat, typeName string, key []string, to io.Writer) error {
// Determine the requested resource type. We will use this resource only to
// determine its path in the store.
requested, err := dump.ResolveResource(typeName)
if err != nil {
return fmt.Errorf("invalid resource type: %s", typeName)
}
switch r := requested.(type) {
case *corev2.Event:
// Need an exception for event, because it's a special little type
if len(key) != 2 {
return errors.New("events need an entity and check component")
}
r.Entity = &corev2.Entity{
ObjectMeta: corev2.ObjectMeta{
Namespace: cfg.Namespace(),
Name: key[0],
},
}
r.Check = &corev2.Check{
ObjectMeta: corev2.ObjectMeta{
Namespace: cfg.Namespace(),
Name: key[1],
},
}
case *corev2.Check:
// Special case here takes care of the check naming boondoggle
requested = &corev2.CheckConfig{}
if len(key) != 1 {
return errors.New("resource name missing")
}
requested.SetObjectMeta(corev2.ObjectMeta{
Namespace: cfg.Namespace(),
Name: key[0],
})
default:
if len(key) != 1 {
return errors.New("resource name missing")
}
requested.SetObjectMeta(corev2.ObjectMeta{
Namespace: cfg.Namespace(),
Name: key[0],
})
}
if lifter, ok := requested.(lifter); ok {
requested = lifter.Lift()
}
// Determine the expected type for the store response: either a
// corev2.Resource or a types.Wrapper. We assume that all resources
// outside core/v2 are stored as wrapped values.
var response interface{}
if types.ApiVersion(reflect.Indirect(reflect.ValueOf(requested)).Type().PkgPath()) == path.Join(corev2.APIGroupName, corev2.APIVersion) {
response, _ = dump.ResolveResource(typeName)
} else {
response = &types.Wrapper{}
}
if err := client.Get(requested.URIPath(), &response); err != nil {
return err
}
// Retrieve the concrete resource value from the response
var resource corev2.Resource
switch r := response.(type) {
case corev2.Resource:
resource = r
case *types.Wrapper:
resource = r.Value
default:
return fmt.Errorf("unexpected response type %T. Make sure the resource type is valid", response)
}
format := cfg.Format()
switch format {
case "wrapped-json", "json":
return helpers.PrintWrappedJSON(resource, to)
default:
return helpers.PrintYAML([]types.Resource{resource}, to)
}
}
func dumpBlank(cfg namespaceFormat, typeName string, to io.Writer) error {
resource, err := dump.ResolveResource(typeName)
if err != nil {
return fmt.Errorf("invalid resource type: %s", typeName)
}
switch r := resource.(type) {
case *corev2.Event:
r.Entity = &corev2.Entity{
ObjectMeta: corev2.ObjectMeta{
Namespace: cfg.Namespace(),
},
}
r.Check = &corev2.Check{
ObjectMeta: corev2.ObjectMeta{
Namespace: cfg.Namespace(),
},
}
case *corev2.Check:
// Special case here takes care of the check naming boondoggle
resource = &corev2.CheckConfig{}
resource.SetObjectMeta(corev2.ObjectMeta{
Namespace: cfg.Namespace(),
})
default:
resource.SetObjectMeta(corev2.ObjectMeta{
Namespace: cfg.Namespace(),
})
}
if lifter, ok := resource.(lifter); ok {
resource = lifter.Lift()
}
format := cfg.Format()
switch format {
case "wrapped-json", "json":
return helpers.PrintWrappedJSON(resource, to)
default:
return helpers.PrintYAML([]types.Resource{resource}, to)
}
}
func Command(cli *cli.SensuCli) *cobra.Command {
cmd := &cobra.Command{
Use: "edit [RESOURCE TYPE] [KEY]...",
Short: "Edit resources interactively",
RunE: func(cmd *cobra.Command, args []string) error {
blank, err := cmd.Flags().GetBool("blank")
if err != nil {
return err
}
if len(args) < 2 && !blank {
_ = cmd.Help()
return errors.New("invalid argument(s) received")
} else if len(args) < 1 && blank {
_ = cmd.Help()
return errors.New("invalid argument(s) received")
}
tf, err := ioutil.TempFile("", fmt.Sprintf("sensu-resource.*.%s", extension(cli.Config.Format())))
if err != nil {
return err
}
defer os.Remove(tf.Name())
orig := new(bytes.Buffer)
writer := io.MultiWriter(orig, tf)
if blank {
if err := dumpBlank(cli.Config, args[0], writer); err != nil {
return err
}
} else {
if err := dumpResource(cli.Client, cli.Config, args[0], args[1:], writer); err != nil {
return err
}
}
if err := tf.Close(); err != nil {
return err
}
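// Open the dumped resource in the user's editor (EDITOR, falling back
// to vi), then re-read the file to see whether anything changed.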
editorEnv := os.Getenv("EDITOR")
if strings.TrimSpace(editorEnv) == "" {
editorEnv = defaultEditor
}
editorArgs := parseCommand(editorEnv)
execCmd := exec.Command(editorArgs[0], append(editorArgs[1:], tf.Name())...)
execCmd.Stdin = os.Stdin
execCmd.Stdout = os.Stdout
execCmd.Stderr = os.Stderr
if err := execCmd.Run(); err != nil {
return err
}
changedBytes, err := ioutil.ReadFile(tf.Name())
if err != nil {
return err
}
if bytes.Equal(orig.Bytes(), changedBytes) {
return nil
}
resources, err := create.ParseResources(bytes.NewReader(changedBytes))
if err != nil {
return err
}
if len(resources) == 0 {
return errors.New("no resources were parsed")
}
if err := create.ValidateResources(resources, cli.Config.Namespace()); err != nil {
return err
}
if err := create.PutResources(cli.Client, resources); err != nil {
return err
}
fmt.Fprintf(cmd.OutOrStdout(), "Updated %s\n", resources[0].Value.URIPath())
return nil
},
}
helpers.AddFormatFlag(cmd.Flags())
_ = cmd.Flags().BoolP("blank", "b", false, "edit a blank resource, and create it on save")
return cmd
}
func parseCommand(cmd string) []string {
scanner := bufio.NewScanner(strings.NewReader(cmd))
scanner.Split(bufio.ScanWords)
var result []string
for scanner.Scan() {
result = append(result, scanner.Text())
}
if err := scanner.Err(); err != nil {
// unlikely
panic(err)
}
return result
}
|
[
"\"EDITOR\""
] |
[] |
[
"EDITOR"
] |
[]
|
["EDITOR"]
|
go
| 1 | 0 | |
backend/silent_voice_29079/wsgi.py
|
"""
WSGI config for silent_voice_29079 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'silent_voice_29079.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# Project X-Ray documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 5 11:04:37 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import re
# Markdown support
import recommonmark
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
from markdown_code_symlinks import LinkParser, MarkdownSymlinksDomain
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# yapf: disable
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.imgmath',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx_markdown_tables'
]
# yapf: enable
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
source_parsers = {
'.md': 'markdown_code_symlinks.LinkParser',
}
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Project X-Ray'
copyright = u'2018, SymbiFlow Team'
author = u'SymbiFlow Team'
# Enable github links when not on readthedocs
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
html_context = {
"display_github": True, # Integrate GitHub
"github_user": "symbiflow", # Username
"github_repo": "prjxray", # Repo name
"github_version": "master", # Version
"conf_py_path": "/doc/",
}
else:
docs_dir = os.path.abspath(os.path.dirname(__file__))
print("Docs dir is:", docs_dir)
import subprocess
subprocess.call('git fetch origin --unshallow', cwd=docs_dir, shell=True)
subprocess.check_call('git fetch origin --tags', cwd=docs_dir, shell=True)
subprocess.check_call('make links', cwd=docs_dir, shell=True)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = re.sub('^v', '', os.popen('git describe ').read().strip())
# The short X.Y version.
version = release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
# yapf: disable
exclude_patterns = [
'_build',
'architecture/copying.md',
'db_dev_process/minitests/index/**',
'db_dev_process/fuzzers/index/**'
]
# yapf: enable
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_materialdesign_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# Specify a list of menu in Header.
# Tuples forms:
# ('Name', 'external url or path of pages in the document', boolean, 'icon name')
#
# Third argument:
# True indicates an external link.
# False indicates path of pages in the document.
#
# Fourth argument:
# Specify the icon name.
# For details see link.
# https://material.io/icons/
'header_links': [
('Home', 'index', False, 'home'),
("GitHub", "https://github.com/SymbiFlow/prjxray", True, 'link')
],
# Customize css colors.
# For details see link.
# https://getmdl.io/customize/index.html
#
# Values: amber, blue, brown, cyan deep_orange, deep_purple, green, grey, indigo, light_blue,
# light_green, lime, orange, pink, purple, red, teal, yellow(Default: indigo)
'primary_color':
'deep_purple',
# Values: Same as primary_color. (Default: pink)
'accent_color':
'purple',
# Customize layout.
# For details see link.
# https://getmdl.io/components/index.html#layout-section
'fixed_drawer':
True,
'fixed_header':
True,
'header_waterfall':
True,
'header_scroll':
False,
# Render title in header.
# Values: True, False (Default: False)
'show_header_title':
False,
# Render title in drawer.
# Values: True, False (Default: True)
'show_drawer_title':
True,
# Render footer.
# Values: True, False (Default: True)
'show_footer':
True
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'prjxray'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc, 'ProjectX-Ray.tex', u'Project X-Ray Documentation',
u'SymbiFlow Team', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'projectx-ray', u'Project X-Ray Documentation', [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc, 'ProjectX-Ray', u'Project X-Ray Documentation', author,
'ProjectX-Ray', 'One line description of project.', 'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
def setup(app):
github_code_repo = 'https://github.com/SymbiFlow/prjxray/'
github_code_branch = 'blob/master/'
docs_root_dir = os.path.realpath(os.path.dirname(__file__))
code_root_dir = os.path.realpath(os.path.join(docs_root_dir, ".."))
MarkdownSymlinksDomain.init_domain(
github_code_repo, github_code_branch, docs_root_dir, code_root_dir)
MarkdownSymlinksDomain.find_links()
app.add_domain(MarkdownSymlinksDomain)
app.add_config_value(
'recommonmark_config', {
'github_code_repo': github_code_repo,
}, True)
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'quoteshitter.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
docker/utils/core_file_processor.py
|
#!/usr/bin/env python
import logging
import sys
import ConfigParser
import os
import socket
import time
import subprocess
from string import Template
import textwrap
import boto.ses
def format_time(epoch_time):
time_format = "%Y-%m-%dT%H:%M:%S"
return time.strftime(time_format, time.gmtime(epoch_time))
class CoreMailer(object):
def __init__(self, config):
self.config = config
self.hostname = self.config.get('Config', 'hostname')
self.out = sys.stdout
def find_core(self):
path = self.config.get('Config', 'cores')
core_filter = self.config.get('Config', 'core_filter')
cores = [os.path.join(path, core) for core in os.listdir(path) if core_filter in core]
if len(cores):
return max(cores, key=os.path.getctime)
def filter_logs(self, logs):
log_filter = self.config.get('Config', 'log_filter')
if not log_filter:
return logs
def strip_prefix(line):
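            # Keep everything before the first space, drop the text from there
            # up to the next colon (e.g. a syslog host/program tag), and keep
            # the rest of the line from the colon onwards.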
first_space = line.index(' ')
following_colon = line.index(':', first_space)
return line[0:first_space] + line[following_colon:]
lines = logs.split("\n")
filtered = filter(lambda line: log_filter in line, lines)
stripped = map(strip_prefix, filtered)
return "\n".join(stripped)
def find_logs(self, epoch_time):
log = self.config.get('Config', 'log')
formatted_time = format_time(epoch_time)
logging.info('Searching %s for logs around %s', log, formatted_time)
command = ["egrep",
"-C1000",
("^%s" % formatted_time),
log]
try:
return self.filter_logs(subprocess.check_output(command))
except subprocess.CalledProcessError:
return 'Unable to retrieve logs around %s' % formatted_time
def get_trace(self, core):
binary = self.config.get('Config', 'bin')
logging.info('Processing core file %s with binary %s', core, binary)
# matschaffer: this is really awful
# But lldb just exits with no output and exit code -11 if I try to run
# this script as a container entry point
lldb_command = "lldb-3.6 -f %(binary)s -c %(core)s --batch " + \
"-o 'target create -c \"%(core)s\" \"%(binary)s\"' " + \
"-o 'script import time; time.sleep(1)' " + \
"-o 'thread backtrace all'"
command = ["script", "-c",
(lldb_command % {"core": core, "binary": binary})]
return subprocess.check_output(command, stderr=subprocess.STDOUT)
def send_alert(self, epoch_time, trace, logs):
template_vars = {
"hostname": self.hostname,
"binary": self.config.get('Config', 'bin'),
"formatted_time": format_time(epoch_time),
"trace": trace,
"logs": logs
}
sender = self.config.get('Config', 'from')
recipient = self.config.get('Config', 'to')
subject = 'stellar-core crash on %(hostname)s' % template_vars
template = textwrap.dedent("""
<p>${binary} on ${hostname} crashed at ${formatted_time} with the
following back traces:</p>
<pre><code>
${trace}
</code></pre>
<h2>Extracted logs</h2>
<pre><code>
${logs}
</code></pre>
""")
body = Template(template).substitute(template_vars)
logging.info("Sending core alert from %s to %s", sender, recipient)
self.send_email(sender, recipient, subject, body)
def send_email(self, sender, recipient, subject, body):
conn = boto.ses.connect_to_region(self.config.get('Config', 'region'))
# noinspection PyTypeChecker
conn.send_email(sender, subject, None, [recipient], html_body=body)
def output_trace(self, epoch_time, trace):
template_vars = {
"hostname": self.hostname,
"binary": self.config.get('Config', 'bin'),
"formatted_time": format_time(epoch_time),
"trace": trace
}
template = textwrap.dedent("""
${binary} on ${hostname} crashed at ${formatted_time} with the
following back traces:
${trace}
""")
body = Template(template).substitute(template_vars)
self.out.write(body)
def archive_core(self, core):
command_string = self.config.get('Config', 'archive_command')
if command_string:
core_path = os.path.join(self.hostname, os.path.basename(core))
command_string = command_string.format(core, core_path)
logging.info(subprocess.check_output(command_string.split(' ')))
else:
            logging.warning("No archive command, just removing core file")
os.remove(core)
def run(self, single_core):
core = single_core or self.find_core()
mode = self.config.get('Config', 'mode')
if core:
logging.info('Found core file %s', core)
epoch_time = os.path.getctime(core)
trace = self.get_trace(core)
if mode == "aws":
logs = self.find_logs(epoch_time)
self.send_alert(epoch_time, trace, logs)
self.archive_core(core)
elif mode == "local":
self.output_trace(epoch_time, trace)
else:
logging.fatal("Unknown MODE setting: %s", mode)
sys.exit(1)
else:
logging.info('No core file found for processing')
if __name__ == "__main__":
if len(sys.argv) > 1:
single_core = sys.argv[1]
else:
single_core = None
config_file = "/etc/core_file_processor.ini"
logging.basicConfig(level=logging.INFO)
config_parser = ConfigParser.ConfigParser({
"region": "us-east-1",
"cores": "/cores",
"log": "/host/syslog",
"log_filter": os.environ.get('CORE_LOG_FILTER'),
"core_filter": "stellar-core",
"hostname": socket.gethostname(),
"from": "%(hostname)s <ops+%(hostname)[email protected]>",
"to": os.environ.get('CORE_ALERT_RECIPIENT'),
"bin": "/usr/local/bin/stellar-core",
"archive_command": os.environ.get('CORE_ARCHIVE_COMMAND'),
"mode": os.environ.get('MODE', 'aws')
})
config_parser.add_section("Config")
config_parser.read(config_file)
mailer = CoreMailer(config_parser)
mailer.run(single_core)
|
[] |
[] |
[
"CORE_ALERT_RECIPIENT",
"MODE",
"CORE_LOG_FILTER",
"CORE_ARCHIVE_COMMAND"
] |
[]
|
["CORE_ALERT_RECIPIENT", "MODE", "CORE_LOG_FILTER", "CORE_ARCHIVE_COMMAND"]
|
python
| 4 | 0 | |
EnvReplacer.java
|
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Files;
import java.nio.file.FileSystems;
import java.util.Map;
import java.util.List;
import java.util.ArrayList;
/**
* Static class containing utilities for replacing environment variables
* in files and lists of strings.
*/
public class EnvReplacer {
/**
* Replaces environment variables in a {@link List} of Strings.
* As a second step, it will unescape any backslashed characters
* in the string. If you have a substring like \${HOME}, this will
* be rendered to ${HOME}, while ${HOME} will be replaced with the content
* of the HOME environment variable.
*
* @param lines The List of Strings to replace env vars in
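     *
     * <p>Illustrative example, assuming HOME=/home/alice in the environment:</p>
     * <pre>
     *   "path=${HOME}/bin"  becomes "path=/home/alice/bin"
     *   "keep \${HOME}"     becomes "keep ${HOME}"
     * </pre>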
*/
public static List<String> replaceEnv(List<String> lines) {
Map<String, String> envVars = System.getenv();
List<String> outLines = new ArrayList<String>();
for (String line : lines) {
String tempLine = line;
for (String envVar : envVars.keySet()) {
tempLine = replaceVar(tempLine, envVar, envVars.get(envVar));
}
tempLine = unescape(tempLine);
outLines.add(tempLine);
}
return outLines;
}
private static String replaceVar(String s, String name, String value) {
String temp = s;
String findName = String.format("${%s}", name);
int index = temp.indexOf(findName);
while (index >= 0) {
if (index > 0) {
if (temp.charAt(index - 1) == '\\')
return temp;
}
temp = String.format("%s%s%s", temp.substring(0, index), value, temp.substring(index + findName.length()));
index = temp.indexOf(findName);
}
return temp;
}
private static String unescape(String s) {
StringBuilder sb = new StringBuilder();
boolean lastWasEscape = false;
for (int i = 0; i < s.length(); ++i) {
char c = s.charAt(i);
if (lastWasEscape) {
sb.append(c);
lastWasEscape = false;
} else {
if (c == '\\')
lastWasEscape = true;
else {
sb.append(c);
lastWasEscape = false;
}
}
}
return sb.toString();
}
/**
     * Like {@link #replaceEnv(List)}, but reads input from a file and writes
* to a file.
*
* @param inFile Path pointing to a template file, has to exist
* @param outFile Path pointing to an output file; will be overwritten if exists.
*/
public static void replaceEnv(Path inFile, Path outFile) throws IOException {
List<String> lines = Files.readAllLines(inFile);
List<String> outLines = replaceEnv(lines);
Files.write(outFile, outLines);
}
private static void printUsage() {
System.out.println("Usage: java ReplaceEnv <infile> [<outfile>]");
}
private static Path resolveFile(String filePathString, boolean mustExist) throws IOException {
Path filePath = FileSystems.getDefault().getPath(filePathString);
if (mustExist && !filePath.toFile().exists())
throw new IOException("File does not exist: '" + filePathString + "'.");
return filePath;
}
/**
* You can call this class as an executable; it takes one or two parameters,
* inFile and outFile. If only inFile is present (the first parameter), the output
* is written to stdout, otherwise it is written to outFile.
*
* @param args args[0] is inFile, args[1] is (optionally) outFile.
*/
public static void main(String[] args) {
if (args.length == 0) {
printUsage();
System.exit(1);
}
try {
if (args.length == 1) {
// Single input, output to stdout
Path inPath = resolveFile(args[0], true);
List<String> lines = Files.readAllLines(inPath);
List<String> outLines = replaceEnv(lines);
for (String line : outLines) {
System.out.println(line);
}
} else if (args.length == 2) {
// Input and Output
Path inPath = resolveFile(args[0], true);
Path outPath = resolveFile(args[1], false);
replaceEnv(inPath, outPath);
}
System.exit(0);
} catch (Exception ex) {
System.err.println("An error occurred: " + ex.getMessage());
System.exit(1);
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
selfdrive/car/interfaces.py
|
import os
import time
from cereal import car
from common.kalman.simple_kalman import KF1D
from common.realtime import DT_CTRL
from selfdrive.car import gen_empty_fingerprint
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.events import Events
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX
GearShifter = car.CarState.GearShifter
EventName = car.CarEvent.EventName
MAX_CTRL_SPEED = (V_CRUISE_MAX + 4) * CV.KPH_TO_MS  # (144 + 4) km/h ~= 92 mph
# generic car and radar interfaces
class CarInterfaceBase():
def __init__(self, CP, CarController, CarState):
self.CP = CP
self.VM = VehicleModel(CP)
self.frame = 0
self.low_speed_alert = False
if CarState is not None:
self.CS = CarState(CP)
self.cp = self.CS.get_can_parser(CP)
self.cp_cam = self.CS.get_cam_can_parser(CP)
self.cp_body = self.CS.get_body_can_parser(CP)
self.CC = None
if CarController is not None:
self.CC = CarController(self.cp.dbc_name, CP, self.VM)
@staticmethod
def calc_accel_override(a_ego, a_target, v_ego, v_target):
return 1.
@staticmethod
def compute_gb(accel, speed):
raise NotImplementedError
@staticmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), car_fw=None):
raise NotImplementedError
# returns a set of default params to avoid repetition in car specific params
@staticmethod
def get_std_params(candidate, fingerprint):
ret = car.CarParams.new_message()
ret.carFingerprint = candidate
ret.isPandaBlack = True # TODO: deprecate this field
# standard ALC params
ret.steerControlType = car.CarParams.SteerControlType.torque
ret.steerMaxBP = [0.]
ret.steerMaxV = [1.]
ret.minSteerSpeed = 0.
# stock ACC by default
ret.enableCruise = True
ret.minEnableSpeed = -1. # enable is done by stock ACC, so ignore this
    ret.steerRatioRear = 0.  # no rear steering, at least on the listed cars above
ret.gasMaxBP = [0.]
    ret.gasMaxV = [.5]  # half of max gas
ret.brakeMaxBP = [0.]
ret.brakeMaxV = [1.]
ret.openpilotLongitudinalControl = False
ret.startAccel = 0.0
ret.stoppingControl = False
ret.longitudinalTuning.deadzoneBP = [0.]
ret.longitudinalTuning.deadzoneV = [0.]
ret.longitudinalTuning.kpBP = [0.]
ret.longitudinalTuning.kpV = [1.]
ret.longitudinalTuning.kiBP = [0.]
ret.longitudinalTuning.kiV = [1.]
return ret
# returns a car.CarState, pass in car.CarControl
def update(self, c, can_strings):
raise NotImplementedError
# return sendcan, pass in a car.CarControl
def apply(self, c):
raise NotImplementedError
def create_common_events(self, cs_out, extra_gears=[], gas_resume_speed=-1, pcm_enable=True): # pylint: disable=dangerous-default-value
events = Events()
# if cs_out.doorOpen:
# events.add(EventName.doorOpen)
# if cs_out.seatbeltUnlatched:
# events.add(EventName.seatbeltNotLatched)
if cs_out.gearShifter != GearShifter.drive and cs_out.gearShifter not in extra_gears:
events.add(EventName.wrongGear)
# if cs_out.gearShifter == GearShifter.reverse:
# events.add(EventName.reverseGear)
if not cs_out.cruiseState.available:
events.add(EventName.wrongCarMode)
if cs_out.espDisabled:
events.add(EventName.espDisabled)
#if cs_out.gasPressed:
# events.add(EventName.gasPressed)
if cs_out.stockFcw:
events.add(EventName.stockFcw)
if cs_out.stockAeb:
events.add(EventName.stockAeb)
if cs_out.vEgo > MAX_CTRL_SPEED:
events.add(EventName.speedTooHigh)
if cs_out.cruiseState.nonAdaptive:
events.add(EventName.wrongCruiseMode)
if cs_out.steerError:
events.add(EventName.steerUnavailable)
elif cs_out.steerWarning:
events.add(EventName.steerTempUnavailable)
# Disable on rising edge of gas or brake. Also disable on brake when speed > 0.
# Optionally allow to press gas at zero speed to resume.
# e.g. Chrysler does not spam the resume button yet, so resuming with gas is handy. FIXME!
if (cs_out.gasPressed and (not self.CS.out.gasPressed) and cs_out.vEgo > gas_resume_speed) or \
(cs_out.brakePressed and (not self.CS.out.brakePressed or not cs_out.standstill)):
events.add(EventName.pedalPressed)
# we engage when pcm is active (rising edge)
if pcm_enable:
if cs_out.cruiseState.enabled and not self.CS.out.cruiseState.enabled:
events.add(EventName.pcmEnable)
elif not cs_out.cruiseState.enabled:
events.add(EventName.pcmDisable)
return events
class RadarInterfaceBase():
def __init__(self, CP):
self.pts = {}
self.delay = 0
self.radar_ts = CP.radarTimeStep
self.no_radar_sleep = 'NO_RADAR_SLEEP' in os.environ
def update(self, can_strings):
ret = car.RadarData.new_message()
if not self.no_radar_sleep:
time.sleep(self.radar_ts) # radard runs on RI updates
return ret
class CarStateBase:
def __init__(self, CP):
self.CP = CP
self.car_fingerprint = CP.carFingerprint
self.out = car.CarState.new_message()
self.cruise_buttons = 0
self.left_blinker_cnt = 0
self.right_blinker_cnt = 0
# Q = np.matrix([[10.0, 0.0], [0.0, 100.0]])
# R = 1e3
self.v_ego_kf = KF1D(x0=[[0.0], [0.0]],
A=[[1.0, DT_CTRL], [0.0, 1.0]],
C=[1.0, 0.0],
K=[[0.12287673], [0.29666309]])
def update_speed_kf(self, v_ego_raw):
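    # Returns the filtered (speed, acceleration) estimate for v_ego_raw;
    # the filter state is reset when the raw value jumps by more than 2 m/s.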
if abs(v_ego_raw - self.v_ego_kf.x[0][0]) > 2.0: # Prevent large accelerations when car starts at non zero speed
self.v_ego_kf.x = [[v_ego_raw], [0.0]]
v_ego_x = self.v_ego_kf.update(v_ego_raw)
return float(v_ego_x[0]), float(v_ego_x[1])
def update_blinker(self, blinker_time: int, left_blinker_lamp: bool, right_blinker_lamp: bool):
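    # Lamp signals can flicker, so each blinker is reported "on" until its
    # countdown (reset to blinker_time while the lamp is lit) reaches zero.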
self.left_blinker_cnt = blinker_time if left_blinker_lamp else max(self.left_blinker_cnt - 1, 0)
self.right_blinker_cnt = blinker_time if right_blinker_lamp else max(self.right_blinker_cnt - 1, 0)
return self.left_blinker_cnt > 0, self.right_blinker_cnt > 0
@staticmethod
def parse_gear_shifter(gear):
return {'P': GearShifter.park, 'R': GearShifter.reverse, 'N': GearShifter.neutral,
'E': GearShifter.eco, 'T': GearShifter.manumatic, 'D': GearShifter.drive,
'S': GearShifter.sport, 'L': GearShifter.low, 'B': GearShifter.brake}.get(gear, GearShifter.unknown)
@staticmethod
def get_cam_can_parser(CP):
return None
@staticmethod
def get_body_can_parser(CP):
return None
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
internal/libminiooni/libminiooni.go
|
// Package libminiooni implements the cmd/miniooni CLI. Miniooni is our
// experimental client used for research and QA testing.
//
// This CLI has CLI options that do not conflict with Measurement Kit
// v0.10.x CLI options. There are some options conflict with the legacy
// OONI Probe CLI options. Perfect backwards compatibility is not a
// design goal for miniooni. Rather, we aim to have as little conflict
// as possible such that we can run side by side QA checks.
//
// We extracted this package from cmd/miniooni to allow us to further
// integrate the miniooni CLI into other binaries (see for example the
// code at github.com/bassosimone/aladdin).
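//
// Illustrative invocation (assuming the web_connectivity experiment):
//
//	miniooni --yes -i https://example.com/ web_connectivity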
package libminiooni
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/url"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/apex/log"
"github.com/ooni/probe-cli/v3/internal/engine"
"github.com/ooni/probe-cli/v3/internal/engine/humanizex"
"github.com/ooni/probe-cli/v3/internal/engine/model"
"github.com/ooni/probe-cli/v3/internal/engine/netx/selfcensor"
"github.com/ooni/probe-cli/v3/internal/version"
"github.com/pborman/getopt/v2"
)
// Options contains the options you can set from the CLI.
type Options struct {
Annotations []string
ExtraOptions []string
HomeDir string
Inputs []string
InputFilePaths []string
NoJSON bool
NoCollector bool
ProbeServicesURL string
Proxy string
Random bool
ReportFile string
SelfCensorSpec string
TorArgs []string
TorBinary string
Tunnel string
Verbose bool
Yes bool
}
const (
softwareName = "miniooni"
softwareVersion = version.Version
)
var (
globalOptions Options
startTime = time.Now()
)
func init() {
getopt.FlagLong(
		&globalOptions.Annotations, "annotation", 'A', "Add annotation", "KEY=VALUE",
)
getopt.FlagLong(
&globalOptions.ExtraOptions, "option", 'O',
"Pass an option to the experiment", "KEY=VALUE",
)
getopt.FlagLong(
&globalOptions.InputFilePaths, "input-file", 'f',
"Path to input file to supply test-dependent input. File must contain one input per line.", "PATH",
)
getopt.FlagLong(
&globalOptions.HomeDir, "home", 0,
"Force specific home directory", "PATH",
)
getopt.FlagLong(
&globalOptions.Inputs, "input", 'i',
"Add test-dependent input to the test input", "INPUT",
)
getopt.FlagLong(
&globalOptions.NoJSON, "no-json", 'N', "Disable writing to disk",
)
getopt.FlagLong(
&globalOptions.NoCollector, "no-collector", 'n', "Don't use a collector",
)
getopt.FlagLong(
&globalOptions.ProbeServicesURL, "probe-services", 0,
"Set the URL of the probe-services instance you want to use", "URL",
)
getopt.FlagLong(
&globalOptions.Proxy, "proxy", 0, "Set the proxy URL", "URL",
)
getopt.FlagLong(
&globalOptions.Random, "random", 0, "Randomize inputs",
)
getopt.FlagLong(
&globalOptions.ReportFile, "reportfile", 'o',
"Set the report file path", "PATH",
)
getopt.FlagLong(
&globalOptions.SelfCensorSpec, "self-censor-spec", 0,
"Enable and configure self censorship", "JSON",
)
getopt.FlagLong(
&globalOptions.TorArgs, "tor-args", 0,
"Extra args for tor binary (may be specified multiple times)",
)
getopt.FlagLong(
&globalOptions.TorBinary, "tor-binary", 0,
"Specify path to a specific tor binary",
)
getopt.FlagLong(
&globalOptions.Tunnel, "tunnel", 0,
"Name of the tunnel to use (one of `tor`, `psiphon`)",
)
getopt.FlagLong(
&globalOptions.Verbose, "verbose", 'v', "Increase verbosity",
)
getopt.FlagLong(
&globalOptions.Yes, "yes", 0, "I accept the risk of running OONI",
)
}
func fatalWithString(msg string) {
panic(msg)
}
func fatalIfFalse(cond bool, msg string) {
if !cond {
panic(msg)
}
}
// Main is the main function of miniooni. This function parses the command line
// options and uses a global state. Use MainWithConfiguration if you want to avoid
// using any global state and relying on command line options.
//
// This function will panic in case of a fatal error. It is up to the caller
// integrating this function to either handle the panic or ignore it.
func Main() {
getopt.Parse()
fatalIfFalse(len(getopt.Args()) == 1, "Missing experiment name")
MainWithConfiguration(getopt.Arg(0), globalOptions)
}
func split(s string) (string, string, error) {
v := strings.SplitN(s, "=", 2)
if len(v) != 2 {
return "", "", errors.New("invalid key-value pair")
}
return v[0], v[1], nil
}
func fatalOnError(err error, msg string) {
if err != nil {
log.WithError(err).Warn(msg)
panic(msg)
}
}
func warnOnError(err error, msg string) {
if err != nil {
log.WithError(err).Warn(msg)
}
}
func mustMakeMap(input []string) (output map[string]string) {
output = make(map[string]string)
for _, opt := range input {
key, value, err := split(opt)
fatalOnError(err, "cannot split key-value pair")
output[key] = value
}
return
}
func mustParseURL(URL string) *url.URL {
rv, err := url.Parse(URL)
fatalOnError(err, "cannot parse URL")
return rv
}
type logHandler struct {
io.Writer
}
func (h *logHandler) HandleLog(e *log.Entry) (err error) {
s := fmt.Sprintf("[%14.6f] <%s> %s", time.Since(startTime).Seconds(), e.Level, e.Message)
if len(e.Fields) > 0 {
s += fmt.Sprintf(": %+v", e.Fields)
}
s += "\n"
_, err = h.Writer.Write([]byte(s))
return
}
// See https://gist.github.com/miguelmota/f30a04a6d64bd52d7ab59ea8d95e54da
func gethomedir(optionsHome string) string {
if optionsHome != "" {
return optionsHome
}
if runtime.GOOS == "windows" {
home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
if home == "" {
home = os.Getenv("USERPROFILE")
}
return home
}
if runtime.GOOS == "linux" {
home := os.Getenv("XDG_CONFIG_HOME")
if home != "" {
return home
}
// fallthrough
}
return os.Getenv("HOME")
}
const riskOfRunningOONI = `
Do you consent to OONI Probe data collection?
OONI Probe collects evidence of internet censorship and measures
network performance:
- OONI Probe will likely test objectionable sites and services;
- Anyone monitoring your internet activity (such as a government
or Internet provider) may be able to tell that you are using OONI Probe;
- The network data you collect will be published automatically
unless you use miniooni's -n command line flag.
To learn more, see https://ooni.org/about/risks/.
If you're onboard, re-run the same command and add the --yes flag, to
indicate that you understand the risks. This will create an empty file
named 'informed' in $HOME/.miniooni, meaning that we know you opted in
and we will not ask you this question again.
`
func canOpen(filepath string) bool {
stat, err := os.Stat(filepath)
return err == nil && stat.Mode().IsRegular()
}
func maybeWriteConsentFile(yes bool, filepath string) (err error) {
if yes {
err = ioutil.WriteFile(filepath, []byte("\n"), 0644)
}
return
}
// MainWithConfiguration is the miniooni main with a specific configuration
// represented by the experiment name and the current options.
//
// This function will panic in case of a fatal error. It is up to the caller
// integrating this function to either handle the panic or ignore it.
func MainWithConfiguration(experimentName string, currentOptions Options) {
ctx := context.Background()
extraOptions := mustMakeMap(currentOptions.ExtraOptions)
annotations := mustMakeMap(currentOptions.Annotations)
err := selfcensor.MaybeEnable(currentOptions.SelfCensorSpec)
fatalOnError(err, "cannot parse --self-censor-spec argument")
logger := &log.Logger{Level: log.InfoLevel, Handler: &logHandler{Writer: os.Stderr}}
if currentOptions.Verbose {
logger.Level = log.DebugLevel
}
if currentOptions.ReportFile == "" {
currentOptions.ReportFile = "report.jsonl"
}
log.Log = logger
//Mon Jan 2 15:04:05 -0700 MST 2006
log.Infof("Current time: %s", time.Now().Format("2006-01-02 15:04:05 MST"))
homeDir := gethomedir(currentOptions.HomeDir)
fatalIfFalse(homeDir != "", "home directory is empty")
miniooniDir := path.Join(homeDir, ".miniooni")
assetsDir := path.Join(miniooniDir, "assets")
err = os.MkdirAll(assetsDir, 0700)
fatalOnError(err, "cannot create assets directory")
log.Debugf("miniooni state directory: %s", miniooniDir)
consentFile := path.Join(miniooniDir, "informed")
fatalOnError(maybeWriteConsentFile(currentOptions.Yes, consentFile),
"cannot write informed consent file")
fatalIfFalse(canOpen(consentFile), riskOfRunningOONI)
log.Info("miniooni home directory: $HOME/.miniooni")
var proxyURL *url.URL
if currentOptions.Proxy != "" {
proxyURL = mustParseURL(currentOptions.Proxy)
}
kvstore2dir := filepath.Join(miniooniDir, "kvstore2")
kvstore, err := engine.NewFileSystemKVStore(kvstore2dir)
fatalOnError(err, "cannot create kvstore2 directory")
config := engine.SessionConfig{
AssetsDir: assetsDir,
KVStore: kvstore,
Logger: logger,
ProxyURL: proxyURL,
SoftwareName: softwareName,
SoftwareVersion: softwareVersion,
TorArgs: currentOptions.TorArgs,
TorBinary: currentOptions.TorBinary,
}
if currentOptions.ProbeServicesURL != "" {
config.AvailableProbeServices = []model.Service{{
Address: currentOptions.ProbeServicesURL,
Type: "https",
}}
}
sess, err := engine.NewSession(config)
fatalOnError(err, "cannot create measurement session")
defer func() {
sess.Close()
log.Infof("whole session: recv %s, sent %s",
humanizex.SI(sess.KibiBytesReceived()*1024, "byte"),
humanizex.SI(sess.KibiBytesSent()*1024, "byte"),
)
}()
log.Debugf("miniooni temporary directory: %s", sess.TempDir())
err = sess.MaybeStartTunnel(context.Background(), currentOptions.Tunnel)
fatalOnError(err, "cannot start session tunnel")
log.Info("Looking up OONI backends; please be patient...")
err = sess.MaybeLookupBackends()
fatalOnError(err, "cannot lookup OONI backends")
log.Info("Looking up your location; please be patient...")
err = sess.MaybeLookupLocation()
fatalOnError(err, "cannot lookup your location")
log.Debugf("- IP: %s", sess.ProbeIP())
log.Infof("- country: %s", sess.ProbeCC())
log.Infof("- network: %s (%s)", sess.ProbeNetworkName(), sess.ProbeASNString())
log.Infof("- resolver's IP: %s", sess.ResolverIP())
log.Infof("- resolver's network: %s (%s)", sess.ResolverNetworkName(),
sess.ResolverASNString())
builder, err := sess.NewExperimentBuilder(experimentName)
fatalOnError(err, "cannot create experiment builder")
inputLoader := engine.NewInputLoader(engine.InputLoaderConfig{
StaticInputs: currentOptions.Inputs,
SourceFiles: currentOptions.InputFilePaths,
InputPolicy: builder.InputPolicy(),
Session: sess,
URLLimit: 17,
})
inputs, err := inputLoader.Load(context.Background())
fatalOnError(err, "cannot load inputs")
if currentOptions.Random {
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
rnd.Shuffle(len(inputs), func(i, j int) {
inputs[i], inputs[j] = inputs[j], inputs[i]
})
}
err = builder.SetOptionsGuessType(extraOptions)
fatalOnError(err, "cannot parse extraOptions")
experiment := builder.NewExperiment()
defer func() {
log.Infof("experiment: recv %s, sent %s",
humanizex.SI(experiment.KibiBytesReceived()*1024, "byte"),
humanizex.SI(experiment.KibiBytesSent()*1024, "byte"),
)
}()
submitter, err := engine.NewSubmitter(ctx, engine.SubmitterConfig{
		Enabled: !currentOptions.NoCollector,
Session: sess,
Logger: log.Log,
})
fatalOnError(err, "cannot create submitter")
saver, err := engine.NewSaver(engine.SaverConfig{
		Enabled: !currentOptions.NoJSON,
Experiment: experiment,
FilePath: currentOptions.ReportFile,
Logger: log.Log,
})
fatalOnError(err, "cannot create saver")
inputProcessor := engine.InputProcessor{
Annotations: annotations,
Experiment: &experimentWrapper{
child: engine.NewInputProcessorExperimentWrapper(experiment),
total: len(inputs),
},
Inputs: inputs,
Options: currentOptions.ExtraOptions,
Saver: engine.NewInputProcessorSaverWrapper(saver),
Submitter: submitterWrapper{
child: engine.NewInputProcessorSubmitterWrapper(submitter),
},
}
err = inputProcessor.Run(ctx)
fatalOnError(err, "inputProcessor.Run failed")
}
type experimentWrapper struct {
child engine.InputProcessorExperimentWrapper
total int
}
func (ew *experimentWrapper) MeasureWithContext(
ctx context.Context, idx int, input string) (*model.Measurement, error) {
if input != "" {
log.Infof("[%d/%d] running with input: %s", idx+1, ew.total, input)
}
measurement, err := ew.child.MeasureWithContext(ctx, idx, input)
warnOnError(err, "measurement failed")
// policy: we do not stop the loop if the measurement fails
return measurement, nil
}
type submitterWrapper struct {
child engine.InputProcessorSubmitterWrapper
}
func (sw submitterWrapper) Submit(ctx context.Context, idx int, m *model.Measurement) error {
err := sw.child.Submit(ctx, idx, m)
warnOnError(err, "submitting measurement failed")
// policy: we do not stop the loop if measurement submission fails
return nil
}
|
[
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"XDG_CONFIG_HOME\"",
"\"HOME\""
] |
[] |
[
"HOMEPATH",
"HOMEDRIVE",
"USERPROFILE",
"HOME",
"XDG_CONFIG_HOME"
] |
[]
|
["HOMEPATH", "HOMEDRIVE", "USERPROFILE", "HOME", "XDG_CONFIG_HOME"]
|
go
| 5 | 0 | |
backend/priv-neg/persisters/queue.go
|
package persisters
import (
"fmt"
"log"
"os"
"github.com/VJftw/privacy-negotiator/backend/priv-neg/utils"
"github.com/streadway/amqp"
)
// Consumer - interface.
type Consumer interface {
Consume()
}
// Publisher - interface.
type Publisher interface {
Publish(Queueable)
GetMessageTotal() int
}
// TotalStats - Returns totals for all of the queues.
type TotalStats struct {
TotalMessageCount uint `json:"messageCount"`
}
// Queueable - What Publishers should accept.
type Queueable interface{}
// NewQueue - Returns a new RabbitMQ Channel and Connection.
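//
// Illustrative usage (the caller is responsible for closing both):
//
//	ch, conn := NewQueue(logger)
//	defer conn.Close()
//	defer ch.Close()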
func NewQueue(logger *log.Logger) (*amqp.Channel, *amqp.Connection) {
if !utils.WaitForService(fmt.Sprintf("%s:%s", os.Getenv("RABBITMQ_HOSTNAME"), "5672"), logger) {
panic("Could not find RabbitMQ..")
}
conn, err := amqp.Dial(
fmt.Sprintf("amqp://%s:%s@%s:5672/",
os.Getenv("RABBITMQ_USER"),
os.Getenv("RABBITMQ_PASS"),
os.Getenv("RABBITMQ_HOSTNAME"),
),
)
utils.FailOnError(err, "Failed to connect to RabbitMQ")
// defer conn.Close()
ch, err := conn.Channel()
utils.FailOnError(err, "Failed to open a channel")
return ch, conn
}
|
[
"\"RABBITMQ_HOSTNAME\"",
"\"RABBITMQ_USER\"",
"\"RABBITMQ_PASS\"",
"\"RABBITMQ_HOSTNAME\""
] |
[] |
[
"RABBITMQ_USER",
"RABBITMQ_HOSTNAME",
"RABBITMQ_PASS"
] |
[]
|
["RABBITMQ_USER", "RABBITMQ_HOSTNAME", "RABBITMQ_PASS"]
|
go
| 3 | 0 | |
salt/utils/network.py
|
# -*- coding: utf-8 -*-
'''
Define some generic socket functions for network modules
'''
# Import python libs
from __future__ import absolute_import, unicode_literals, print_function
import itertools
import os
import re
import types
import socket
import logging
import platform
import random
import subprocess
from string import ascii_letters, digits
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
# Attempt to import wmi
try:
import wmi
import salt.utils.winapi
except ImportError:
pass
# Import salt libs
import salt.utils.args
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.zeromq
from salt._compat import ipaddress
from salt.exceptions import SaltClientError, SaltSystemExit
from salt.utils.decorators.jinja import jinja_filter
from salt.utils.versions import LooseVersion
# inet_pton does not exist in Windows, this is a workaround
if salt.utils.platform.is_windows():
from salt.ext import win_inet_pton # pylint: disable=unused-import
log = logging.getLogger(__name__)
try:
import ctypes
import ctypes.util
libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
res_init = libc.__res_init
except (ImportError, OSError, AttributeError, TypeError):
pass
# pylint: disable=C0103
def sanitize_host(host):
'''
Sanitize host string.
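    Ex. sanitize_host('foo!bar_baz.com') -> 'foobarbaz.com'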
'''
return ''.join([
c for c in host[0:255] if c in (ascii_letters + digits + '.-')
])
def isportopen(host, port):
    '''
    Return the status of a port: 0 if it is open (the connect_ex result),
    an errno otherwise, or False if the port number is out of range.
    '''
if not 1 <= int(port) <= 65535:
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
out = sock.connect_ex((sanitize_host(host), int(port)))
return out
def host_to_ips(host):
'''
Returns a list of IP addresses of a given hostname or None if not found.
'''
ips = []
try:
for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo(
host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM):
if family == socket.AF_INET:
ip, port = sockaddr
elif family == socket.AF_INET6:
ip, port, flow_info, scope_id = sockaddr
ips.append(ip)
if not ips:
ips = None
except Exception:
ips = None
return ips
def _generate_minion_id():
'''
Get list of possible host names and convention names.
:return:
'''
# There are three types of hostnames:
# 1. Network names. How host is accessed from the network.
# 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts)
# 3. Convention names, an internal nodename.
class DistinctList(list):
'''
List, which allows one to append only distinct objects.
        Needs to work on Python 2.6, since collections.OrderedDict is only available from 2.7.
Override 'filter()' for custom filtering.
'''
localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0',
r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa']
def append(self, p_object):
if p_object and p_object not in self and not self.filter(p_object):
super(self.__class__, self).append(p_object)
return self
def extend(self, iterable):
for obj in iterable:
self.append(obj)
return self
def filter(self, element):
'Returns True if element needs to be filtered'
for rgx in self.localhost_matchers:
if re.match(rgx, element):
return True
def first(self):
return self and self[0] or None
hosts = DistinctList().append(socket.getfqdn()).append(platform.node()).append(socket.gethostname())
if not hosts:
try:
for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET,
socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME):
if len(a_nfo) > 3:
hosts.append(a_nfo[3])
        except socket.gaierror as err:
            log.warning('Cannot resolve address {addr} info via socket: {message}'.format(
                addr=hosts.first() or 'localhost (N/A)', message=err)
            )
# Universal method for everywhere (Linux, Slowlaris, Windows etc)
for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts',
r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))):
try:
with salt.utils.files.fopen(f_name) as f_hdl:
for line in f_hdl:
line = salt.utils.stringutils.to_unicode(line)
hst = line.strip().split('#')[0].strip().split()
if hst:
if hst[0][:4] in ('127.', '::1') or len(hst) == 1:
hosts.extend(hst)
except IOError:
pass
# include public and private ipaddresses
return hosts.extend([addr for addr in ip_addrs()
if not ipaddress.ip_address(addr).is_loopback])
def generate_minion_id():
'''
Return only first element of the hostname from all possible list.
:return:
'''
try:
ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first())
except TypeError:
ret = None
return ret or 'localhost'
def get_socket(addr, type=socket.SOCK_STREAM, proto=0):
'''
Return a socket object for the addr
IP-version agnostic
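    Ex. get_socket('10.0.0.1') -> AF_INET socket,
    get_socket('::1') -> AF_INET6 socket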
'''
version = ipaddress.ip_address(addr).version
if version == 4:
family = socket.AF_INET
elif version == 6:
family = socket.AF_INET6
return socket.socket(family, type, proto)
def get_fqhostname():
'''
Returns the fully qualified hostname
'''
l = [socket.getfqdn()]
# try socket.getaddrinfo
try:
addrinfo = socket.getaddrinfo(
socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM,
socket.SOL_TCP, socket.AI_CANONNAME
)
for info in addrinfo:
# info struct [family, socktype, proto, canonname, sockaddr]
# On Windows `canonname` can be an empty string
# This can cause the function to return `None`
if len(info) >= 4 and info[3]:
l = [info[3]]
except socket.gaierror:
pass
return l and l[0] or None
def ip_to_host(ip):
'''
Returns the hostname of a given IP
'''
try:
hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip)
except Exception as exc:
log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc)
hostname = None
return hostname
# pylint: enable=C0103
def is_reachable_host(entity_name):
'''
Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc).
    :param entity_name:
:return:
'''
try:
assert type(socket.getaddrinfo(entity_name, 0, 0, 0, 0)) == list
ret = True
except socket.gaierror:
ret = False
return ret
def is_ip(ip):
'''
Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address.
'''
return is_ipv4(ip) or is_ipv6(ip)
def is_ipv4(ip):
'''
Returns a bool telling if the value passed to it was a valid IPv4 address
'''
try:
return ipaddress.ip_address(ip).version == 4
except ValueError:
return False
def is_ipv6(ip):
'''
Returns a bool telling if the value passed to it was a valid IPv6 address
'''
try:
return ipaddress.ip_address(ip).version == 6
except ValueError:
return False
def is_subnet(cidr):
'''
Returns a bool telling if the passed string is an IPv4 or IPv6 subnet
'''
return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr)
def is_ipv4_subnet(cidr):
'''
Returns a bool telling if the passed string is an IPv4 subnet
'''
try:
return '/' in cidr and bool(ipaddress.IPv4Network(cidr))
except Exception:
return False
def is_ipv6_subnet(cidr):
'''
Returns a bool telling if the passed string is an IPv6 subnet
'''
try:
return '/' in cidr and bool(ipaddress.IPv6Network(cidr))
except Exception:
return False
@jinja_filter('is_ip')
def is_ip_filter(ip, options=None):
'''
Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address.
'''
return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options)
def _ip_options_global(ip_obj, version):
return not ip_obj.is_private
def _ip_options_multicast(ip_obj, version):
return ip_obj.is_multicast
def _ip_options_loopback(ip_obj, version):
return ip_obj.is_loopback
def _ip_options_link_local(ip_obj, version):
return ip_obj.is_link_local
def _ip_options_private(ip_obj, version):
return ip_obj.is_private
def _ip_options_reserved(ip_obj, version):
return ip_obj.is_reserved
def _ip_options_site_local(ip_obj, version):
if version == 6:
return ip_obj.is_site_local
return False
def _ip_options_unspecified(ip_obj, version):
return ip_obj.is_unspecified
def _ip_options(ip_obj, version, options=None):
# will process and IP options
options_fun_map = {
'global': _ip_options_global,
'link-local': _ip_options_link_local,
'linklocal': _ip_options_link_local,
'll': _ip_options_link_local,
'link_local': _ip_options_link_local,
'loopback': _ip_options_loopback,
'lo': _ip_options_loopback,
'multicast': _ip_options_multicast,
'private': _ip_options_private,
'public': _ip_options_global,
'reserved': _ip_options_reserved,
'site-local': _ip_options_site_local,
'sl': _ip_options_site_local,
'site_local': _ip_options_site_local,
'unspecified': _ip_options_unspecified
}
if not options:
return six.text_type(ip_obj) # IP version already checked
options_list = [option.strip() for option in options.split(',')]
for option, fun in options_fun_map.items():
if option in options_list:
fun_res = fun(ip_obj, version)
if not fun_res:
return None
# stop at first failed test
# else continue
return six.text_type(ip_obj)
def _is_ipv(ip, version, options=None):
if not version:
version = 4
if version not in (4, 6):
return None
try:
ip_obj = ipaddress.ip_address(ip)
except ValueError:
# maybe it is an IP network
try:
ip_obj = ipaddress.ip_interface(ip)
except ValueError:
# nope, still not :(
return None
if not ip_obj.version == version:
return None
# has the right version, let's move on
return _ip_options(ip_obj, version, options=options)
@jinja_filter('is_ipv4')
def is_ipv4_filter(ip, options=None):
'''
Returns a bool telling if the value passed to it was a valid IPv4 address.
ip
The IP address.
net: False
Consider IP addresses followed by netmask.
options
CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc.
'''
_is_ipv4 = _is_ipv(ip, 4, options=options)
return isinstance(_is_ipv4, six.string_types)
@jinja_filter('is_ipv6')
def is_ipv6_filter(ip, options=None):
'''
Returns a bool telling if the value passed to it was a valid IPv6 address.
ip
The IP address.
net: False
Consider IP addresses followed by netmask.
options
CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc.
'''
_is_ipv6 = _is_ipv(ip, 6, options=options)
return isinstance(_is_ipv6, six.string_types)
def _ipv_filter(value, version, options=None):
if version not in (4, 6):
return
if isinstance(value, (six.string_types, six.text_type, six.binary_type)):
return _is_ipv(value, version, options=options) # calls is_ipv4 or is_ipv6 for `value`
elif isinstance(value, (list, tuple, types.GeneratorType)):
# calls is_ipv4 or is_ipv6 for each element in the list
        # so it filters and returns only those elements having the desired IP version
return [
_is_ipv(addr, version, options=options)
for addr in value
if _is_ipv(addr, version, options=options) is not None
]
return None
@jinja_filter('ipv4')
def ipv4(value, options=None):
'''
Filters a list and returns IPv4 values only.
'''
return _ipv_filter(value, 4, options=options)
@jinja_filter('ipv6')
def ipv6(value, options=None):
'''
Filters a list and returns IPv6 values only.
'''
return _ipv_filter(value, 6, options=options)
@jinja_filter('ipaddr')
def ipaddr(value, options=None):
'''
Filters and returns only valid IP objects.
'''
ipv4_obj = ipv4(value, options=options)
ipv6_obj = ipv6(value, options=options)
if ipv4_obj is None or ipv6_obj is None:
# an IP address can be either IPv4 either IPv6
        # therefore if the value passed as arg is not a list, at least one of the calls above will return None
# if one of them is none, means that we should return only one of them
return ipv4_obj or ipv6_obj # one of them
else:
return ipv4_obj + ipv6_obj # extend lists
def _filter_ipaddr(value, options, version=None):
ipaddr_filter_out = None
if version:
if version == 4:
ipaddr_filter_out = ipv4(value, options)
elif version == 6:
ipaddr_filter_out = ipv6(value, options)
else:
ipaddr_filter_out = ipaddr(value, options)
if not ipaddr_filter_out:
return
if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)):
ipaddr_filter_out = [ipaddr_filter_out]
return ipaddr_filter_out
@jinja_filter('ip_host')
def ip_host(value, options=None, version=None):
'''
Returns the interfaces IP address, e.g.: 192.168.0.1/28.
'''
ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version)
if not ipaddr_filter_out:
return
if not isinstance(value, (list, tuple, types.GeneratorType)):
return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0]))
return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out]
def _network_hosts(ip_addr_entry):
return [
six.text_type(host)
for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts()
]
@jinja_filter('network_hosts')
def network_hosts(value, options=None, version=None):
'''
Return the list of hosts within a network.
.. note::
When running this command with a large IPv6 network, the command will
take a long time to gather all of the hosts.
'''
ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version)
if not ipaddr_filter_out:
return
if not isinstance(value, (list, tuple, types.GeneratorType)):
return _network_hosts(ipaddr_filter_out[0])
return [
_network_hosts(ip_a)
for ip_a in ipaddr_filter_out
]
def _network_size(ip_addr_entry):
return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses
@jinja_filter('network_size')
def network_size(value, options=None, version=None):
'''
Get the size of a network.
'''
ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version)
if not ipaddr_filter_out:
return
if not isinstance(value, (list, tuple, types.GeneratorType)):
return _network_size(ipaddr_filter_out[0])
return [
_network_size(ip_a)
for ip_a in ipaddr_filter_out
]
def natural_ipv4_netmask(ip, fmt='prefixlen'):
'''
Returns the "natural" mask of an IPv4 address
'''
bits = _ipv4_to_bits(ip)
if bits.startswith('11'):
mask = '24'
elif bits.startswith('1'):
mask = '16'
else:
mask = '8'
if fmt == 'netmask':
return cidr_to_ipv4_netmask(mask)
else:
return '/' + mask
def rpad_ipv4_network(ip):
'''
Returns an IP network address padded with zeros.
Ex: '192.168.3' -> '192.168.3.0'
'10.209' -> '10.209.0.0'
'''
return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0,
4))
def cidr_to_ipv4_netmask(cidr_bits):
'''
Returns an IPv4 netmask
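    Ex. cidr_to_ipv4_netmask(24) -> '255.255.255.0'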
'''
try:
cidr_bits = int(cidr_bits)
if not 1 <= cidr_bits <= 32:
return ''
except ValueError:
return ''
netmask = ''
for idx in range(4):
if idx:
netmask += '.'
if cidr_bits >= 8:
netmask += '255'
cidr_bits -= 8
else:
netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits)))
cidr_bits = 0
return netmask
def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103
'''
Returns an IPv4 netmask from the integer representation of that mask.
Ex. 0xffffff00 -> '255.255.255.0'
'''
return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits))
# pylint: disable=C0103
def _number_of_set_bits(x):
'''
Returns the number of bits that are set in a 32bit int
'''
# Taken from http://stackoverflow.com/a/4912729. Many thanks!
x -= (x >> 1) & 0x55555555
x = ((x >> 2) & 0x33333333) + (x & 0x33333333)
x = ((x >> 4) + x) & 0x0f0f0f0f
x += x >> 8
x += x >> 16
return x & 0x0000003f
# pylint: enable=C0103
def _interfaces_ip(out):
'''
Uses ip to return a dictionary of interfaces with various information about
each (up/down state, ip address, netmask, and hwaddr)
'''
ret = dict()
def parse_network(value, cols):
'''
Return a tuple of ip, netmask, broadcast
based on the current set of cols
'''
brd = None
scope = None
if '/' in value: # we have a CIDR in this address
ip, cidr = value.split('/') # pylint: disable=C0103
else:
ip = value # pylint: disable=C0103
cidr = 32
if type_ == 'inet':
mask = cidr_to_ipv4_netmask(int(cidr))
if 'brd' in cols:
brd = cols[cols.index('brd') + 1]
elif type_ == 'inet6':
mask = cidr
if 'scope' in cols:
scope = cols[cols.index('scope') + 1]
return (ip, mask, brd, scope)
groups = re.compile('\r?\n\\d').split(out)
for group in groups:
iface = None
data = dict()
for line in group.splitlines():
if ' ' not in line:
continue
match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line)
if match:
iface, parent, attrs = match.groups()
if 'UP' in attrs.split(','):
data['up'] = True
else:
data['up'] = False
if parent:
data['parent'] = parent
continue
cols = line.split()
if len(cols) >= 2:
type_, value = tuple(cols[0:2])
iflabel = cols[-1:][0]
if type_ in ('inet', 'inet6'):
if 'secondary' not in cols:
ipaddr, netmask, broadcast, scope = parse_network(value, cols)
if type_ == 'inet':
if 'inet' not in data:
data['inet'] = list()
addr_obj = dict()
addr_obj['address'] = ipaddr
addr_obj['netmask'] = netmask
addr_obj['broadcast'] = broadcast
addr_obj['label'] = iflabel
data['inet'].append(addr_obj)
elif type_ == 'inet6':
if 'inet6' not in data:
data['inet6'] = list()
addr_obj = dict()
addr_obj['address'] = ipaddr
addr_obj['prefixlen'] = netmask
addr_obj['scope'] = scope
data['inet6'].append(addr_obj)
else:
if 'secondary' not in data:
data['secondary'] = list()
ip_, mask, brd, scp = parse_network(value, cols)
data['secondary'].append({
'type': type_,
'address': ip_,
'netmask': mask,
'broadcast': brd,
'label': iflabel,
})
del ip_, mask, brd, scp
elif type_.startswith('link'):
data['hwaddr'] = value
if iface:
ret[iface] = data
del iface, data
return ret
def _interfaces_ifconfig(out):
'''
Uses ifconfig to return a dictionary of interfaces with various information
about each (up/down state, ip address, netmask, and hwaddr)
'''
ret = dict()
piface = re.compile(r'^([^\s:]+)')
pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)')
if salt.utils.platform.is_sunos():
pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)')
pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)')
pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*')
else:
pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s')
pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)')
pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?')
pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))')
pupdown = re.compile('UP')
pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)')
groups = re.compile('\r?\n(?=\\S)').split(out)
for group in groups:
data = dict()
iface = ''
updown = False
for line in group.splitlines():
miface = piface.match(line)
mmac = pmac.match(line)
mip = pip.match(line)
mip6 = pip6.match(line)
mupdown = pupdown.search(line)
if miface:
iface = miface.group(1)
if mmac:
data['hwaddr'] = mmac.group(1)
if salt.utils.platform.is_sunos():
expand_mac = []
for chunk in data['hwaddr'].split(':'):
expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else '{0}'.format(chunk))
data['hwaddr'] = ':'.join(expand_mac)
if mip:
if 'inet' not in data:
data['inet'] = list()
addr_obj = dict()
addr_obj['address'] = mip.group(1)
mmask = pmask.match(line)
if mmask:
if mmask.group(1):
mmask = _number_of_set_bits_to_ipv4_netmask(
int(mmask.group(1), 16))
else:
mmask = mmask.group(2)
addr_obj['netmask'] = mmask
mbcast = pbcast.match(line)
if mbcast:
addr_obj['broadcast'] = mbcast.group(1)
data['inet'].append(addr_obj)
if mupdown:
updown = True
if mip6:
if 'inet6' not in data:
data['inet6'] = list()
addr_obj = dict()
addr_obj['address'] = mip6.group(1) or mip6.group(2)
mmask6 = pmask6.match(line)
if mmask6:
addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2)
if not salt.utils.platform.is_sunos():
ipv6scope = mmask6.group(3) or mmask6.group(4)
addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope
# SunOS sometimes has ::/0 as inet6 addr when using addrconf
if not salt.utils.platform.is_sunos() \
or addr_obj['address'] != '::' \
and addr_obj['prefixlen'] != 0:
data['inet6'].append(addr_obj)
data['up'] = updown
if iface in ret:
# SunOS optimization, where interfaces occur twice in 'ifconfig -a'
# output with the same name: for ipv4 and then for ipv6 addr family.
            # Every instance has its own 'UP' status and we assume that ipv4
# status determines global interface status.
#
# merge items with higher priority for older values
# after that merge the inet and inet6 sub items for both
ret[iface] = dict(list(data.items()) + list(ret[iface].items()))
if 'inet' in data:
ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet'])
if 'inet6' in data:
ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6'])
else:
ret[iface] = data
del data
return ret
def linux_interfaces():
'''
Obtain interface information for *NIX/BSD variants
'''
ifaces = dict()
ip_path = salt.utils.path.which('ip')
ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig')
if ip_path:
cmd1 = subprocess.Popen(
'{0} link show'.format(ip_path),
shell=True,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
cmd2 = subprocess.Popen(
'{0} addr show'.format(ip_path),
shell=True,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
ifaces = _interfaces_ip("{0}\n{1}".format(
salt.utils.stringutils.to_str(cmd1),
salt.utils.stringutils.to_str(cmd2)))
elif ifconfig_path:
cmd = subprocess.Popen(
'{0} -a'.format(ifconfig_path),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd))
return ifaces
def _netbsd_interfaces_ifconfig(out):
'''
Uses ifconfig to return a dictionary of interfaces with various information
about each (up/down state, ip address, netmask, and hwaddr)
'''
ret = dict()
piface = re.compile(r'^([^\s:]+)')
pmac = re.compile('.*?address: ([0-9a-f:]+)')
pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s')
pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s')
pupdown = re.compile('UP')
pbcast = re.compile(r'.*?broadcast ([\d\.]+)')
groups = re.compile('\r?\n(?=\\S)').split(out)
for group in groups:
data = dict()
iface = ''
updown = False
for line in group.splitlines():
miface = piface.match(line)
mmac = pmac.match(line)
mip = pip.match(line)
mip6 = pip6.match(line)
mupdown = pupdown.search(line)
if miface:
iface = miface.group(1)
if mmac:
data['hwaddr'] = mmac.group(1)
if mip:
if 'inet' not in data:
data['inet'] = list()
addr_obj = dict()
addr_obj['address'] = mip.group(1)
mmask = mip.group(2)
if mip.group(2):
addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2))
mbcast = pbcast.match(line)
if mbcast:
addr_obj['broadcast'] = mbcast.group(1)
data['inet'].append(addr_obj)
if mupdown:
updown = True
if mip6:
if 'inet6' not in data:
data['inet6'] = list()
addr_obj = dict()
addr_obj['address'] = mip6.group(1)
mmask6 = mip6.group(3)
addr_obj['scope'] = mip6.group(2)
addr_obj['prefixlen'] = mip6.group(3)
data['inet6'].append(addr_obj)
data['up'] = updown
ret[iface] = data
del data
return ret
def netbsd_interfaces():
'''
Obtain interface information for NetBSD >= 8 where the ifconfig
output diverged from other BSD variants (Netmask is now part of the
address)
'''
# NetBSD versions prior to 8.0 can still use linux_interfaces()
if LooseVersion(os.uname()[2]) < LooseVersion('8.0'):
return linux_interfaces()
ifconfig_path = salt.utils.path.which('ifconfig')
cmd = subprocess.Popen(
'{0} -a'.format(ifconfig_path),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd))
def _interfaces_ipconfig(out):
'''
Returns a dictionary of interfaces with various information about each
(up/down state, ip address, netmask, and hwaddr)
    NOTE: This is not used by any function and may be removed in the
    future.
'''
ifaces = dict()
iface = None
adapter_iface_regex = re.compile(r'adapter (\S.+):$')
for line in out.splitlines():
if not line:
continue
# TODO what does Windows call Infiniband and 10/40gige adapters
if line.startswith('Ethernet'):
iface = ifaces[adapter_iface_regex.search(line).group(1)]
iface['up'] = True
addr = None
continue
if iface:
key, val = line.split(',', 1)
key = key.strip(' .')
val = val.strip()
if addr and key == 'Subnet Mask':
addr['netmask'] = val
elif key in ('IP Address', 'IPv4 Address'):
if 'inet' not in iface:
iface['inet'] = list()
addr = {'address': val.rstrip('(Preferred)'),
'netmask': None,
'broadcast': None} # TODO find the broadcast
iface['inet'].append(addr)
elif 'IPv6 Address' in key:
if 'inet6' not in iface:
                    iface['inet6'] = list()
# XXX What is the prefixlen!?
addr = {'address': val.rstrip('(Preferred)'),
'prefixlen': None}
iface['inet6'].append(addr)
elif key == 'Physical Address':
iface['hwaddr'] = val
elif key == 'Media State':
# XXX seen used for tunnel adaptors
# might be useful
                iface['up'] = (val != 'Media disconnected')
    return ifaces
def win_interfaces():
'''
Obtain interface information for Windows systems
'''
with salt.utils.winapi.Com():
c = wmi.WMI()
ifaces = {}
for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1):
ifaces[iface.Description] = dict()
if iface.MACAddress:
ifaces[iface.Description]['hwaddr'] = iface.MACAddress
if iface.IPEnabled:
ifaces[iface.Description]['up'] = True
for ip in iface.IPAddress:
if '.' in ip:
if 'inet' not in ifaces[iface.Description]:
ifaces[iface.Description]['inet'] = []
item = {'address': ip,
'label': iface.Description}
if iface.DefaultIPGateway:
broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '')
if broadcast:
item['broadcast'] = broadcast
if iface.IPSubnet:
netmask = next((i for i in iface.IPSubnet if '.' in i), '')
if netmask:
item['netmask'] = netmask
ifaces[iface.Description]['inet'].append(item)
if ':' in ip:
if 'inet6' not in ifaces[iface.Description]:
ifaces[iface.Description]['inet6'] = []
item = {'address': ip}
if iface.DefaultIPGateway:
broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '')
if broadcast:
item['broadcast'] = broadcast
if iface.IPSubnet:
netmask = next((i for i in iface.IPSubnet if ':' in i), '')
if netmask:
item['netmask'] = netmask
ifaces[iface.Description]['inet6'].append(item)
else:
ifaces[iface.Description]['up'] = False
return ifaces
def interfaces():
'''
Return a dictionary of information about all the interfaces on the minion
'''
if salt.utils.platform.is_windows():
return win_interfaces()
elif salt.utils.platform.is_netbsd():
return netbsd_interfaces()
else:
return linux_interfaces()
def get_net_start(ipaddr, netmask):
'''
Return the address of the network
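    Ex. get_net_start('192.168.1.10', '255.255.255.0') -> '192.168.1.0'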
'''
net = ipaddress.ip_network('{0}/{1}'.format(ipaddr, netmask), strict=False)
return six.text_type(net.network_address)
def get_net_size(mask):
'''
    Turns an IPv4 netmask into its corresponding prefix length
(255.255.255.0 -> 24 as in 192.168.1.10/24).
'''
binary_str = ''
for octet in mask.split('.'):
binary_str += bin(int(octet))[2:].zfill(8)
return len(binary_str.rstrip('0'))
def calc_net(ipaddr, netmask=None):
'''
Takes IP (CIDR notation supported) and optionally netmask
and returns the network in CIDR-notation.
(The IP can be any IP inside the subnet)
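    Ex. calc_net('192.168.1.5', '255.255.255.0') -> '192.168.1.0/24'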
'''
if netmask is not None:
ipaddr = '{0}/{1}'.format(ipaddr, netmask)
return six.text_type(ipaddress.ip_network(ipaddr, strict=False))
def _ipv4_to_bits(ipaddr):
'''
Accepts an IPv4 dotted quad and returns a string representing its binary
counterpart
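    Ex. _ipv4_to_bits('127.0.0.1') -> '01111111000000000000000000000001'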
'''
return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')])
def _get_iface_info(iface):
'''
If `iface` is available, return interface info and no error, otherwise
return no info and log and return an error
'''
iface_info = interfaces()
if iface in iface_info.keys():
return iface_info, False
else:
error_msg = ('Interface "{0}" not in available interfaces: "{1}"'
''.format(iface, '", "'.join(iface_info.keys())))
log.error(error_msg)
return None, error_msg
def _hw_addr_aix(iface):
'''
Return the hardware address (a.k.a. MAC address) for a given interface on AIX
    The MAC address is not available through interfaces() on AIX.
'''
cmd = subprocess.Popen(
'entstat -d {0} | grep \'Hardware Address\''.format(iface),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
if cmd:
comps = cmd.split(' ')
if len(comps) == 3:
mac_addr = comps[2].strip('\'').strip()
return mac_addr
error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface))
log.error(error_msg)
return error_msg
def hw_addr(iface):
'''
Return the hardware address (a.k.a. MAC address) for a given interface
.. versionchanged:: 2016.11.4
Added support for AIX
'''
if salt.utils.platform.is_aix():
        return _hw_addr_aix(iface)
iface_info, error = _get_iface_info(iface)
if error is False:
return iface_info.get(iface, {}).get('hwaddr', '')
else:
return error
def interface(iface):
'''
Return the details of `iface` or an error if it does not exist
'''
iface_info, error = _get_iface_info(iface)
if error is False:
return iface_info.get(iface, {}).get('inet', '')
else:
return error
def interface_ip(iface):
'''
Return `iface` IPv4 addr or an error if `iface` does not exist
'''
iface_info, error = _get_iface_info(iface)
if error is False:
inet = iface_info.get(iface, {}).get('inet', None)
return inet[0].get('address', '') if inet else ''
else:
return error
def _subnets(proto='inet', interfaces_=None):
'''
Returns a list of subnets to which the host belongs
'''
if interfaces_ is None:
ifaces = interfaces()
elif isinstance(interfaces_, list):
ifaces = {}
for key, value in six.iteritems(interfaces()):
if key in interfaces_:
ifaces[key] = value
else:
ifaces = {interfaces_: interfaces().get(interfaces_, {})}
ret = set()
if proto == 'inet':
subnet = 'netmask'
dflt_cidr = 32
elif proto == 'inet6':
subnet = 'prefixlen'
dflt_cidr = 128
else:
log.error('Invalid proto {0} calling subnets()'.format(proto))
return
for ip_info in six.itervalues(ifaces):
addrs = ip_info.get(proto, [])
addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto])
for intf in addrs:
if subnet in intf:
intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet]))
else:
intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr))
if not intf.is_loopback:
ret.add(intf.network)
return [six.text_type(net) for net in sorted(ret)]
def subnets(interfaces=None):
'''
Returns a list of IPv4 subnets to which the host belongs
'''
return _subnets('inet', interfaces_=interfaces)
def subnets6():
'''
Returns a list of IPv6 subnets to which the host belongs
'''
return _subnets('inet6')
def in_subnet(cidr, addr=None):
'''
Returns True if host or (any of) addrs is within specified subnet, otherwise False
'''
try:
cidr = ipaddress.ip_network(cidr)
except ValueError:
log.error('Invalid CIDR \'%s\'', cidr)
return False
if addr is None:
addr = ip_addrs()
addr.extend(ip_addrs6())
elif not isinstance(addr, (list, tuple)):
addr = (addr,)
return any(ipaddress.ip_address(item) in cidr for item in addr)
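# Illustrative examples (not part of the original module): a single address
# or a list of addresses can be tested against the CIDR.
#
#   >>> in_subnet('10.0.0.0/8', '10.1.2.3')
#   True
#   >>> in_subnet('10.0.0.0/8', ['192.168.0.5', '10.1.2.3'])
#   True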
def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'):
'''
    Return the full list of IP addresses matching the criteria
proto = inet|inet6
'''
ret = set()
ifaces = interface_data \
if isinstance(interface_data, dict) \
else interfaces()
if interface is None:
target_ifaces = ifaces
else:
target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces)
if k == interface])
if not target_ifaces:
log.error('Interface {0} not found.'.format(interface))
for ip_info in six.itervalues(target_ifaces):
addrs = ip_info.get(proto, [])
addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto])
for addr in addrs:
addr = ipaddress.ip_address(addr.get('address'))
if not addr.is_loopback or include_loopback:
ret.add(addr)
return [six.text_type(addr) for addr in sorted(ret)]
def ip_addrs(interface=None, include_loopback=False, interface_data=None):
'''
Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is
ignored, unless 'include_loopback=True' is indicated. If 'interface' is
provided, then only IP addresses from that interface will be returned.
'''
return _ip_addrs(interface, include_loopback, interface_data, 'inet')
def ip_addrs6(interface=None, include_loopback=False, interface_data=None):
'''
Returns a list of IPv6 addresses assigned to the host. ::1 is ignored,
unless 'include_loopback=True' is indicated. If 'interface' is provided,
then only IP addresses from that interface will be returned.
'''
return _ip_addrs(interface, include_loopback, interface_data, 'inet6')
def hex2ip(hex_ip, invert=False):
'''
    Convert a hex string to an IP; if a failure occurs, the original hex is
    returned. If 'invert=True', assume the IP is in the byte-swapped format
    used by /proc/net/<proto>
'''
if len(hex_ip) == 32: # ipv6
ip = []
for i in range(0, 32, 8):
ip_part = hex_ip[i:i + 8]
ip_part = [ip_part[x:x + 2] for x in range(0, 8, 2)]
if invert:
ip.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(ip_part))
else:
ip.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(ip_part))
try:
address = ipaddress.IPv6Address(":".join(ip))
if address.ipv4_mapped:
return str(address.ipv4_mapped)
else:
return address.compressed
except ipaddress.AddressValueError as ex:
log.error('hex2ip - ipv6 address error: {0}'.format(ex))
return hex_ip
try:
hip = int(hex_ip, 16)
except ValueError:
return hex_ip
if invert:
return '{3}.{2}.{1}.{0}'.format(hip >> 24 & 255,
hip >> 16 & 255,
hip >> 8 & 255,
hip & 255)
return '{0}.{1}.{2}.{3}'.format(hip >> 24 & 255,
hip >> 16 & 255,
hip >> 8 & 255,
hip & 255)
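# Illustrative examples (not part of the original module): /proc/net/tcp
# stores IPv4 addresses as byte-swapped hex, hence invert=True there.
#
#   >>> hex2ip('0100007F', invert=True)
#   '127.0.0.1'
#   >>> hex2ip('7F000001')
#   '127.0.0.1'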
def mac2eui64(mac, prefix=None):
'''
Convert a MAC address to a EUI64 identifier
or, with prefix provided, a full IPv6 address
'''
# http://tools.ietf.org/html/rfc4291#section-2.5.1
eui64 = re.sub(r'[.:-]', '', mac).lower()
eui64 = eui64[0:6] + 'fffe' + eui64[6:]
eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:]
if prefix is None:
return ':'.join(re.findall(r'.{4}', eui64))
else:
try:
net = ipaddress.ip_network(prefix, strict=False)
euil = int('0x{0}'.format(eui64), 16)
return '{0}/{1}'.format(net[euil], net.prefixlen)
except Exception:
return
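# Illustrative example (not part of the original module): 'fffe' is inserted
# in the middle of the MAC and the universal/local bit is flipped; with a
# prefix, a full IPv6 address is produced.
#
#   >>> mac2eui64('00:16:3e:12:34:56')
#   '0216:3eff:fe12:3456'
#   >>> mac2eui64('00:16:3e:12:34:56', prefix='2001:db8::/64')
#   '2001:db8::216:3eff:fe12:3456/64'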
def active_tcp():
'''
Return a dict describing all active tcp connections as quickly as possible
'''
ret = {}
for statf in ['/proc/net/tcp', '/proc/net/tcp6']:
if os.path.isfile(statf):
with salt.utils.files.fopen(statf, 'rb') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.strip().startswith('sl'):
continue
iret = _parse_tcp_line(line)
sl = next(iter(iret))
if iret[sl]['state'] == 1: # 1 is ESTABLISHED
del iret[sl]['state']
ret[len(ret)] = iret[sl]
return ret
def local_port_tcp(port):
'''
Return a set of remote ip addrs attached to the specified local port
'''
ret = _remotes_on(port, 'local_port')
return ret
def remote_port_tcp(port):
'''
Return a set of ip addrs the current host is connected to on given port
'''
ret = _remotes_on(port, 'remote_port')
return ret
def _remotes_on(port, which_end):
'''
Return a set of ip addrs active tcp connections
'''
port = int(port)
ret = set()
proc_available = False
for statf in ['/proc/net/tcp', '/proc/net/tcp6']:
if os.path.isfile(statf):
proc_available = True
with salt.utils.files.fopen(statf, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.strip().startswith('sl'):
continue
iret = _parse_tcp_line(line)
sl = next(iter(iret))
if iret[sl][which_end] == port and iret[sl]['state'] == 1: # 1 is ESTABLISHED
ret.add(iret[sl]['remote_addr'])
if not proc_available: # Fallback to use OS specific tools
if salt.utils.platform.is_sunos():
return _sunos_remotes_on(port, which_end)
if salt.utils.platform.is_freebsd():
return _freebsd_remotes_on(port, which_end)
if salt.utils.platform.is_netbsd():
return _netbsd_remotes_on(port, which_end)
if salt.utils.platform.is_openbsd():
return _openbsd_remotes_on(port, which_end)
if salt.utils.platform.is_windows():
return _windows_remotes_on(port, which_end)
if salt.utils.platform.is_aix():
return _aix_remotes_on(port, which_end)
return _linux_remotes_on(port, which_end)
return ret
def _parse_tcp_line(line):
'''
Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6
'''
ret = {}
comps = line.strip().split()
sl = comps[0].rstrip(':')
ret[sl] = {}
l_addr, l_port = comps[1].split(':')
r_addr, r_port = comps[2].split(':')
ret[sl]['local_addr'] = hex2ip(l_addr, True)
ret[sl]['local_port'] = int(l_port, 16)
ret[sl]['remote_addr'] = hex2ip(r_addr, True)
ret[sl]['remote_port'] = int(r_port, 16)
ret[sl]['state'] = int(comps[3], 16)
return ret
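# Illustrative example (not part of the original module), parsing one line of
# /proc/net/tcp; ports and state are hex (state 0A is LISTEN, 01 ESTABLISHED):
#
#   >>> ret = _parse_tcp_line('   0: 0100007F:1F91 00000000:0000 0A 00000000:00000000 00:00000000 00000000')
#   >>> ret['0']['local_addr'], ret['0']['local_port'], ret['0']['state']
#   ('127.0.0.1', 8081, 10)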
def _sunos_remotes_on(port, which_end):
'''
SunOS specific helper function.
Returns set of ipv4 host addresses of remote established connections
on local or remote tcp port.
Parses output of shell 'netstat' to get connections
[root@salt-master ~]# netstat -f inet -n
TCP: IPv4
Local Address Remote Address Swind Send-Q Rwind Recv-Q State
-------------------- -------------------- ----- ------ ----- ------ -----------
10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED
10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED
'''
remotes = set()
try:
data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version
except subprocess.CalledProcessError:
log.error('Failed netstat')
raise
lines = salt.utils.stringutils.to_str(data).split('\n')
for line in lines:
if 'ESTABLISHED' not in line:
continue
chunks = line.split()
local_host, local_port = chunks[0].rsplit('.', 1)
remote_host, remote_port = chunks[1].rsplit('.', 1)
if which_end == 'remote_port' and int(remote_port) != port:
continue
if which_end == 'local_port' and int(local_port) != port:
continue
remotes.add(remote_host)
return remotes
def _freebsd_remotes_on(port, which_end):
'''
Returns set of ipv4 host addresses of remote established connections
    on the given local tcp port.
Parses output of shell 'sockstat' (FreeBSD)
to get connections
$ sudo sockstat -4
USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS
root python2.7 1456 29 tcp4 *:4505 *:*
root python2.7 1445 17 tcp4 *:4506 *:*
root python2.7 1294 14 tcp4 127.0.0.1:11813 127.0.0.1:4505
root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506
$ sudo sockstat -4 -c -p 4506
USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS
root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506
'''
port = int(port)
remotes = set()
try:
cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port))
data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version
except subprocess.CalledProcessError as ex:
log.error('Failed "sockstat" with returncode = {0}'.format(ex.returncode))
raise
lines = salt.utils.stringutils.to_str(data).split('\n')
for line in lines:
chunks = line.split()
if not chunks:
continue
# ['root', 'python2.7', '1456', '37', 'tcp4',
# '127.0.0.1:4505-', '127.0.0.1:55703']
# print chunks
if 'COMMAND' in chunks[1]:
continue # ignore header
if len(chunks) < 2:
continue
# sockstat -4 -c -p 4506 does this with high PIDs:
# USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS
# salt-master python2.781106 35 tcp4 192.168.12.34:4506 192.168.12.45:60143
local = chunks[-2]
remote = chunks[-1]
lhost, lport = local.split(':')
rhost, rport = remote.split(':')
        if which_end == 'local_port' and int(lport) != port:  # ignore if local port not port
            continue
        if which_end == 'remote_port' and int(rport) != port:  # ignore if remote port not port
            continue
remotes.add(rhost)
return remotes
def _netbsd_remotes_on(port, which_end):
'''
Returns set of ipv4 host addresses of remote established connections
    on the given local tcp port.
Parses output of shell 'sockstat' (NetBSD)
to get connections
$ sudo sockstat -4 -n
USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS
root python2.7 1456 29 tcp *.4505 *.*
root python2.7 1445 17 tcp *.4506 *.*
root python2.7 1294 14 tcp 127.0.0.1.11813 127.0.0.1.4505
root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506
$ sudo sockstat -4 -c -n -p 4506
USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS
root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506
'''
port = int(port)
remotes = set()
try:
cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port))
data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version
except subprocess.CalledProcessError as ex:
log.error('Failed "sockstat" with returncode = {0}'.format(ex.returncode))
raise
lines = salt.utils.stringutils.to_str(data).split('\n')
for line in lines:
chunks = line.split()
if not chunks:
continue
# ['root', 'python2.7', '1456', '37', 'tcp',
# '127.0.0.1.4505-', '127.0.0.1.55703']
# print chunks
if 'COMMAND' in chunks[1]:
continue # ignore header
if len(chunks) < 2:
continue
local = chunks[5].split('.')
lport = local.pop()
lhost = '.'.join(local)
remote = chunks[6].split('.')
rport = remote.pop()
rhost = '.'.join(remote)
        if which_end == 'local_port' and int(lport) != port:  # ignore if local port not port
            continue
        if which_end == 'remote_port' and int(rport) != port:  # ignore if remote port not port
            continue
remotes.add(rhost)
return remotes
def _openbsd_remotes_on(port, which_end):
'''
OpenBSD specific helper function.
Returns set of ipv4 host addresses of remote established connections
on local or remote tcp port.
Parses output of shell 'netstat' to get connections
$ netstat -nf inet
Active Internet connections
Proto Recv-Q Send-Q Local Address Foreign Address (state)
tcp 0 0 10.0.0.101.4505 10.0.0.1.45329 ESTABLISHED
tcp 0 0 10.0.0.101.4505 10.0.0.100.50798 ESTABLISHED
'''
remotes = set()
try:
data = subprocess.check_output(['netstat', '-nf', 'inet']) # pylint: disable=minimum-python-version
except subprocess.CalledProcessError:
log.error('Failed netstat')
raise
lines = data.split('\n')
for line in lines:
if 'ESTABLISHED' not in line:
continue
chunks = line.split()
local_host, local_port = chunks[3].rsplit('.', 1)
remote_host, remote_port = chunks[4].rsplit('.', 1)
if which_end == 'remote_port' and int(remote_port) != port:
continue
if which_end == 'local_port' and int(local_port) != port:
continue
remotes.add(remote_host)
return remotes
def _windows_remotes_on(port, which_end):
r'''
Windows specific helper function.
Returns set of ipv4 host addresses of remote established connections
on local or remote tcp port.
Parses output of shell 'netstat' to get connections
C:\>netstat -n
Active Connections
Proto Local Address Foreign Address State
TCP 10.2.33.17:3007 130.164.12.233:10123 ESTABLISHED
TCP 10.2.33.17:3389 130.164.30.5:10378 ESTABLISHED
'''
remotes = set()
try:
data = subprocess.check_output(['netstat', '-n']) # pylint: disable=minimum-python-version
except subprocess.CalledProcessError:
log.error('Failed netstat')
raise
lines = salt.utils.stringutils.to_str(data).split('\n')
for line in lines:
if 'ESTABLISHED' not in line:
continue
chunks = line.split()
local_host, local_port = chunks[1].rsplit(':', 1)
remote_host, remote_port = chunks[2].rsplit(':', 1)
if which_end == 'remote_port' and int(remote_port) != port:
continue
if which_end == 'local_port' and int(local_port) != port:
continue
remotes.add(remote_host)
return remotes
def _linux_remotes_on(port, which_end):
'''
Linux specific helper function.
Returns set of ip host addresses of remote established connections
    on the given local tcp port.
Parses output of shell 'lsof'
to get connections
$ sudo lsof -iTCP:4505 -n
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN)
Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED)
Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED)
Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED)
'''
remotes = set()
try:
data = subprocess.check_output(
['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version
)
except subprocess.CalledProcessError as ex:
if ex.returncode == 1:
# Lsof return 1 if any error was detected, including the failure
# to locate Internet addresses, and it is not an error in this case.
log.warning('"lsof" returncode = 1, likely no active TCP sessions.')
return remotes
log.error('Failed "lsof" with returncode = {0}'.format(ex.returncode))
raise
lines = salt.utils.stringutils.to_str(data).split('\n')
for line in lines:
chunks = line.split()
if not chunks:
continue
# ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0',
# 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)']
# print chunks
if 'COMMAND' in chunks[0]:
continue # ignore header
if 'ESTABLISHED' not in chunks[-1]:
continue # ignore if not ESTABLISHED
# '127.0.0.1:4505->127.0.0.1:55703'
local, remote = chunks[8].split('->')
_, lport = local.rsplit(':', 1)
rhost, rport = remote.rsplit(':', 1)
if which_end == 'remote_port' and int(rport) != port:
continue
if which_end == 'local_port' and int(lport) != port:
continue
remotes.add(rhost.strip("[]"))
return remotes
def _aix_remotes_on(port, which_end):
'''
AIX specific helper function.
Returns set of ipv4 host addresses of remote established connections
on local or remote tcp port.
Parses output of shell 'netstat' to get connections
root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n
Active Internet connections
Proto Recv-Q Send-Q Local Address Foreign Address (state)
tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED
tcp4 0 0 127.0.0.1.9514 *.* LISTEN
tcp4 0 0 127.0.0.1.9515 *.* LISTEN
tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED
tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED
tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED
tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED
tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED
tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED
tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED
tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED
tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED
tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED
tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED
tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED
'''
remotes = set()
try:
data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version
except subprocess.CalledProcessError:
log.error('Failed netstat')
raise
lines = salt.utils.stringutils.to_str(data).split('\n')
for line in lines:
if 'ESTABLISHED' not in line:
continue
chunks = line.split()
local_host, local_port = chunks[3].rsplit('.', 1)
remote_host, remote_port = chunks[4].rsplit('.', 1)
if which_end == 'remote_port' and int(remote_port) != port:
continue
if which_end == 'local_port' and int(local_port) != port:
continue
remotes.add(remote_host)
return remotes
@jinja_filter('gen_mac')
def gen_mac(prefix='AC:DE:48'):
'''
Generates a MAC address with the defined OUI prefix.
Common prefixes:
- ``00:16:3E`` -- Xen
- ``00:18:51`` -- OpenVZ
- ``00:50:56`` -- VMware (manually generated)
- ``52:54:00`` -- QEMU/KVM
- ``AC:DE:48`` -- PRIVATE
References:
- http://standards.ieee.org/develop/regauth/oui/oui.txt
- https://www.wireshark.org/tools/oui-lookup.html
- https://en.wikipedia.org/wiki/MAC_address
'''
return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix,
random.randint(0, 0xff),
random.randint(0, 0xff),
random.randint(0, 0xff))
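# Illustrative example (not part of the original module): the OUI prefix is
# kept verbatim and three random octets are appended (output varies).
#
#   >>> gen_mac('52:54:00')  # doctest: +SKIP
#   '52:54:00:3F:A1:07'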
@jinja_filter('mac_str_to_bytes')
def mac_str_to_bytes(mac_str):
'''
Convert a MAC address string into bytes. Works with or without separators:
b1 = mac_str_to_bytes('08:00:27:13:69:77')
b2 = mac_str_to_bytes('080027136977')
assert b1 == b2
assert isinstance(b1, bytes)
'''
if len(mac_str) == 12:
pass
elif len(mac_str) == 17:
sep = mac_str[2]
mac_str = mac_str.replace(sep, '')
else:
raise ValueError('Invalid MAC address')
chars = (int(mac_str[s:s+2], 16) for s in range(0, 12, 2))
return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars)
def refresh_dns():
'''
issue #21397: force glibc to re-read resolv.conf
'''
try:
res_init()
except NameError:
# Exception raised loading the library, thus res_init is not defined
pass
@jinja_filter('connection_check')
def connection_check(addr, port=80, safe=False, ipv6=None):
'''
Provides a convenient alias for the dns_check filter.
'''
return dns_check(addr, port, safe, ipv6)
@jinja_filter('dns_check')
def dns_check(addr, port=80, safe=False, ipv6=None):
'''
Return the ip resolved by dns, but do not exit on failure, only raise an
exception. Obeys system preference for IPv4/6 address resolution - this
can be overridden by the ipv6 flag.
Tries to connect to the address before considering it useful. If no address
can be reached, the first one resolved is used as a fallback.
'''
error = False
lookup = addr
seen_ipv6 = False
family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC
hostnames = []
try:
refresh_dns()
hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM)
except TypeError:
        err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolvable address').format(lookup)
raise SaltSystemExit(code=42, msg=err)
except socket.error:
error = True
# If ipv6 is set to True, attempt another lookup using the IPv4 family,
# just in case we're attempting to lookup an IPv4 IP
# as an IPv6 hostname.
if error and ipv6:
try:
refresh_dns()
hostnames = socket.getaddrinfo(addr, port,
socket.AF_INET,
socket.SOCK_STREAM)
except TypeError:
            err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolvable address').format(lookup)
raise SaltSystemExit(code=42, msg=err)
except socket.error:
error = True
try:
if not hostnames:
error = True
else:
resolved = False
candidates = []
for h in hostnames:
# Input is IP address, passed through unchanged, just return it
if h[4][0] == addr:
resolved = salt.utils.zeromq.ip_bracket(addr)
break
candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0])
candidates.append(candidate_addr)
try:
s = socket.socket(h[0], socket.SOCK_STREAM)
s.settimeout(2)
s.connect((candidate_addr.strip('[]'), h[4][1]))
s.close()
resolved = candidate_addr
break
except socket.error:
pass
if not resolved:
if len(candidates) > 0:
resolved = candidates[0]
else:
error = True
except TypeError:
        err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolvable address').format(lookup)
raise SaltSystemExit(code=42, msg=err)
except socket.error:
error = True
if error:
err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr)
if safe:
if salt.log.is_console_configured():
# If logging is not configured it also means that either
# the master or minion instance calling this hasn't even
# started running
log.error(err)
raise SaltClientError()
raise SaltSystemExit(code=42, msg=err)
return resolved
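# Illustrative usage (not part of the original module): resolve a hostname,
# preferring whichever resolved address accepts a TCP connection on the port.
#
#   >>> dns_check('salt', port=4506)  # doctest: +SKIP
#   '10.0.0.1'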
|
[] |
[] |
[
"WINDIR"
] |
[]
|
["WINDIR"]
|
python
| 1 | 0 | |
wildfly/adduser/src/main/java/org/keycloak/wildfly/adduser/AddUser.java
|
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.wildfly.adduser;
import com.fasterxml.jackson.core.type.TypeReference;
import org.jboss.aesh.cl.CommandDefinition;
import org.jboss.aesh.cl.Option;
import org.jboss.aesh.cl.parser.ParserGenerator;
import org.jboss.aesh.console.command.Command;
import org.jboss.aesh.console.command.CommandNotFoundException;
import org.jboss.aesh.console.command.CommandResult;
import org.jboss.aesh.console.command.container.CommandContainer;
import org.jboss.aesh.console.command.invocation.CommandInvocation;
import org.jboss.aesh.console.command.registry.AeshCommandRegistryBuilder;
import org.jboss.aesh.console.command.registry.CommandRegistry;
import org.keycloak.common.util.Base64;
import org.keycloak.credential.CredentialModel;
import org.keycloak.credential.hash.Pbkdf2PasswordHashProvider;
import org.keycloak.representations.idm.CredentialRepresentation;
import org.keycloak.representations.idm.RealmRepresentation;
import org.keycloak.representations.idm.UserRepresentation;
import org.keycloak.util.JsonSerialization;
import java.io.Console;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
/**
* @author <a href="mailto:[email protected]">Stian Thorgersen</a>
*/
public class AddUser {
private static final String COMMAND_NAME = "add-user";
private static final int DEFAULT_HASH_ITERATIONS = 100000;
public static void main(String[] args) throws Exception {
AddUserCommand command = new AddUserCommand();
try {
ParserGenerator.parseAndPopulate(command, COMMAND_NAME, args);
} catch (Exception e) {
System.err.println(e.getMessage());
System.exit(1);
}
if (command.isHelp()) {
printHelp(command);
} else {
try {
String password = command.getPassword();
checkRequired(command, "user");
                if (isEmpty(command, "password")) {
password = promptForInput();
}
File addUserFile = getAddUserFile(command);
createUser(addUserFile, command.getRealm(), command.getUser(), password, command.getRoles(), command.getIterations());
} catch (Exception e) {
System.err.println(e.getMessage());
System.exit(1);
}
}
}
private static File getAddUserFile(AddUserCommand command) throws Exception {
File configDir;
if (command.isDomain()) {
if (command.getDc() != null) {
configDir = new File(command.getDc());
} else if (System.getProperty("jboss.domain.config.user.dir") != null) {
configDir = new File(System.getProperty("jboss.domain.config.user.dir"));
} else if (System.getenv("JBOSS_HOME") != null) {
configDir = new File(System.getenv("JBOSS_HOME") + File.separator + "domain" + File.separator + "configuration");
} else {
throw new Exception("Could not find domain configuration directory");
}
} else {
if (command.getSc() != null) {
configDir = new File(command.getSc());
} else if (System.getProperty("jboss.server.config.user.dir") != null) {
configDir = new File(System.getProperty("jboss.server.config.user.dir"));
} else if (System.getenv("JBOSS_HOME") != null) {
configDir = new File(System.getenv("JBOSS_HOME") + File.separator + "standalone" + File.separator + "configuration");
} else {
throw new Exception("Could not find standalone configuration directory");
}
}
if (!configDir.isDirectory()) {
throw new Exception("'" + configDir + "' does not exist or is not a directory");
}
File addUserFile = new File(configDir, "keycloak-add-user.json");
return addUserFile;
}
private static void createUser(File addUserFile, String realmName, String userName, String password, String rolesString, int iterations) throws Exception {
List<RealmRepresentation> realms;
if (addUserFile.isFile()) {
realms = JsonSerialization.readValue(new FileInputStream(addUserFile), new TypeReference<List<RealmRepresentation>>() {});
} else {
realms = new LinkedList<>();
}
if (realmName == null) {
realmName = "master";
}
RealmRepresentation realm = null;
for (RealmRepresentation r : realms) {
if (r.getRealm().equals(realmName)) {
realm = r;
}
}
if (realm == null) {
realm = new RealmRepresentation();
realm.setRealm(realmName);
realms.add(realm);
realm.setUsers(new LinkedList<UserRepresentation>());
}
for (UserRepresentation u : realm.getUsers()) {
if (u.getUsername().equals(userName)) {
throw new Exception("User with username '" + userName + "' already added to '" + addUserFile + "'");
}
}
UserRepresentation user = new UserRepresentation();
user.setEnabled(true);
user.setUsername(userName);
user.setCredentials(new LinkedList<CredentialRepresentation>());
CredentialModel credentialValueModel = new Pbkdf2PasswordHashProvider().encode(password, iterations > 0 ? iterations : DEFAULT_HASH_ITERATIONS);
CredentialRepresentation credentials = new CredentialRepresentation();
credentials.setType(credentialValueModel.getType());
credentials.setAlgorithm(credentialValueModel.getAlgorithm());
credentials.setHashIterations(credentialValueModel.getHashIterations());
credentials.setSalt(Base64.encodeBytes(credentialValueModel.getSalt()));
credentials.setHashedSaltedValue(credentialValueModel.getValue());
user.getCredentials().add(credentials);
String[] roles;
if (rolesString != null) {
roles = rolesString.split(",");
} else {
if (realmName.equals("master")) {
roles = new String[] { "admin" };
} else {
roles = new String[] { "realm-management/realm-admin" };
}
}
for (String r : roles) {
if (r.indexOf('/') != -1) {
String[] cr = r.split("/");
String client = cr[0];
String clientRole = cr[1];
if (user.getClientRoles() == null) {
user.setClientRoles(new HashMap<String, List<String>>());
}
if (user.getClientRoles().get(client) == null) {
user.getClientRoles().put(client, new LinkedList<String>());
}
user.getClientRoles().get(client).add(clientRole);
} else {
if (user.getRealmRoles() == null) {
user.setRealmRoles(new LinkedList<String>());
}
user.getRealmRoles().add(r);
}
}
realm.getUsers().add(user);
JsonSerialization.writeValuePrettyToStream(new FileOutputStream(addUserFile), realms);
System.out.println("Added '" + userName + "' to '" + addUserFile + "', restart server to load user");
}
private static void checkRequired(Command command, String field) throws Exception {
if (isEmpty(command, field)) {
Option option = command.getClass().getDeclaredField(field).getAnnotation(Option.class);
String optionName;
if (option != null && option.shortName() != '\u0000') {
optionName = "-" + option.shortName() + ", --" + field;
} else {
optionName = "--" + field;
}
throw new Exception("Option: " + optionName + " is required");
}
}
    private static boolean isEmpty(Command command, String field) throws Exception {
        Method m = command.getClass().getMethod("get" + Character.toUpperCase(field.charAt(0)) + field.substring(1));
        return m.invoke(command) == null;
    }
private static String promptForInput() throws Exception {
Console console = System.console();
if (console == null) {
throw new Exception("Couldn't get Console instance");
}
console.printf("Press ctrl-d (Unix) or ctrl-z (Windows) to exit\n");
        char[] passwordArray = console.readPassword("Password: ");
        if (passwordArray == null) {
            System.exit(0);
        }
        return new String(passwordArray);
}
private static void printHelp(Command command) throws CommandNotFoundException {
CommandRegistry registry = new AeshCommandRegistryBuilder().command(command).create();
CommandContainer commandContainer = registry.getCommand(command.getClass().getAnnotation(CommandDefinition.class).name(), null);
String help = commandContainer.printHelp(null);
System.out.println(help);
}
@CommandDefinition(name= COMMAND_NAME, description = "[options...]")
public static class AddUserCommand implements Command {
@Option(shortName = 'r', hasValue = true, description = "Name of realm to add user to")
private String realm;
@Option(shortName = 'u', hasValue = true, description = "Name of the user")
private String user;
@Option(shortName = 'p', hasValue = true, description = "Password of the user")
private String password;
@Option(hasValue = true, description = "Roles to add to the user")
private String roles;
@Option(hasValue = true, description = "Hash iterations")
private int iterations;
@Option(hasValue = false, description = "Enable domain mode")
private boolean domain;
@Option(hasValue = true, description = "Define the location of the server config directory")
private String sc;
@Option(hasValue = true, description = "Define the location of the domain config directory")
private String dc;
@Option(shortName = 'h', hasValue = false, description = "Display this help and exit")
private boolean help;
@Override
public CommandResult execute(CommandInvocation commandInvocation) throws InterruptedException {
return CommandResult.SUCCESS;
}
public String getRealm() {
return realm;
}
public String getUser() {
return user;
}
public String getPassword() {
return password;
}
public String getRoles() {
return roles;
}
public int getIterations() {
return iterations;
}
public boolean isDomain() {
return domain;
}
public String getSc() {
return sc;
}
public String getDc() {
return dc;
}
public boolean isHelp() {
return help;
}
}
}
|
[
"\"JBOSS_HOME\"",
"\"JBOSS_HOME\"",
"\"JBOSS_HOME\"",
"\"JBOSS_HOME\""
] |
[] |
[
"JBOSS_HOME"
] |
[]
|
["JBOSS_HOME"]
|
java
| 1 | 0 | |
consumer/main.go
|
package main
import (
	"context"
	"fmt"
	"log"
	"os"
	"os/signal"
	"strconv"
	"syscall"
	"time"

	"github.com/segmentio/kafka-go"
)
var reset = "\033[0m"
var red = "\033[31m"
var green = "\033[32m"
var yellow = "\033[33m"
var blue = "\033[34m"
var purple = "\033[35m"
var cyan = "\033[36m"
var gray = "\033[37m"
var white = "\033[97m"
func main() {
var kafkaBroker = os.Getenv("KAFKA_BROKER")
if kafkaBroker == "" {
kafkaBroker = "localhost:9092"
}
var kafkaTopic = os.Getenv("KAFKA_TOPIC")
if kafkaTopic == "" {
kafkaTopic = "my-topic"
}
var consumerGroup = os.Getenv("KAFKA_CONSUMER_GROUP")
if consumerGroup == "" {
consumerGroup = "my-topic-consumers"
}
var sleep = os.Getenv("SLEEP")
if sleep == "" {
sleep = "0"
}
sleepSeconds, err := strconv.Atoi(sleep)
if err != nil {
sleepSeconds = 0
}
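	// The four lookups above repeat the same "read env var, fall back to a
	// default" pattern. A small helper could collapse them; this is an
	// illustrative sketch only (the name getEnvDefault is an assumption,
	// not part of the original program):
	//
	//	func getEnvDefault(key, fallback string) string {
	//		if v := os.Getenv(key); v != "" {
	//			return v
	//		}
	//		return fallback
	//	}
	//
	// e.g. kafkaBroker := getEnvDefault("KAFKA_BROKER", "localhost:9092")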
	// make a new reader that consumes from the configured topic
r := kafka.NewReader(kafka.ReaderConfig{
Brokers: []string{kafkaBroker},
GroupID: consumerGroup,
Topic: kafkaTopic,
		MinBytes: 10e1, // 100B
MaxBytes: 10e6, // 10MB
})
defer r.Close()
fmt.Printf("%sWaiting for messages on %sbroker: %v %sgroup: %v %stopic: %v%s\n", yellow, blue, r.Config().Brokers, purple, r.Config().GroupID, cyan, r.Config().Topic, reset)
go func() {
for {
fmt.Printf("%sWaiting for a new message. %sCurrent Offset: %d, %sLag: %d %s\n ", yellow, blue, r.Offset(), purple, r.Lag(), reset)
m, err := r.ReadMessage(context.Background())
if err != nil {
				fmt.Printf("Error occurred while waiting for message %s%s", red, err)
break
}
fmt.Printf("%smessage at %stopic/%spartition/%soffset %s%v/%s%v/%s%v: %s%v = %v%s\n", red, blue, purple, cyan, blue, m.Topic, purple, m.Partition, cyan, m.Offset, green, string(m.Key), string(m.Value), reset)
time.Sleep(time.Second * time.Duration(sleepSeconds))
fmt.Printf("%sSlept for %s%d seconds%s\n", yellow, blue, sleepSeconds, reset)
}
}()
	// trap SIGTERM or interrupt and gracefully shutdown the server
	// (note: SIGKILL/os.Kill cannot be trapped, so listen for SIGTERM instead)
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
// Block until a signal is received.
sig := <-c
fmt.Printf("%sGot signal: %s%s\n", red, sig, reset)
// gracefully shutdown the server, waiting max 30 seconds for current operations to complete
_, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
if err := r.Close(); err != nil {
log.Fatal("failed to close reader:", err)
} else {
fmt.Printf("%sClosed the reader%s\n", green, reset)
}
}
|
[
"\"KAFKA_BROKER\"",
"\"KAFKA_TOPIC\"",
"\"KAFKA_CONSUMER_GROUP\"",
"\"SLEEP\""
] |
[] |
[
"KAFKA_BROKER",
"KAFKA_TOPIC",
"KAFKA_CONSUMER_GROUP",
"SLEEP"
] |
[]
|
["KAFKA_BROKER", "KAFKA_TOPIC", "KAFKA_CONSUMER_GROUP", "SLEEP"]
|
go
| 4 | 0 | |
main.go
|
package main
import (
"errors"
"fmt"
"io/ioutil"
"os"
"sort"
"strings"
log "github.com/Sirupsen/logrus"
haikunator "github.com/atrox/haikunatorgo"
"github.com/gin-contrib/sessions"
"github.com/gin-gonic/gin"
cache "github.com/hashicorp/golang-lru"
"github.com/msoedov/hacker-slides/auth"
"github.com/msoedov/hacker-slides/files"
)
const sessionHeader = "slide-session"
func SlidePath(name string) string {
return fmt.Sprintf("slides/%s.md", name)
}
func NewApp() *gin.Engine {
r := gin.Default()
store := sessions.NewCookieStore([]byte("secret"))
arc, err := cache.NewARC(10)
if err != nil {
		log.Fatalf("Failed to allocate cache %#v", err)
}
r.Use(sessions.Sessions(sessionHeader, store))
r.Use(auth.BasicAuth())
r.LoadHTMLGlob("templates/*.tmpl")
r.Static("/static", "./static")
r.Static("/images", "./slides/images")
r.GET("/", func(c *gin.Context) {
isNew := c.Query("new")
latest := files.LatestFileIn("slides")
log.WithFields(log.Fields{
"name": latest,
"isNew": isNew,
}).Info("Restoring latest point")
var path, name string
if latest == "" || isNew != "" {
haikunator := haikunator.New()
haikunator.TokenLength = 0
name = haikunator.Haikunate()
} else {
name = strings.Replace(latest, ".md", "", 1)
}
path = SlidePath(name)
log.WithFields(log.Fields{
"path": path,
}).Info("A new session")
session := sessions.Default(c)
session.Set("name", path)
session.Save()
c.Writer.Header().Set("Location", fmt.Sprintf("/stash/edit/%s", name))
c.HTML(302, "index.tmpl", gin.H{
"pubTo": path,
})
})
mustHaveSession := func(c *gin.Context) (string, error) {
session := sessions.Default(c)
val := session.Get("name")
emptySession := errors.New("Empty session")
if val == nil {
c.String(400, "No context")
return "", emptySession
}
log.WithFields(log.Fields{
"path": val,
}).Info("Got session")
path, ok := val.(string)
if !ok {
c.String(400, "No context")
return "", emptySession
}
return path, nil
}
r.GET("/slides.md", func(c *gin.Context) {
path, err := mustHaveSession(c)
if err != nil {
return
}
if _, err := os.Stat(path); err != nil {
// copy sample markdown file to the path
body, err := ioutil.ReadFile("initial-slides.md")
if err != nil {
panic(err)
}
ioutil.WriteFile(path, body, 0644)
c.String(200, string(body))
return
}
var slide string
cached, ok := arc.Get(path)
if ok {
slide = string(cached.([]byte))
} else {
body, err := ioutil.ReadFile(path)
if err != nil {
				log.Errorf("Failed to read file %#v", err)
c.Abort()
return
}
slide = string(body)
}
c.String(200, slide)
})
r.PUT("/slides.md", func(c *gin.Context) {
path, err := mustHaveSession(c)
if err != nil {
return
}
body, _ := ioutil.ReadAll(c.Request.Body)
arc.Add(path, body)
go ioutil.WriteFile(path, body, 0644)
log.WithFields(log.Fields{
"size": len(body),
"file": path,
}).Info("Async wrote to file")
c.String(200, "")
})
r.GET("/stash", func(c *gin.Context) {
files, err := ioutil.ReadDir("slides")
if err != nil {
log.Fatal(err)
}
sort.Slice(files, func(i, j int) bool {
return files[i].ModTime().Unix() > files[j].ModTime().Unix()
})
var stash []string
for _, file := range files {
if file.IsDir() {
continue
}
stash = append(stash, file.Name())
}
c.HTML(200, "stash.tmpl", gin.H{
"stash": stash,
})
})
r.GET("/stash/edit/:name", func(c *gin.Context) {
name := c.Param("name")
log.WithFields(log.Fields{
"name": name,
}).Info("Restore session?")
if strings.HasSuffix(name, ".md") {
name = name[0 : len(name)-3]
}
path := SlidePath(name)
session := sessions.Default(c)
session.Set("name", path)
session.Save()
c.HTML(200, "index.tmpl", gin.H{
"pubTo": path,
})
})
r.GET("/published/slides/:name", func(c *gin.Context) {
name := c.Param("name")
log.WithFields(log.Fields{
"name": name,
}).Info("Published")
if strings.HasSuffix(name, ".md") {
name = name[0 : len(name)-3]
}
path := SlidePath(name)
session := sessions.Default(c)
session.Set("name", path)
session.Save()
c.HTML(200, "slides.tmpl", gin.H{
"pubTo": path,
})
})
return r
}
func main() {
r := NewApp()
port := "8080"
if len(os.Args) > 1 {
port = os.Args[1]
} else {
envPort := os.Getenv("PORT")
if len(envPort) > 0 {
port = envPort
}
}
	log.Infof("Started http://0.0.0.0:%s", port)
r.Run(fmt.Sprintf(":%s", port))
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
tools/vendor/github.com/kardianos/govendor/context/context.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package context gathers the status of packages and stores it in Context.
// A new Context needs to be pointed to the root of the project and any
// project owned vendor file.
package context
import (
"fmt"
"io"
"os/exec"
"path"
"path/filepath"
"strings"
"github.com/kardianos/govendor/internal/pathos"
os "github.com/kardianos/govendor/internal/vos"
"github.com/kardianos/govendor/pkgspec"
"github.com/kardianos/govendor/vendorfile"
)
const (
debug = false
looplimit = 10000
vendorFilename = "vendor.json"
)
func dprintf(f string, v ...interface{}) {
if debug {
fmt.Printf(f, v...)
}
}
// Context represents the current project context.
type Context struct {
Logger io.Writer // Write to the verbose log.
Insecure bool // Allow insecure network operations
GopathList []string // List of GOPATHs in environment. Includes "src" dir.
Goroot string // The path to the standard library.
RootDir string // Full path to the project root.
RootGopath string // The GOPATH the project is in.
RootImportPath string // The import path to the project.
VendorFile *vendorfile.File
VendorFilePath string // File path to vendor file.
VendorFolder string // Store vendor packages in this folder.
RootToVendorFile string // The relative path from the project root to the vendor file directory.
VendorDiscoverFolder string // Normally auto-set to "vendor"
// Package is a map where the import path is the key.
// Populated with LoadPackage.
Package map[string]*Package
	// Change to unknown structure (rename). Maybe...
	// MoveRule provides the translation from original import path to new import path.
RewriteRule map[string]string // map[from]to
Operation []*Operation
loaded, dirty bool
rewriteImports bool
ignoreTag []string // list of tags to ignore
excludePackage []string // list of package prefixes to exclude
statusCache []StatusItem
added map[string]bool
}
// Package maintains information pertaining to a package.
type Package struct {
OriginDir string // Origin directory
Dir string // Physical directory path of the package.
Status Status // Status and location of the package.
*pkgspec.Pkg
Local string // Current location of a package relative to $GOPATH/src.
Gopath string // Includes trailing "src".
Files []*File
	inVendor bool // Different than Status.Location, this is in *any* vendor tree.
inTree bool
ignoreFile []string
// used in resolveUnknown function. Not persisted.
referenced map[string]*Package
}
// File holds a reference to the imports in a file and the file location.
type File struct {
Package *Package
Path string
Imports []string
ImportComment string
}
type RootType byte
const (
RootVendor RootType = iota
RootWD
RootVendorOrWD
)
func (pkg *Package) String() string {
return pkg.Local
}
type packageList []*Package
func (li packageList) Len() int { return len(li) }
func (li packageList) Swap(i, j int) { li[i], li[j] = li[j], li[i] }
func (li packageList) Less(i, j int) bool {
if li[i].Path != li[j].Path {
return li[i].Path < li[j].Path
}
return li[i].Local < li[j].Local
}
// NewContextWD creates a new context. It looks for a root folder by finding
// a vendor file.
func NewContextWD(rt RootType) (*Context, error) {
wd, err := os.Getwd()
if err != nil {
return nil, err
}
pathToVendorFile := filepath.Join("vendor", vendorFilename)
rootIndicator := "vendor"
vendorFolder := "vendor"
root := wd
if rt == RootVendor || rt == RootVendorOrWD {
tryRoot, err := findRoot(wd, rootIndicator)
switch rt {
case RootVendor:
if err != nil {
return nil, err
}
root = tryRoot
case RootVendorOrWD:
if err == nil {
root = tryRoot
}
}
}
// Check for old vendor file location.
oldLocation := filepath.Join(root, vendorFilename)
if _, err := os.Stat(oldLocation); err == nil {
return nil, ErrOldVersion{`Use the "migrate" command to update.`}
}
return NewContext(root, pathToVendorFile, vendorFolder, false)
}
// NewContext creates new context from a given root folder and vendor file path.
// The vendorFolder is where vendor packages should be placed.
func NewContext(root, vendorFilePathRel, vendorFolder string, rewriteImports bool) (*Context, error) {
dprintf("CTX: %s\n", root)
var err error
// Get GOROOT. First check ENV, then run "go env" and find the GOROOT line.
goroot := os.Getenv("GOROOT")
if len(goroot) == 0 {
// If GOROOT is not set, get from go cmd.
cmd := exec.Command("go", "env")
var goEnv []byte
goEnv, err = cmd.CombinedOutput()
if err != nil {
return nil, err
}
for _, line := range strings.Split(string(goEnv), "\n") {
if v, ok := pathos.GoEnv("GOROOT", line); ok {
goroot = v
break
}
}
}
if goroot == "" {
return nil, ErrMissingGOROOT
}
goroot = filepath.Join(goroot, "src")
// Get the GOPATHs. Prepend the GOROOT to the list.
all := os.Getenv("GOPATH")
if len(all) == 0 {
return nil, ErrMissingGOPATH
}
gopathList := filepath.SplitList(all)
gopathGoroot := make([]string, 0, len(gopathList)+1)
gopathGoroot = append(gopathGoroot, goroot)
for _, gopath := range gopathList {
srcPath := filepath.Join(gopath, "src") + string(filepath.Separator)
srcPathEvaled, err := filepath.EvalSymlinks(srcPath)
if err != nil {
return nil, err
}
gopathGoroot = append(gopathGoroot, srcPath, srcPathEvaled+string(filepath.Separator))
}
rootToVendorFile, _ := filepath.Split(vendorFilePathRel)
vendorFilePath := filepath.Join(root, vendorFilePathRel)
ctx := &Context{
RootDir: root,
GopathList: gopathGoroot,
Goroot: goroot,
VendorFilePath: vendorFilePath,
VendorFolder: vendorFolder,
RootToVendorFile: pathos.SlashToImportPath(rootToVendorFile),
VendorDiscoverFolder: "vendor",
Package: make(map[string]*Package),
RewriteRule: make(map[string]string, 3),
rewriteImports: rewriteImports,
}
ctx.RootImportPath, ctx.RootGopath, err = ctx.findImportPath(root)
if err != nil {
return nil, err
}
vf, err := readVendorFile(path.Join(ctx.RootImportPath, vendorFolder)+"/", vendorFilePath)
if err != nil {
if !os.IsNotExist(err) {
return nil, err
}
vf = &vendorfile.File{}
}
ctx.VendorFile = vf
ctx.IgnoreBuildAndPackage(vf.Ignore)
return ctx, nil
}
// IgnoreBuildAndPackage takes a space separated list of tags or package prefixes
// to ignore.
// Tags are words, packages are folders, containing or ending with a "/".
// "a b c" will ignore tags "a" OR "b" OR "c".
// "p/x q/" will ignore packages "p/x" OR "p/x/y" OR "q" OR "q/z", etc.
func (ctx *Context) IgnoreBuildAndPackage(ignore string) {
ctx.dirty = true
ors := strings.Fields(ignore)
ctx.ignoreTag = make([]string, 0, len(ors))
ctx.excludePackage = make([]string, 0, len(ors))
for _, or := range ors {
if len(or) == 0 {
continue
}
		if strings.Contains(or, "/") {
// package
ctx.excludePackage = append(ctx.excludePackage, strings.Trim(or, "./"))
} else {
// tag
ctx.ignoreTag = append(ctx.ignoreTag, or)
}
}
}
// Write to the set io.Writer for logging.
func (ctx *Context) Write(s []byte) (int, error) {
if ctx.Logger != nil {
return ctx.Logger.Write(s)
}
return len(s), nil
}
// VendorFilePackagePath finds a given vendor file package give the import path.
func (ctx *Context) VendorFilePackagePath(path string) *vendorfile.Package {
for _, pkg := range ctx.VendorFile.Package {
if pkg.Remove {
continue
}
if pkg.Path == path {
return pkg
}
}
return nil
}
// findPackageChild finds any package under the current package.
// Used for finding tree overlaps.
func (ctx *Context) findPackageChild(ck *Package) []*Package {
out := make([]*Package, 0, 3)
for _, pkg := range ctx.Package {
if pkg == ck {
continue
}
		if !pkg.inVendor {
continue
}
if pkg.Status.Presence == PresenceTree {
continue
}
if strings.HasPrefix(pkg.Path, ck.Path+"/") {
out = append(out, pkg)
}
}
return out
}
// findPackageParentTree finds any parent tree package that would
// include the given canonical path.
func (ctx *Context) findPackageParentTree(ck *Package) []string {
out := make([]string, 0, 1)
for _, pkg := range ctx.Package {
		if !pkg.inVendor {
continue
}
		if !pkg.IncludeTree || pkg == ck {
continue
}
// pkg.Path = github.com/usera/pkg, tree = true
// ck.Path = github.com/usera/pkg/dance
if strings.HasPrefix(ck.Path, pkg.Path+"/") {
out = append(out, pkg.Local)
}
}
return out
}
// updatePackageReferences populates the referenced field in each Package.
func (ctx *Context) updatePackageReferences() {
pathUnderDirLookup := make(map[string]map[string]*Package)
findCanonicalUnderDir := func(dir, path string) *Package {
if importMap, found := pathUnderDirLookup[dir]; found {
if pkg, found2 := importMap[path]; found2 {
return pkg
}
} else {
pathUnderDirLookup[dir] = make(map[string]*Package)
}
for _, pkg := range ctx.Package {
if !pkg.inVendor {
continue
}
removeFromEnd := len(pkg.Path) + len(ctx.VendorDiscoverFolder) + 2
nextLen := len(pkg.Dir) - removeFromEnd
if nextLen < 0 {
continue
}
checkDir := pkg.Dir[:nextLen]
if !pathos.FileHasPrefix(dir, checkDir) {
continue
}
if pkg.Path != path {
continue
}
pathUnderDirLookup[dir][path] = pkg
return pkg
}
pathUnderDirLookup[dir][path] = nil
return nil
}
for _, pkg := range ctx.Package {
pkg.referenced = make(map[string]*Package, len(pkg.referenced))
}
for _, pkg := range ctx.Package {
for _, f := range pkg.Files {
for _, imp := range f.Imports {
if vpkg := findCanonicalUnderDir(pkg.Dir, imp); vpkg != nil {
vpkg.referenced[pkg.Local] = pkg
continue
}
if other, found := ctx.Package[imp]; found {
other.referenced[pkg.Local] = pkg
continue
}
}
}
}
// Transfer all references from the child to the top parent.
for _, pkg := range ctx.Package {
if parentTrees := ctx.findPackageParentTree(pkg); len(parentTrees) > 0 {
if parentPkg := ctx.Package[parentTrees[0]]; parentPkg != nil {
for opath, opkg := range pkg.referenced {
// Do not transfer internal references.
if strings.HasPrefix(opkg.Path, parentPkg.Path+"/") {
continue
}
parentPkg.referenced[opath] = opkg
}
pkg.referenced = make(map[string]*Package, 0)
}
}
}
}
|
[
"\"GOROOT\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH",
"GOROOT"
] |
[]
|
["GOPATH", "GOROOT"]
|
go
| 2 | 0 |