filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
vendor/github.com/ligato/vpp-agent/plugins/orchestrator/orchestrator.go | // Copyright (c) 2019 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package orchestrator
import (
"os"
"strings"
"github.com/ligato/cn-infra/datasync"
"github.com/ligato/cn-infra/infra"
"github.com/ligato/cn-infra/logging"
"github.com/ligato/cn-infra/rpc/grpc"
"golang.org/x/net/context"
api "github.com/ligato/vpp-agent/api/genericmanager"
"github.com/ligato/vpp-agent/pkg/models"
kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api"
)
var (
// EnableStatusPublishing enables status publishing.
EnableStatusPublishing = os.Getenv("ENABLE_STATUS_PUBLISHING") != ""
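// debugOrchestrator enables extra debug logging when the DEBUG_ORCHESTRATOR environment variable is set.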
debugOrchestrator = os.Getenv("DEBUG_ORCHESTRATOR") != ""
)
// Plugin implements sync service for GRPC.
type Plugin struct {
Deps
manager *genericManagerSvc
// datasync channels
changeChan chan datasync.ChangeEvent
resyncChan chan datasync.ResyncEvent
watchDataReg datasync.WatchRegistration
*dispatcher
}
// Deps represents dependencies for the plugin.
type Deps struct {
infra.PluginDeps
GRPC grpc.Server
KVScheduler kvs.KVScheduler
Watcher datasync.KeyValProtoWatcher
StatusPublisher datasync.KeyProtoValWriter
}
// Init registers the service to GRPC server.
func (p *Plugin) Init() (err error) {
p.dispatcher = &dispatcher{
log: logging.DefaultRegistry.NewLogger("dispatcher"),
db: newMemStore(),
kvs: p.KVScheduler,
}
// register grpc service
p.manager = &genericManagerSvc{
log: p.log,
dispatch: p.dispatcher,
}
if grpcServer := p.GRPC.GetServer(); grpcServer != nil {
api.RegisterGenericManagerServer(grpcServer, p.manager)
} else {
p.log.Infof("grpc server not available")
}
nbPrefixes := p.kvs.GetRegisteredNBKeyPrefixes()
if len(nbPrefixes) > 0 {
p.log.Infof("Watch starting for %d registered NB prefixes", len(nbPrefixes))
} else {
p.log.Warnf("No registered NB prefixes found in KVScheduler (ensure that all KVDescriptors are registered before this)")
}
var prefixes []string
for _, prefix := range nbPrefixes {
p.log.Debugf("- watching NB prefix: %s", prefix)
prefixes = append(prefixes, prefix)
}
// initialize datasync channels
p.resyncChan = make(chan datasync.ResyncEvent)
p.changeChan = make(chan datasync.ChangeEvent)
p.watchDataReg, err = p.Watcher.Watch(p.PluginName.String(),
p.changeChan, p.resyncChan, prefixes...)
if err != nil {
return err
}
return nil
}
// AfterInit subscribes to known NB prefixes.
func (p *Plugin) AfterInit() (err error) {
go p.watchEvents()
statusChan := make(chan *kvs.BaseValueStatus, 100)
p.kvs.WatchValueStatus(statusChan, nil)
go p.watchStatus(statusChan)
return nil
}
// InitialSync will start initial synchronization with downstream.
func (p *Plugin) InitialSync() {
// FIXME: KVScheduler needs to have some type of sync that only refreshes state from SB
p.Log.Debugf("starting initial sync")
txn := p.KVScheduler.StartNBTransaction()
ctx := kvs.WithResync(context.Background(), kvs.DownstreamResync, true)
if _, err := txn.Commit(ctx); err != nil {
p.Log.Warnf("initial sync failed: %v", err)
} else {
p.Log.Infof("initial sync complete")
}
}
func (p *Plugin) watchEvents() {
for {
select {
case e := <-p.changeChan:
p.log.Debugf("=> received CHANGE event (%v changes)", len(e.GetChanges()))
var err error
var kvPairs []KeyVal
for _, x := range e.GetChanges() {
kv := KeyVal{
Key: x.GetKey(),
}
if x.GetChangeType() != datasync.Delete {
kv.Val, err = models.UnmarshalLazyValue(kv.Key, x)
if err != nil {
p.log.Errorf("decoding value for key %q failed: %v", kv.Key, err)
continue
}
}
kvPairs = append(kvPairs, kv)
}
if len(kvPairs) == 0 {
p.log.Warn("no valid kv pairs received in change event")
e.Done(nil)
continue
}
p.log.Debugf("Change with %d items", len(kvPairs))
ctx := e.GetContext()
if ctx == nil {
ctx = context.Background()
}
_, withDataSrc := DataSrcFromContext(ctx)
if !withDataSrc {
ctx = DataSrcContext(ctx, "datasync")
}
ctx = kvs.WithRetryDefault(ctx)
_, err = p.PushData(ctx, kvPairs)
e.Done(err)
case e := <-p.resyncChan:
p.log.Debugf("=> received RESYNC event (%v prefixes)", len(e.GetValues()))
var kvPairs []KeyVal
for prefix, iter := range e.GetValues() {
var keyVals []datasync.KeyVal
for x, done := iter.GetNext(); !done; x, done = iter.GetNext() {
key := x.GetKey()
val, err := models.UnmarshalLazyValue(key, x)
if err != nil {
p.log.Errorf("unmarshal value for key %q failed: %v", key, err)
continue
}
kvPairs = append(kvPairs, KeyVal{
Key: key,
Val: val,
})
p.log.Debugf(" -- key: %s", x.GetKey())
keyVals = append(keyVals, x)
}
if len(keyVals) > 0 {
p.log.Debugf("- %q (%v items)", prefix, len(keyVals))
} else {
p.log.Debugf("- %q (no items)", prefix)
}
for _, x := range keyVals {
p.log.Debugf("\t - %q: (rev: %v)", x.GetKey(), x.GetRevision())
}
}
p.log.Debugf("Resync with %d items", len(kvPairs))
ctx := e.GetContext()
if ctx == nil {
ctx = context.Background()
}
_, withDataSrc := DataSrcFromContext(ctx)
if !withDataSrc {
ctx = DataSrcContext(ctx, "datasync")
}
ctx = kvs.WithResync(ctx, kvs.FullResync, true)
ctx = kvs.WithRetryDefault(ctx)
_, err := p.PushData(ctx, kvPairs)
e.Done(err)
}
}
}
func (p *Plugin) watchStatus(ch <-chan *kvs.BaseValueStatus) {
for {
select {
case s := <-ch:
p.debugf("STATUS: %15s %v ===> %v (%v) %v",
s.Value.State, s.Value.Details, s.Value.Key, s.Value.LastOperation, s.Value.Error)
for _, dv := range s.DerivedValues {
p.debugf(" \t%15s %v ---> %v (%v) %v",
dv.State, dv.Details, dv.Key, dv.LastOperation, dv.Error)
}
if EnableStatusPublishing {
p.publishStatuses([]Result{
{Key: s.Value.Key, Status: s.Value},
})
}
}
}
}
func (p *Plugin) publishStatuses(results []Result) {
if p.StatusPublisher == nil {
return
}
p.debugf("publishing %d statuses", len(results))
for _, res := range results {
statusKey := strings.Replace(res.Key, "config/", "config-status/", 1)
if statusKey == res.Key {
p.debugf("replace for key %q failed", res.Key)
continue
}
if err := p.StatusPublisher.Put(statusKey, res.Status, datasync.WithClientLifetimeTTL()); err != nil {
p.debugf("publishing status for key %q failed: %v", statusKey, err)
}
}
}
func (p *Plugin) debugf(f string, a ...interface{}) {
if debugOrchestrator {
p.log.Debugf(f, a...)
}
}
| [
"\"ENABLE_STATUS_PUBLISHING\"",
"\"DEBUG_ORCHESTRATOR\""
]
| []
| [
"DEBUG_ORCHESTRATOR",
"ENABLE_STATUS_PUBLISHING"
]
| [] | ["DEBUG_ORCHESTRATOR", "ENABLE_STATUS_PUBLISHING"] | go | 2 | 0 | |
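The publishStatuses function above builds its status key with a single strings.Replace of the first "config/" segment and skips keys where the replacement is a no-op. A minimal standalone sketch of that mapping (the keys and the helper name are made up for illustration) could look like this:

package main

import (
	"fmt"
	"strings"
)

// statusKeyFor mirrors the rewrite in publishStatuses: swap the first
// "config/" segment for "config-status/" and report whether anything changed.
func statusKeyFor(key string) (string, bool) {
	statusKey := strings.Replace(key, "config/", "config-status/", 1)
	return statusKey, statusKey != key
}

func main() {
	for _, key := range []string{
		"config/vpp/v2/interfaces/loop1", // hypothetical NB config key
		"status/vpp/v2/interfaces/loop1", // no "config/" segment, left unchanged
	} {
		statusKey, changed := statusKeyFor(key)
		fmt.Printf("%s -> %s (rewritten: %v)\n", key, statusKey, changed)
	}
}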
batch/batch/cleanup_sidecar.py | import logging
import os
import subprocess as sp
from aiohttp import web
from hailtop import gear
from .stoppable_server import StoppableServer
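# Optional shell command, read from the COPY_OUTPUT_CMD environment variable, used to copy job output before shutdown.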
copy_output_cmd = os.environ.get('COPY_OUTPUT_CMD')
gear.configure_logging()
log = logging.getLogger('cleanup_sidecar')
app = web.Application()
routes = web.RouteTableDef()
server = StoppableServer(app, '0.0.0.0', 5000)
@routes.post('/')
async def finish(request):
del request
if copy_output_cmd is not None:
log.info(f'copying out data')
try:
copy_output = sp.check_output(copy_output_cmd, shell=True, stderr=sp.STDOUT)
log.info(copy_output.decode('ascii'))
except sp.CalledProcessError as err:
log.error(f'bad exit code {err.returncode}: {err.output}')
log.exception(f'exiting 1 due to exception')
await server.stop(1)
log.info(f'exiting cleanly')
await server.stop(0)
return web.Response()
if __name__ == '__main__':
app.add_routes(routes)
server.run()
| []
| []
| [
"COPY_OUTPUT_CMD"
]
| [] | ["COPY_OUTPUT_CMD"] | python | 1 | 0 | |
misc/python/materialize/cli/mzbench.py | # Copyright Materialize, Inc. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
#
# mzbench.py -- script to run materialized benchmarks
import argparse
import csv
import itertools
import multiprocessing
import os
import pathlib
import subprocess
import sys
import typing
import uuid
import webbrowser
def mzbuild_tag(git_ref: str) -> str:
if not git_ref:
return git_ref
try:
return (
subprocess.check_output(
["git", "describe", "--exact-match", git_ref], stderr=subprocess.STDOUT
)
.strip()
.decode()
)
except subprocess.CalledProcessError:
unstable_ref = (
subprocess.check_output(["git", "rev-parse", "--verify", git_ref])
.strip()
.decode()
)
return f"unstable-{unstable_ref}"
def mzcompose_location(mz_root: str) -> pathlib.Path:
"""Return the absolute path to mzcompose.
MZ_ROOT is expected to be set via pyactivate.
"""
return pathlib.Path(mz_root, "bin", "mzcompose")
def main(args: argparse.Namespace) -> None:
# Ensure that we are working out of the git directory so that commands, such as git, will work
mz_root = os.environ["MZ_ROOT"]
os.chdir(mz_root)
worker_counts = enumerate_cpu_counts()
if args.no_benchmark_this_checkout:
git_references = args.git_references
else:
git_references = [None, *args.git_references]
if args.verbose:
build_tags = [None, *[mzbuild_tag(ref) for ref in args.git_references]]
print(f"DEBUG: num_iterations={args.num_measurements}")
print(f"DEBUG: worker_counts={worker_counts}")
print(f"DEBUG: mzbuild_tags={build_tags}")
if args.size == "benchmark-ci":
# Explicitly override the worker counts for the CI benchmark
worker_counts = [1]
setup_benchmark = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"run",
f"setup-benchmark-{args.size}",
]
run_benchmark = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"run",
f"run-benchmark-{args.size}",
]
field_names = [
"git_revision",
"num_workers",
"iteration",
"seconds_taken",
"rows_per_second",
"grafana_url",
]
results_writer = csv.DictWriter(sys.stdout, field_names)
results_writer.writeheader()
# We use check_output because check_call does not capture output
try:
subprocess.check_output(setup_benchmark, stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError,) as e:
print(
f"Setup benchmark failed! Output from failed command:\n{e.output.decode()}"
)
raise
if args.web:
try:
web_command = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"web",
f"perf-dash-web",
]
output = subprocess.check_output(web_command, stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError,) as e:
print(f"Failed to open browser to perf-dash:\n{e.output.decode()}")
raise
iterations = range(0, args.num_measurements)
for (iteration, worker_count, git_ref) in itertools.product(
iterations, worker_counts, git_references
):
# Sadly, environment variables are the only way to pass this information into containers
# started by mzcompose
child_env = os.environ.copy()
child_env["MZ_ROOT"] = mz_root
child_env["MZ_WORKERS"] = str(worker_count)
child_env["MZBENCH_ID"] = args.benchmark_id
child_env["MZBUILD_WAIT_FOR_IMAGE"] = "true"
if git_ref:
child_env["MZBENCH_GIT_REF"] = git_ref
child_env["MZBUILD_MATERIALIZED_TAG"] = mzbuild_tag(git_ref)
try:
output = subprocess.check_output(
run_benchmark, env=child_env, stderr=subprocess.STDOUT
)
except (subprocess.CalledProcessError,) as e:
# TODO: Don't exit with error on simple benchmark failure
print(
f"Setup benchmark failed! Output from failed command:\n{e.output.decode()}"
)
raise
# TODO: Replace parsing output from mzcompose with reading from a well known file or topic
for line in output.decode().splitlines():
if line.startswith("SUCCESS!"):
for token in line.split(" "):
if token.startswith("seconds_taken="):
seconds_taken = token[len("seconds_taken=") :]
elif token.startswith("rows_per_sec="):
rows_per_second = token[len("rows_per_sec=") :]
elif line.startswith("Grafana URL: "):
grafana_url = line[len("Grafana URL: ") :]
results_writer.writerow(
{
"git_revision": git_ref if git_ref else "None",
"num_workers": worker_count,
"iteration": iteration,
"seconds_taken": seconds_taken,
"rows_per_second": rows_per_second,
"grafana_url": grafana_url,
}
)
def enumerate_cpu_counts() -> typing.List[int]:
"""Return a list of worker counts to benchmark on this machine.
We remove some percentage of CPU cores off the top for system / background processing. With
the CPUs that remain, we generate a list of evenly spaced worker counts. The list is limited
by the number of trials desired. This is meant to help us explore the number of CPUs that
should be dedicated to MZ_WORKERS, not as a prescription for the correct values to choose.
On a Macbook with 8 cores, this will return [6, 4, 3, 2].
On a 56 core machine, this returns [24, 18, 12, 6].
On a 96 core machine, this returns [41, 30, 20, 10].
"""
# 15% overhead and count physical cores only
max_cpus = round(multiprocessing.cpu_count() * 0.425)
num_trials = 4
# Yield the fractional points (4/4, 3/4, ...) between max and 0, not including 0
worker_counts = [round(i * max_cpus / num_trials) for i in range(num_trials, 0, -1)]
return list(reversed(sorted(set(worker_counts))))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-b",
"--benchmark-id",
type=str,
default=str(uuid.uuid4()),
help="Pseudo-unique identifier to use for this benchmark",
)
parser.add_argument(
"-n",
"--num-measurements",
type=int,
default=6,
help="Number of times to repeat each benchmark iteration",
)
parser.add_argument(
"-s",
"--size",
type=str,
default="medium",
choices=["medium", "ci", "large"],
help="Size of the benchmark to run",
)
parser.add_argument(
"--no-benchmark-this-checkout",
action="store_true",
help="Don't benchmark the version of materialized in this checkout",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enable verbose logging output"
)
parser.add_argument(
"-w",
"--web",
action="store_true",
help="Open a web browser showing results visualizations",
)
parser.add_argument(
"composition",
type=str,
help="Name of the mzcompose composition to run",
)
parser.add_argument(
"git_references",
type=str,
nargs="*",
help="Materialized builds to test as well, identified by git reference",
)
args = parser.parse_args()
main(args)
| []
| []
| [
"MZ_ROOT"
]
| [] | ["MZ_ROOT"] | python | 1 | 0 | |
test/e2e/scheduling/nvidia-gpus.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"context"
"os"
"regexp"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/test/e2e/framework"
e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
e2emanifest "k8s.io/kubernetes/test/e2e/framework/manifest"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
testPodNamePrefix = "nvidia-gpu-"
// Nvidia driver installation can take upwards of 5 minutes.
driverInstallTimeout = 10 * time.Minute
)
var (
gpuResourceName v1.ResourceName
dsYamlURL string
)
func makeCudaAdditionDevicePluginTestPod() *v1.Pod {
podName := testPodNamePrefix + string(uuid.NewUUID())
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: "vector-addition-cuda8",
Image: imageutils.GetE2EImage(imageutils.CudaVectorAdd),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
gpuResourceName: *resource.NewQuantity(1, resource.DecimalSI),
},
},
},
{
Name: "vector-addition-cuda10",
Image: imageutils.GetE2EImage(imageutils.CudaVectorAdd2),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
gpuResourceName: *resource.NewQuantity(1, resource.DecimalSI),
},
},
},
},
},
}
return testPod
}
func logOSImages(f *framework.Framework) {
nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "getting node list")
for _, node := range nodeList.Items {
framework.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage)
}
}
func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool {
framework.Logf("Getting list of Nodes from API server")
nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "getting node list")
for _, node := range nodeList.Items {
if node.Spec.Unschedulable {
continue
}
framework.Logf("gpuResourceName %s", gpuResourceName)
if val, ok := node.Status.Capacity[gpuResourceName]; !ok || val.Value() == 0 {
framework.Logf("Nvidia GPUs not available on Node: %q", node.Name)
return false
}
}
framework.Logf("Nvidia GPUs exist on all schedulable nodes")
return true
}
func getGPUsAvailable(f *framework.Framework) int64 {
nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "getting node list")
var gpusAvailable int64
for _, node := range nodeList.Items {
if val, ok := node.Status.Allocatable[gpuResourceName]; ok {
gpusAvailable += (&val).Value()
}
}
return gpusAvailable
}
// SetupNVIDIAGPUNode installs Nvidia drivers and waits for Nvidia GPUs to be available on nodes
func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *framework.ContainerResourceGatherer {
logOSImages(f)
dsYamlURLFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET")
if dsYamlURLFromEnv != "" {
dsYamlURL = dsYamlURLFromEnv
} else {
dsYamlURL = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml"
}
gpuResourceName = e2egpu.NVIDIAGPUResourceName
framework.Logf("Using %v", dsYamlURL)
// Creates the DaemonSet that installs Nvidia Drivers.
ds, err := e2emanifest.DaemonSetFromURL(dsYamlURL)
framework.ExpectNoError(err)
ds.Namespace = f.Namespace.Name
_, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), ds, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset")
framework.Logf("Successfully created daemonset to install Nvidia drivers.")
pods, err := e2eresource.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet"))
framework.ExpectNoError(err, "failed to get pods controlled by the nvidia-driver-installer daemonset")
devicepluginPods, err := e2eresource.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet"))
if err == nil {
framework.Logf("Adding deviceplugin addon pod.")
pods.Items = append(pods.Items, devicepluginPods.Items...)
}
var rsgather *framework.ContainerResourceGatherer
if setupResourceGatherer {
framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, Nodes: framework.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
go rsgather.StartGatheringData()
}
// Wait for Nvidia GPUs to be available on nodes
framework.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
gomega.Eventually(func() bool {
return areGPUsAvailableOnAllSchedulableNodes(f)
}, driverInstallTimeout, time.Second).Should(gomega.BeTrue())
return rsgather
}
func getGPUsPerPod() int64 {
var gpusPerPod int64
gpuPod := makeCudaAdditionDevicePluginTestPod()
for _, container := range gpuPod.Spec.Containers {
if val, ok := container.Resources.Limits[gpuResourceName]; ok {
gpusPerPod += (&val).Value()
}
}
return gpusPerPod
}
func testNvidiaGPUs(f *framework.Framework) {
rsgather := SetupNVIDIAGPUNode(f, true)
gpuPodNum := getGPUsAvailable(f) / getGPUsPerPod()
framework.Logf("Creating %d pods and have the pods run a CUDA app", gpuPodNum)
podList := []*v1.Pod{}
for i := int64(0); i < gpuPodNum; i++ {
podList = append(podList, f.PodClient().Create(makeCudaAdditionDevicePluginTestPod()))
}
framework.Logf("Wait for all test pods to succeed")
// Wait for all pods to succeed
for _, pod := range podList {
f.PodClient().WaitForSuccess(pod.Name, 5*time.Minute)
logContainers(f, pod)
}
framework.Logf("Stopping ResourceUsageGather")
constraints := make(map[string]framework.ResourceConstraint)
// For now, just gets summary. Can pass valid constraints in the future.
summary, err := rsgather.StopAndSummarize([]int{50, 90, 100}, constraints)
f.TestSummaries = append(f.TestSummaries, summary)
framework.ExpectNoError(err, "getting resource usage summary")
}
func logContainers(f *framework.Framework, pod *v1.Pod) {
for _, container := range pod.Spec.Containers {
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, container.Name)
framework.ExpectNoError(err, "Should be able to get container logs for container: %s", container.Name)
framework.Logf("Got container logs for %s:\n%v", container.Name, logs)
}
}
var _ = SIGDescribe("[Feature:GPUDevicePlugin]", func() {
f := framework.NewDefaultFramework("device-plugin-gpus")
ginkgo.It("run Nvidia GPU Device Plugin tests", func() {
testNvidiaGPUs(f)
})
})
func testNvidiaGPUsJob(f *framework.Framework) {
_ = SetupNVIDIAGPUNode(f, false)
// Job set to have 5 completions with parallelism of 1 to ensure that it lasts long enough to experience the node recreation
completions := int32(5)
ginkgo.By("Starting GPU job")
StartJob(f, completions)
job, err := e2ejob.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add")
framework.ExpectNoError(err)
// make sure job is running by waiting for its first pod to start running
err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, 1)
framework.ExpectNoError(err)
numNodes, err := e2enode.TotalRegistered(f.ClientSet)
framework.ExpectNoError(err)
nodes, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
framework.ExpectNoError(err)
ginkgo.By("Recreating nodes")
err = gce.RecreateNodes(f.ClientSet, nodes)
framework.ExpectNoError(err)
ginkgo.By("Done recreating nodes")
ginkgo.By("Waiting for gpu job to finish")
err = e2ejob.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err)
ginkgo.By("Done with gpu job")
gomega.Expect(job.Status.Failed).To(gomega.BeZero(), "Job pods failed during node recreation: %v", job.Status.Failed)
VerifyJobNCompletions(f, completions)
}
// StartJob starts a simple CUDA job that requests gpu and the specified number of completions
func StartJob(f *framework.Framework, completions int32) {
var activeSeconds int64 = 3600
testJob := e2ejob.NewTestJob("succeed", "cuda-add", v1.RestartPolicyAlways, 1, completions, &activeSeconds, 6)
testJob.Spec.Template.Spec = v1.PodSpec{
RestartPolicy: v1.RestartPolicyOnFailure,
Containers: []v1.Container{
{
Name: "vector-addition",
Image: imageutils.GetE2EImage(imageutils.CudaVectorAdd),
Command: []string{"/bin/sh", "-c", "./vectorAdd && sleep 60"},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
gpuResourceName: *resource.NewQuantity(1, resource.DecimalSI),
},
},
},
},
}
ns := f.Namespace.Name
_, err := e2ejob.CreateJob(f.ClientSet, ns, testJob)
framework.ExpectNoError(err)
framework.Logf("Created job %v", testJob)
}
// VerifyJobNCompletions verifies that the job has completions number of successful pods
func VerifyJobNCompletions(f *framework.Framework, completions int32) {
ns := f.Namespace.Name
pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, "cuda-add")
framework.ExpectNoError(err)
createdPods := pods.Items
createdPodNames := podNames(createdPods)
framework.Logf("Got the following pods for job cuda-add: %v", createdPodNames)
successes := int32(0)
regex := regexp.MustCompile("PASSED")
for _, podName := range createdPodNames {
f.PodClient().WaitForFinish(podName, 5*time.Minute)
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podName, "vector-addition")
framework.ExpectNoError(err, "Should be able to get logs for pod %v", podName)
if regex.MatchString(logs) {
successes++
}
}
if successes != completions {
framework.Failf("Only got %v completions. Expected %v completions.", successes, completions)
}
}
func podNames(pods []v1.Pod) []string {
originalPodNames := make([]string, len(pods))
for i, p := range pods {
originalPodNames[i] = p.ObjectMeta.Name
}
return originalPodNames
}
var _ = SIGDescribe("GPUDevicePluginAcrossRecreate [Feature:Recreate]", func() {
ginkgo.BeforeEach(func() {
e2eskipper.SkipUnlessProviderIs("gce", "gke")
})
f := framework.NewDefaultFramework("device-plugin-gpus-recreate")
ginkgo.It("run Nvidia GPU Device Plugin tests with a recreation", func() {
testNvidiaGPUsJob(f)
})
})
| [
"\"NVIDIA_DRIVER_INSTALLER_DAEMONSET\""
]
| []
| [
"NVIDIA_DRIVER_INSTALLER_DAEMONSET"
]
| [] | ["NVIDIA_DRIVER_INSTALLER_DAEMONSET"] | go | 1 | 0 | |
entrypoint.go | package main
import (
"flag"
"log"
"net/http"
"os"
"os/exec"
"time"
)
var folder string = os.Getenv("FOLDER")
var port string = ":8080"
var remote string = os.Getenv("REMOTE")
var delay time.Duration
func init() {
var err error
delay, err = time.ParseDuration(os.Getenv("DELAY"))
if err != nil {
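// Fall back to syncing once a day when DELAY is unset or not a valid duration.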
delay = 24 * time.Hour
}
flag.StringVar(&folder, "folder", folder, "Folder to store mirror into")
flag.StringVar(&port, "address", port, "Address to listen on")
flag.StringVar(&remote, "remote", remote, "Remote to fetch data from")
flag.DurationVar(&delay, "delay", delay, "Regular interval to run sync at")
flag.Parse()
if folder == "" {
panic("-folder may not be blank")
}
if remote == "" {
panic("-remote may not be blank")
}
}
func main() {
go Sync()
Serve()
}
// Serve starts the HTTP file server and blocks until it exits.
func Serve() {
log.Printf("Accepting connections on port %s", port)
err := http.ListenAndServe(port, http.FileServer(http.Dir(folder)))
if err == nil {
return
}
log.Fatal(err)
}
// Sync periodically syncs the folder with the configured remote.
func Sync() {
for {
RunSyncCommand()
log.Printf("Waiting %s for next sync invocation", delay)
time.Sleep(delay)
}
}
// RunSyncCommand runs a single command to sync
func RunSyncCommand() {
args := []string{"rsync", "-a", "--delete", remote, folder}
log.Printf("Running sync command: %v", args)
command := exec.Command(args[0], args[1:]...)
command.Stdout = os.Stdout
command.Stderr = os.Stderr
if err := command.Run(); err != nil {
log.Printf("Sync command exited with error: %s", err)
} else {
log.Printf("Sync command finished")
}
}
| [
"\"FOLDER\"",
"\"REMOTE\"",
"\"DELAY\""
]
| []
| [
"DELAY",
"FOLDER",
"REMOTE"
]
| [] | ["DELAY", "FOLDER", "REMOTE"] | go | 3 | 0 | |
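The entrypoint above seeds each flag's default from an environment variable and falls back to a 24-hour interval when DELAY is unset or unparsable. A small sketch of that precedence as a reusable helper (the helper name is hypothetical; the stdlib calls are the same ones used above):

package main

import (
	"fmt"
	"os"
	"time"
)

// durationFromEnv returns the parsed value of the named environment variable,
// or fallback when the variable is unset or not a valid duration.
func durationFromEnv(name string, fallback time.Duration) time.Duration {
	if d, err := time.ParseDuration(os.Getenv(name)); err == nil {
		return d
	}
	return fallback
}

func main() {
	os.Setenv("DELAY", "90m") // illustrative value
	fmt.Println(durationFromEnv("DELAY", 24*time.Hour)) // 1h30m0s
	os.Unsetenv("DELAY")
	fmt.Println(durationFromEnv("DELAY", 24*time.Hour)) // 24h0m0s
}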
cmd/e2e/e2e.go | /*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"os"
goruntime "runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/test/e2e"
"github.com/golang/glog"
flag "github.com/spf13/pflag"
)
var (
authConfig = flag.String("auth_config", os.Getenv("HOME")+"/.kubernetes_auth", "Path to the auth info file.")
certDir = flag.String("cert_dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
reportDir = flag.String("report_dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
host = flag.String("host", "", "The host to connect to")
repoRoot = flag.String("repo_root", "./", "Root directory of kubernetes repository, for finding test files. Default assumes working directory is repository root")
provider = flag.String("provider", "", "The name of the Kubernetes provider")
orderseed = flag.Int64("orderseed", 0, "If non-zero, seed of random test shuffle order. (Otherwise random.)")
times = flag.Int("times", 1, "Number of times each test is eligible to be run. Individual order is determined by shuffling --times instances of each test using --orderseed (like a multi-deck shoe of cards).")
testList util.StringList
)
func init() {
flag.VarP(&testList, "test", "t", "Test to execute (may be repeated or comma separated list of tests.) Defaults to running all tests.")
}
func main() {
util.InitFlags()
goruntime.GOMAXPROCS(goruntime.NumCPU())
if *provider == "" {
glog.Error("e2e needs the have the --provider flag set")
os.Exit(1)
}
if *times <= 0 {
glog.Error("Invalid --times (negative or no testing requested)!")
os.Exit(1)
}
e2e.RunE2ETests(*authConfig, *certDir, *host, *repoRoot, *provider, *orderseed, *times, *reportDir, testList)
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
from dotenv import load_dotenv
load_dotenv()
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/process/config/config.go | package config
import (
"bytes"
"encoding/json"
"fmt"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
model "github.com/DataDog/agent-payload/process"
"github.com/DataDog/datadog-agent/pkg/config"
"github.com/DataDog/datadog-agent/pkg/process/util"
"github.com/DataDog/datadog-agent/pkg/process/util/api"
"github.com/DataDog/datadog-agent/pkg/util/fargate"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
var (
// defaultProxyPort is the default port used for proxies.
// This mirrors the configuration for the infrastructure agent.
defaultProxyPort = 3128
// defaultSystemProbeBPFDir is the default path for eBPF programs
defaultSystemProbeBPFDir = "/opt/datadog-agent/embedded/share/system-probe/ebpf"
processChecks = []string{"process", "rtprocess"}
containerChecks = []string{"container", "rtcontainer"}
)
type proxyFunc func(*http.Request) (*url.URL, error)
// WindowsConfig stores all windows-specific configuration for the process-agent and system-probe.
type WindowsConfig struct {
// Number of checks runs between refreshes of command-line arguments
ArgsRefreshInterval int
// Controls getting process arguments immediately when a new process is discovered
AddNewArgs bool
//System Probe Configuration
// EnableMonotonicCount determines if we will calculate send/recv bytes of connections with headers and retransmits
EnableMonotonicCount bool
}
// AgentConfig is the global config for the process-agent. This information
// is sourced from config files and the environment variables.
type AgentConfig struct {
Enabled bool
HostName string
APIEndpoints []api.Endpoint
OrchestratorEndpoints []api.Endpoint
LogFile string
LogLevel string
LogToConsole bool
QueueSize int // The number of items allowed in each delivery queue.
ProcessQueueBytes int // The total number of bytes that can be enqueued for delivery to the process intake endpoint
PodQueueBytes int // The total number of bytes that can be enqueued for delivery to the orchestrator endpoint
Blacklist []*regexp.Regexp
Scrubber *DataScrubber
MaxPerMessage int
MaxConnsPerMessage int
AllowRealTime bool
Transport *http.Transport `json:"-"`
DDAgentBin string
StatsdHost string
StatsdPort int
ProcessExpVarPort int
// host type of the agent, used to populate container payload with additional host information
ContainerHostType model.ContainerHostType
// System probe collection configuration
EnableSystemProbe bool
DisableTCPTracing bool
DisableUDPTracing bool
DisableIPv6Tracing bool
DisableDNSInspection bool
CollectLocalDNS bool
SystemProbeAddress string
SystemProbeLogFile string
SystemProbeBPFDir string
MaxTrackedConnections uint
SysProbeBPFDebug bool
ExcludedBPFLinuxVersions []string
ExcludedSourceConnections map[string][]string
ExcludedDestinationConnections map[string][]string
EnableConntrack bool
ConntrackMaxStateSize int
ConntrackRateLimit int
SystemProbeDebugPort int
ClosedChannelSize int
MaxClosedConnectionsBuffered int
MaxConnectionsStateBuffered int
OffsetGuessThreshold uint64
EnableTracepoints bool
// DNS stats configuration
CollectDNSStats bool
DNSTimeout time.Duration
// Orchestrator collection configuration
OrchestrationCollectionEnabled bool
KubeClusterName string
IsScrubbingEnabled bool
// Check config
EnabledChecks []string
CheckIntervals map[string]time.Duration
// Internal store of a proxy used for generating the Transport
proxy proxyFunc
// Windows-specific config
Windows WindowsConfig
}
// CheckIsEnabled returns a bool indicating if the given check name is enabled.
func (a AgentConfig) CheckIsEnabled(checkName string) bool {
return util.StringInSlice(a.EnabledChecks, checkName)
}
// CheckInterval returns the interval for the given check name, defaulting to 10s if not found.
func (a AgentConfig) CheckInterval(checkName string) time.Duration {
d, ok := a.CheckIntervals[checkName]
if !ok {
log.Errorf("missing check interval for '%s', you must set a default", checkName)
d = 10 * time.Second
}
return d
}
const (
defaultProcessEndpoint = "https://process.datadoghq.com"
defaultOrchestratorEndpoint = "https://orchestrator.datadoghq.com"
maxMessageBatch = 100
maxConnsMessageBatch = 1000
defaultMaxTrackedConnections = 65536
maxOffsetThreshold = 3000
)
// NewDefaultTransport provides a http transport configuration with sane default timeouts
func NewDefaultTransport() *http.Transport {
return &http.Transport{
MaxIdleConns: 5,
IdleConnTimeout: 90 * time.Second,
Dial: (&net.Dialer{
Timeout: 10 * time.Second,
KeepAlive: 10 * time.Second,
}).Dial,
TLSHandshakeTimeout: 5 * time.Second,
ResponseHeaderTimeout: 5 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
}
// NewDefaultAgentConfig returns an AgentConfig with defaults initialized
func NewDefaultAgentConfig(canAccessContainers bool) *AgentConfig {
processEndpoint, err := url.Parse(defaultProcessEndpoint)
if err != nil {
// This is a hardcoded URL so parsing it should not fail
panic(err)
}
orchestratorEndpoint, err := url.Parse(defaultOrchestratorEndpoint)
if err != nil {
// This is a hardcoded URL so parsing it should not fail
panic(err)
}
var enabledChecks []string
if canAccessContainers {
enabledChecks = containerChecks
}
ac := &AgentConfig{
Enabled: canAccessContainers, // We'll always run inside of a container.
APIEndpoints: []api.Endpoint{{Endpoint: processEndpoint}},
OrchestratorEndpoints: []api.Endpoint{{Endpoint: orchestratorEndpoint}},
LogFile: defaultLogFilePath,
LogLevel: "info",
LogToConsole: false,
// Allow buffering up to 75 megabytes of payload data in total
ProcessQueueBytes: 60 * 1000 * 1000,
PodQueueBytes: 15 * 1000 * 1000,
// This can be fairly high as the input should get throttled by queue bytes first.
// Assuming we generate ~8 checks/minute (for process/network), this should allow buffering of ~30 minutes of data assuming it fits within the queue bytes memory budget
QueueSize: 256,
MaxPerMessage: 100,
MaxConnsPerMessage: 600,
AllowRealTime: true,
HostName: "",
Transport: NewDefaultTransport(),
ProcessExpVarPort: 6062,
ContainerHostType: model.ContainerHostType_notSpecified,
// Statsd for internal instrumentation
StatsdHost: "127.0.0.1",
StatsdPort: 8125,
// System probe collection configuration
EnableSystemProbe: false,
DisableTCPTracing: false,
DisableUDPTracing: false,
DisableIPv6Tracing: false,
DisableDNSInspection: false,
SystemProbeAddress: defaultSystemProbeAddress,
SystemProbeLogFile: defaultSystemProbeLogFilePath,
SystemProbeBPFDir: defaultSystemProbeBPFDir,
MaxTrackedConnections: defaultMaxTrackedConnections,
EnableConntrack: true,
ClosedChannelSize: 500,
ConntrackMaxStateSize: defaultMaxTrackedConnections * 2,
ConntrackRateLimit: 500,
OffsetGuessThreshold: 400,
EnableTracepoints: false,
// Check config
EnabledChecks: enabledChecks,
CheckIntervals: map[string]time.Duration{
"process": 10 * time.Second,
"rtprocess": 2 * time.Second,
"container": 10 * time.Second,
"rtcontainer": 2 * time.Second,
"connections": 30 * time.Second,
"pod": 10 * time.Second,
},
// DataScrubber to hide command line sensitive words
Scrubber: NewDefaultDataScrubber(),
Blacklist: make([]*regexp.Regexp, 0),
// Windows process config
Windows: WindowsConfig{
ArgsRefreshInterval: 15, // with default 20s check interval we refresh every 5m
AddNewArgs: true,
EnableMonotonicCount: false,
},
}
// Set default values for proc/sys paths if unset.
// Don't set them if /host is not mounted, so the container's own /proc and /sys are used.
// Generally only applicable for container-only cases like Fargate.
if config.IsContainerized() && util.PathExists("/host") {
if v := os.Getenv("HOST_PROC"); v == "" {
os.Setenv("HOST_PROC", "/host/proc")
}
if v := os.Getenv("HOST_SYS"); v == "" {
os.Setenv("HOST_SYS", "/host/sys")
}
}
return ac
}
func loadConfigIfExists(path string) error {
if util.PathExists(path) {
config.Datadog.AddConfigPath(path)
if strings.HasSuffix(path, ".yaml") { // If they set a config file directly, let's try to honor that
config.Datadog.SetConfigFile(path)
}
if _, err := config.LoadWithoutSecret(); err != nil {
return err
}
} else {
log.Infof("no config exists at %s, ignoring...", path)
}
return nil
}
func mergeConfigIfExists(path string) error {
if util.PathExists(path) {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
if err := config.Datadog.MergeConfig(file); err != nil {
return err
}
} else {
log.Infof("no config exists at %s, ignoring...", path)
}
return nil
}
// NewAgentConfig returns an AgentConfig using a configuration file. It can be nil
// if there is no file available. In this case we'll configure only via environment.
func NewAgentConfig(loggerName config.LoggerName, yamlPath, netYamlPath string) (*AgentConfig, error) {
var err error
// Note: This only considers container sources that are already setup. It's possible that container sources may
// need a few minutes to be ready on newly provisioned hosts.
_, err = util.GetContainers()
canAccessContainers := err == nil
cfg := NewDefaultAgentConfig(canAccessContainers)
// For Agent 6 we will have a YAML config file to use.
if err := loadConfigIfExists(yamlPath); err != nil {
return nil, err
}
if err := cfg.LoadProcessYamlConfig(yamlPath); err != nil {
return nil, err
}
// (Re)configure the logging from our configuration
if err := setupLogger(loggerName, cfg.LogFile, cfg); err != nil {
log.Errorf("failed to setup configured logger: %s", err)
return nil, err
}
// For system probe, there is an additional config file that is shared with the system-probe
mergeConfigIfExists(netYamlPath) //nolint:errcheck
if err = cfg.loadSysProbeYamlConfig(netYamlPath); err != nil {
return nil, err
}
// TODO: Once proxies have been moved to common config util, remove this
if cfg.proxy, err = proxyFromEnv(cfg.proxy); err != nil {
log.Errorf("error parsing environment proxy settings, not using a proxy: %s", err)
cfg.proxy = nil
}
// Python-style log level has WARNING vs WARN
if strings.ToLower(cfg.LogLevel) == "warning" {
cfg.LogLevel = "warn"
}
if cfg.HostName == "" {
if fargate.IsFargateInstance() {
if hostname, err := fargate.GetFargateHost(); err == nil {
cfg.HostName = hostname
} else {
log.Errorf("Cannot get Fargate host: %v", err)
}
} else if hostname, err := getHostname(cfg.DDAgentBin); err == nil {
cfg.HostName = hostname
} else {
log.Errorf("Cannot get hostname: %v", err)
}
}
cfg.ContainerHostType = getContainerHostType()
if cfg.proxy != nil {
cfg.Transport.Proxy = cfg.proxy
}
// sanity check. This element is used with the modulo operator (%), so it can't be zero.
// if it is, log the error, and assume the config was attempting to disable
if cfg.Windows.ArgsRefreshInterval == 0 {
log.Warnf("invalid configuration: windows_collect_skip_new_args was set to 0. Disabling argument collection")
cfg.Windows.ArgsRefreshInterval = -1
}
// activate the pod collection if enabled and we have the cluster name set
if cfg.OrchestrationCollectionEnabled && cfg.KubeClusterName != "" {
cfg.EnabledChecks = append(cfg.EnabledChecks, "pod")
}
return cfg, nil
}
// NewSystemProbeConfig returns a system-probe specific AgentConfig using a configuration file. It can be nil
// if there is no file available. In this case we'll configure only via environment.
func NewSystemProbeConfig(loggerName config.LoggerName, yamlPath string) (*AgentConfig, error) {
cfg := NewDefaultAgentConfig(false) // We don't access the container APIs in the system-probe
// When the system-probe is enabled in a separate container, we need a way to also disable the system-probe
// packaged in the main agent container (without disabling network collection on the process-agent).
//
// If this environment flag is set, make sure the system-probe does not start here.
if ok, _ := isAffirmative(os.Getenv("DD_SYSTEM_PROBE_EXTERNAL")); ok {
cfg.EnableSystemProbe = false
return cfg, nil
}
loadConfigIfExists(yamlPath) //nolint:errcheck
if err := cfg.loadSysProbeYamlConfig(yamlPath); err != nil {
return nil, err
}
// (Re)configure the logging from our configuration, with the system probe log file + config options
if err := setupLogger(loggerName, cfg.SystemProbeLogFile, cfg); err != nil {
log.Errorf("failed to setup configured logger: %s", err)
return nil, err
}
return cfg, nil
}
// getContainerHostType uses the fargate library to detect container environment and returns the protobuf version of it
func getContainerHostType() model.ContainerHostType {
switch fargate.GetOrchestrator() {
case fargate.ECS:
return model.ContainerHostType_fargateECS
case fargate.EKS:
return model.ContainerHostType_fargateEKS
}
return model.ContainerHostType_notSpecified
}
func loadEnvVariables() {
// The following environment variables will be loaded in the order listed, meaning variables
// further down the list may override prior variables.
for _, variable := range []struct{ env, cfg string }{
{"DD_PROCESS_AGENT_CONTAINER_SOURCE", "process_config.container_source"},
{"DD_SCRUB_ARGS", "process_config.scrub_args"},
{"DD_STRIP_PROCESS_ARGS", "process_config.strip_proc_arguments"},
{"DD_PROCESS_AGENT_URL", "process_config.process_dd_url"},
{"DD_ORCHESTRATOR_URL", "process_config.orchestrator_dd_url"},
{"DD_HOSTNAME", "hostname"},
{"DD_DOGSTATSD_PORT", "dogstatsd_port"},
{"DD_BIND_HOST", "bind_host"},
{"HTTPS_PROXY", "proxy.https"},
{"DD_PROXY_HTTPS", "proxy.https"},
{"DD_LOGS_STDOUT", "log_to_console"},
{"LOG_TO_CONSOLE", "log_to_console"},
{"DD_LOG_TO_CONSOLE", "log_to_console"},
{"LOG_LEVEL", "log_level"}, // Support LOG_LEVEL and DD_LOG_LEVEL but prefer DD_LOG_LEVEL
{"DD_LOG_LEVEL", "log_level"},
} {
if v, ok := os.LookupEnv(variable.env); ok {
config.Datadog.Set(variable.cfg, v)
}
}
// Load the System Probe environment variables
loadSysProbeEnvVariables()
// Support API_KEY and DD_API_KEY but prefer DD_API_KEY.
apiKey, envKey := os.Getenv("DD_API_KEY"), "DD_API_KEY"
if apiKey == "" {
apiKey, envKey = os.Getenv("API_KEY"), "API_KEY"
}
if apiKey != "" { // We don't want to overwrite the API KEY provided as an environment variable
log.Infof("overriding API key from env %s value", envKey)
config.Datadog.Set("api_key", config.SanitizeAPIKey(strings.Split(apiKey, ",")[0]))
}
if v := os.Getenv("DD_CUSTOM_SENSITIVE_WORDS"); v != "" {
config.Datadog.Set("process_config.custom_sensitive_words", strings.Split(v, ","))
}
if v := os.Getenv("DD_PROCESS_ADDITIONAL_ENDPOINTS"); v != "" {
endpoints := make(map[string][]string)
if err := json.Unmarshal([]byte(v), &endpoints); err != nil {
log.Errorf(`Could not parse DD_PROCESS_ADDITIONAL_ENDPOINTS: %v. It must be of the form '{"https://process.agent.datadoghq.com": ["apikey1", ...], ...}'.`, err)
} else {
config.Datadog.Set("process_config.additional_endpoints", endpoints)
}
}
if v := os.Getenv("DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS"); v != "" {
endpoints := make(map[string][]string)
if err := json.Unmarshal([]byte(v), &endpoints); err != nil {
log.Errorf(`Could not parse DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS: %v. It must be of the form '{"https://process.agent.datadoghq.com": ["apikey1", ...], ...}'.`, err)
} else {
config.Datadog.Set("process_config.orchestrator_additional_endpoints", endpoints)
}
}
}
func loadSysProbeEnvVariables() {
for _, variable := range []struct{ env, cfg string }{
{"DD_SYSTEM_PROBE_ENABLED", "system_probe_config.enabled"},
{"DD_SYSPROBE_SOCKET", "system_probe_config.sysprobe_socket"},
{"DD_SYSTEM_PROBE_CONNTRACK_IGNORE_ENOBUFS", "system_probe_config.conntrack_ignore_enobufs"},
{"DD_DISABLE_TCP_TRACING", "system_probe_config.disable_tcp"},
{"DD_DISABLE_UDP_TRACING", "system_probe_config.disable_udp"},
{"DD_DISABLE_IPV6_TRACING", "system_probe_config.disable_ipv6"},
{"DD_DISABLE_DNS_INSPECTION", "system_probe_config.disable_dns_inspection"},
{"DD_COLLECT_LOCAL_DNS", "system_probe_config.collect_local_dns"},
{"DD_COLLECT_DNS_STATS", "system_probe_config.collect_dns_stats"},
} {
if v, ok := os.LookupEnv(variable.env); ok {
config.Datadog.Set(variable.cfg, v)
}
}
}
// IsBlacklisted returns a boolean indicating if the given command is blacklisted by our config.
func IsBlacklisted(cmdline []string, blacklist []*regexp.Regexp) bool {
cmd := strings.Join(cmdline, " ")
for _, b := range blacklist {
if b.MatchString(cmd) {
return true
}
}
return false
}
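// isAffirmative reports whether value represents boolean true ("true", "yes" or "1", case-insensitively).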
func isAffirmative(value string) (bool, error) {
if value == "" {
return false, fmt.Errorf("value is empty")
}
v := strings.ToLower(value)
return v == "true" || v == "yes" || v == "1", nil
}
// getHostname shells out to obtain the hostname used by the infra agent
// falling back to os.Hostname() if it is unavailable
func getHostname(ddAgentBin string) (string, error) {
cmd := exec.Command(ddAgentBin, "hostname")
// Copying all environment variables to child process
// Windows: Required, so the child process can load DLLs, etc.
// Linux: Optional, but will make use of DD_HOSTNAME and DOCKER_DD_AGENT if they exist
cmd.Env = os.Environ()
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
log.Infof("error retrieving dd-agent hostname, falling back to os.Hostname(): %v", err)
return os.Hostname()
}
hostname := strings.TrimSpace(stdout.String())
if hostname == "" {
log.Infof("error retrieving dd-agent hostname, falling back to os.Hostname(): %s", stderr.String())
return os.Hostname()
}
return hostname, err
}
// proxyFromEnv parses out the proxy configuration from the ENV variables in a
// similar way to getProxySettings and, if enough values are available, returns
// a new proxy URL value. If the environment is not set for this then the
// `defaultVal` is returned.
func proxyFromEnv(defaultVal proxyFunc) (proxyFunc, error) {
var host string
scheme := "http"
if v := os.Getenv("PROXY_HOST"); v != "" {
// accept either http://myproxy.com or myproxy.com
if i := strings.Index(v, "://"); i != -1 {
// when available, parse the scheme from the url
scheme = v[0:i]
host = v[i+3:]
} else {
host = v
}
}
if host == "" {
return defaultVal, nil
}
port := defaultProxyPort
if v := os.Getenv("PROXY_PORT"); v != "" {
port, _ = strconv.Atoi(v)
}
var user, password string
if v := os.Getenv("PROXY_USER"); v != "" {
user = v
}
if v := os.Getenv("PROXY_PASSWORD"); v != "" {
password = v
}
return constructProxy(host, scheme, port, user, password)
}
// constructProxy constructs a proxy function from the given parts of a proxy URL.
// Note that we assume we have at least a non-empty host for this call but
// all other values can be their defaults (empty string or 0).
func constructProxy(host, scheme string, port int, user, password string) (proxyFunc, error) {
var userpass *url.Userinfo
if user != "" {
if password != "" {
userpass = url.UserPassword(user, password)
} else {
userpass = url.User(user)
}
}
var path string
if userpass != nil {
path = fmt.Sprintf("%s@%s:%v", userpass.String(), host, port)
} else {
path = fmt.Sprintf("%s:%v", host, port)
}
if scheme != "" {
path = fmt.Sprintf("%s://%s", scheme, path)
}
u, err := url.Parse(path)
if err != nil {
return nil, err
}
return http.ProxyURL(u), nil
}
func setupLogger(loggerName config.LoggerName, logFile string, cfg *AgentConfig) error {
return config.SetupLogger(
loggerName,
cfg.LogLevel,
logFile,
config.GetSyslogURI(),
config.Datadog.GetBool("syslog_rfc"),
config.Datadog.GetBool("log_to_console"),
config.Datadog.GetBool("log_format_json"),
)
}
| [
"\"HOST_PROC\"",
"\"HOST_SYS\"",
"\"DD_SYSTEM_PROBE_EXTERNAL\"",
"\"DD_API_KEY\"",
"\"API_KEY\"",
"\"DD_CUSTOM_SENSITIVE_WORDS\"",
"\"DD_PROCESS_ADDITIONAL_ENDPOINTS\"",
"\"DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS\"",
"\"PROXY_HOST\"",
"\"PROXY_PORT\"",
"\"PROXY_USER\"",
"\"PROXY_PASSWORD\""
]
| []
| [
"HOST_SYS",
"PROXY_PASSWORD",
"DD_SYSTEM_PROBE_EXTERNAL",
"PROXY_HOST",
"API_KEY",
"HOST_PROC",
"PROXY_USER",
"PROXY_PORT",
"DD_PROCESS_ADDITIONAL_ENDPOINTS",
"DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS",
"DD_API_KEY",
"DD_CUSTOM_SENSITIVE_WORDS"
]
| [] | ["HOST_SYS", "PROXY_PASSWORD", "DD_SYSTEM_PROBE_EXTERNAL", "PROXY_HOST", "API_KEY", "HOST_PROC", "PROXY_USER", "PROXY_PORT", "DD_PROCESS_ADDITIONAL_ENDPOINTS", "DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS", "DD_API_KEY", "DD_CUSTOM_SENSITIVE_WORDS"] | go | 12 | 0 | |
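proxyFromEnv above accepts PROXY_HOST values with or without a scheme and passes the pieces to constructProxy. A standalone sketch of the same URL assembly using only the standard library (the host, port and credentials are illustrative, not real agent settings):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// buildProxyURL mimics the splitting in proxyFromEnv: honour an optional
// "scheme://" prefix on the host, default to http, then attach the port and
// optional credentials.
func buildProxyURL(host string, port int, user, password string) *url.URL {
	scheme := "http"
	if i := strings.Index(host, "://"); i != -1 {
		scheme, host = host[:i], host[i+3:]
	}
	u := &url.URL{Scheme: scheme, Host: fmt.Sprintf("%s:%d", host, port)}
	if user != "" {
		if password != "" {
			u.User = url.UserPassword(user, password)
		} else {
			u.User = url.User(user)
		}
	}
	return u
}

func main() {
	u := buildProxyURL("https://myproxy.example.com", 3128, "user", "secret")
	fmt.Println(u.String()) // https://user:secret@myproxy.example.com:3128
}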
provisional-st/scripts/unit-tests/unit-tests.py | '''
FILE: unit-tests.py
PURPOSE: Provides unit testing for this directory.
PROJECT: Land Satellites Data Systems Science Research and Development
(LSRD) at the USGS EROS
LICENSE: NASA Open Source Agreement 1.3
HISTORY:
Date Reason
---------------- --------------------------------------------------------
Sep/2015 Initial implementation
'''
import os
import sys
import shutil
import glob
import filecmp
import unittest
# Add the parent directory where the modules to test are located
sys.path.insert(0, '..')
from extract_auxiliary_narr_data import AuxNARRGribProcessor
from st_environment import Environment
class LSRD_ValidationFramework(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(LSRD_ValidationFramework, self).__init__(*args, **kwargs)
if not self.name:
raise Exception('self.name must be defined')
# Verify the environment
self.lsrd_validation_dir = os.environ.get('LSRD_VALIDATION_DIR')
if self.lsrd_validation_dir is None:
raise Exception('Missing environment variable LSRD_VALIDATION_DIR')
def assertFilesEqual(self, file_1, file_2):
'''Assert that two files are equal or not.'''
self.assertTrue(os.path.exists(file_1),
'{0} Does not exist'.format(file_1))
self.assertTrue(os.path.exists(file_2),
'{0} Does not exist'.format(file_2))
self.assertTrue(filecmp.cmp(file_1, file_2))
class AuxNARRGribProcessor_TestCase(LSRD_ValidationFramework):
'''Tests for Grib file processing.'''
def __init__(self, *args, **kwargs):
self.name = 'AuxNARRGribProcessor_TestCase'
super(AuxNARRGribProcessor_TestCase, self).__init__(*args, **kwargs)
# Validation data is presumed to be available if the directory exists
self.validation_path = os.path.join(self.lsrd_validation_dir,
self.name)
if not os.path.isdir(self.validation_path):
raise Exception('Missing validation data for [{0}]'
.format(self.name))
# Define the directories that are produced
self.directories = ['HGT_1', 'HGT_2',
'SPFH_1', 'SPFH_2',
'TMP_1', 'TMP_2']
def setUp(self):
'''setup'''
self.input_xml = os.path.join(self.validation_path,
'LT50420342011119PAC01.xml')
# Specify the XML metadata file defining the data to process
self.processor = AuxNARRGribProcessor(self.input_xml)
# Process the associated AUX data
self.processor.extract_aux_data()
def tearDown(self):
'''Cleanup'''
for directory in self.directories:
if os.path.isdir(directory):
shutil.rmtree(directory)
def test_process_grib_data(self):
'''Test the processing of grib files from our internal archive.'''
for directory in self.directories:
self.assertEqual(True, os.path.isdir(directory))
# Start with the local files
files = glob.glob(os.path.join(directory, '*'))
# Add the validation files
validation_directory = os.path.join(self.validation_path,
directory)
files.extend(glob.glob(os.path.join(validation_directory, '*')))
# We only want the filenames
files = [os.path.basename(x) for x in files]
# Make a unique list of the filenames
files = sorted(list(set(files)))
# Process through each file
for filename in files:
local_file = os.path.join(directory, filename)
validation_file = os.path.join(validation_directory, filename)
self.assertFilesEqual(validation_file, local_file)
class Environment_TestCase(LSRD_ValidationFramework):
'''Tests Environment Class'''
def __init__(self, *args, **kwargs):
self.name = 'Environment_TestCase'
super(Environment_TestCase, self).__init__(*args, **kwargs)
def setUp(self):
'''setup'''
os.environ['ST_DATA_DIR'] = '/usr/local'
os.environ['ST_AUX_DIR'] = '/usr/local'
os.environ['ASTER_GED_SERVER_NAME'] = 'ASTER_GED_SERVER_NAME'
self.environment = Environment()
def test_ST_DATA_DIR(self):
'''Test the ST_DATA_DIR environment variable'''
self.assertEqual('/usr/local',
self.environment.get_st_data_directory())
def test_ST_AUX_DIR(self):
'''Test the ST_AUX_DIR environment variable'''
self.assertEqual('/usr/local',
self.environment.get_st_aux_directory())
def test_ASTER_GED_SERVER_NAME(self):
'''Test the ASTER_GED_SERVER_NAME environment variable'''
self.assertEqual('ASTER_GED_SERVER_NAME',
self.environment.get_aster_ged_server_name())
if __name__ == '__main__':
unittest.main()
| []
| []
| [
"ASTER_GED_SERVER_NAME",
"ST_AUX_DIR",
"LSRD_VALIDATION_DIR",
"ST_DATA_DIR"
]
| [] | ["ASTER_GED_SERVER_NAME", "ST_AUX_DIR", "LSRD_VALIDATION_DIR", "ST_DATA_DIR"] | python | 4 | 0 | |
app/job/main/aegis/dao/dao_test.go | package dao
import (
"flag"
"os"
"testing"
"time"
"go-common/app/job/main/aegis/conf"
"go-common/app/job/main/aegis/model"
)
var (
d *Dao
task1 = &model.Task{ID: 1, BusinessID: 1, FlowID: 1, UID: 1, Weight: 3, Ctime: model.IntTime(time.Now().Unix())}
task2 = &model.Task{ID: 2, BusinessID: 1, FlowID: 1, UID: 1, Weight: 2, Ctime: model.IntTime(time.Now().Unix())}
task3 = &model.Task{ID: 3, BusinessID: 1, FlowID: 1, UID: 1, Weight: 1, Ctime: model.IntTime(time.Now().Unix())}
)
func TestMain(m *testing.M) {
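// With DEPLOY_ENV set, load configuration from the remote config service; otherwise read the local TOML file.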
if os.Getenv("DEPLOY_ENV") != "" {
flag.Set("app_id", "main.archive.aegis-job")
flag.Set("conf_token", "aed3cc21ca345ffc284c6036da32352b")
flag.Set("tree_id", "61819")
flag.Set("conf_version", "1")
flag.Set("deploy_env", "uat")
flag.Set("conf_host", "config.bilibili.co")
flag.Set("conf_path", "/tmp")
flag.Set("region", "sh")
flag.Set("zone", "sh001")
} else {
flag.Set("conf", "../cmd/aegis-job.toml")
}
flag.Parse()
if err := conf.Init(); err != nil {
panic(err)
}
d = New(conf.Conf)
os.Exit(m.Run())
}
| [
"\"DEPLOY_ENV\""
]
| []
| [
"DEPLOY_ENV"
]
| [] | ["DEPLOY_ENV"] | go | 1 | 0 | |
shellish/paging.py | """
Support for directing output to a pager and then restoring tty state.
"""
import contextlib
import fcntl
import os
import shutil
import signal
import subprocess
import sys
import termios
import warnings
_pager_active = False
@contextlib.contextmanager
def tty_restoration(file=None):
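"""Save the tty's termios and fcntl state and restore it when the block exits."""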
if file is None:
file = sys.stdout
assert file.isatty()
tcsave = termios.tcgetattr(file)
fcsave = fcntl.fcntl(file, fcntl.F_GETFL)
try:
yield
finally:
fcntl.fcntl(file, fcntl.F_SETFL, fcsave)
termios.tcsetattr(file, termios.TCSADRAIN, tcsave)
def pager_process(pagercmd, stdout=None, stderr=None):
if stdout is None:
stdout = sys.stdout
if stderr is None:
stderr = sys.stderr
# When running interactively `less` does not handle window resizes
# unless we explicitly hardcode the new term size into the env. There
# is currently a bug in docker for mac that sometimes breaks this test.
env = os.environ.copy()
termsize = shutil.get_terminal_size()
if 0 in termsize:
warnings.warn("Could not determine terminal size")
termsize = os.terminal_size((80, 24))
env['COLUMNS'] = str(termsize.columns)
env['LINES'] = str(termsize.lines)
return subprocess.Popen(pagercmd, shell=True, universal_newlines=True,
bufsize=1, stdin=subprocess.PIPE, stdout=stdout,
stderr=stderr, env=env)
@contextlib.contextmanager
def pager_redirect(desc, *, pagercmd=None, istty=None, file=None,
substitutions=None):
""" Redirect output to file/stdout to a pager process. Care is taken to
restore the controlling tty stdio files to their original state. """
global _pager_active
if file is None:
file = sys.stdout
if not pagercmd or not file.isatty() or _pager_active:
yield
return
subs = {"desc": desc}
if substitutions is not None:
subs.update(substitutions)
pagercmd = pagercmd.format(**subs)
with tty_restoration():
p = pager_process(pagercmd)
if istty is None:
p.stdin.isatty = file.isatty
else:
p.stdin.isatty = lambda: istty
stdout_save = sys.stdout
sys.stdout = p.stdin
_pager_active = True
try:
yield
finally:
_pager_active = False
sys.stdout = stdout_save
try:
p.stdin.close()
except BrokenPipeError:
pass
while p.poll() is None:
try:
p.wait()
except KeyboardInterrupt:
pass
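# A minimal usage sketch (added for illustration, not part of the original
# module). It assumes a POSIX terminal with `less` available on PATH; the
# pager command string below is an example only.
if __name__ == '__main__':
    with pager_redirect('demo output', pagercmd='less -R'):
        for i in range(200):
            print('demo line', i)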
| []
| []
| []
| [] | [] | python | 0 | 0 | |
junos/resource_security_dynamic_address_feed_server_test.go | package junos_test
import (
"os"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)
func TestAccJunosSecurityDynamicAddressFeedServer_basic(t *testing.T) {
if os.Getenv("TESTACC_SRX") != "" {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccJunosSecurityDynamicAddressFeedServerConfigCreate(),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("junos_security_dynamic_address_feed_server.testacc_dyn_add_feed_srv",
"feed_name.#", "2"),
),
},
{
Config: testAccJunosSecurityDynamicAddressFeedServerConfigUpdate(),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("junos_security_dynamic_address_feed_server.testacc_dyn_add_feed_srv",
"feed_name.#", "3"),
),
},
{
ResourceName: "junos_security_dynamic_address_feed_server.testacc_dyn_add_feed_srv",
ImportState: true,
ImportStateVerify: true,
},
},
})
}
}
func testAccJunosSecurityDynamicAddressFeedServerConfigCreate() string {
return `
resource "junos_security_dynamic_address_feed_server" "testacc_dyn_add_feed_srv" {
name = "tfacc_dafeedsrv"
hostname = "example.com"
feed_name {
name = "feed_b"
path = "/srx/"
}
feed_name {
name = "feed_a"
path = "/srx/"
}
}
`
}
func testAccJunosSecurityDynamicAddressFeedServerConfigUpdate() string {
return `
resource "junos_security_dynamic_address_feed_server" "testacc_dyn_add_feed_srv" {
name = "tfacc_dafeedsrv"
hostname = "example.com/?test=#1"
description = "testacc junos_security_dynamic_address_feed_server"
feed_name {
name = "feed_b"
path = "/srx/"
description = "testacc junos_security_dynamic_address_feed_server feed_b"
hold_interval = 1110
update_interval = 120
}
feed_name {
name = "feed_a"
path = "/srx/"
hold_interval = 0
}
feed_name {
name = "feed_0"
path = "/srx/"
description = "testacc junos_security_dynamic_address_feed_server feed_0"
hold_interval = 1130
update_interval = 140
}
hold_interval = 1150
update_interval = 160
}
`
}
| [
"\"TESTACC_SRX\""
]
| []
| [
"TESTACC_SRX"
]
| [] | ["TESTACC_SRX"] | go | 1 | 0 | |
share/qt/extract_strings_qt.py | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/asocoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
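Illustrative example (added for clarity, not in the original docstring):
for the two-line fragment
    msgid "Hello"
    msgstr ""
this returns [(['"Hello"'], ['""'])].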
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *asocoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("asocoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| []
| []
| [
"XGETTEXT"
]
| [] | ["XGETTEXT"] | python | 1 | 0 | |
cli/service.go | package main
import (
"github.com/kardianos/service"
"log"
"os"
"os/user"
"runtime"
)
type sol struct {
}
func (s *sol) Start(srv service.Service) error {
go runServer(DaemonPort)
return nil
}
func (s *sol) Stop(srv service.Service) error {
return nil
}
func getRunUser() string {
currentUser, _ := user.Current()
if currentUser.Username == "root" && runtime.GOOS != "windows" {
return os.Getenv("SUDO_USER")
}
return currentUser.Username
}
func getCVPMDConfig() *service.Config {
realUsername := getRunUser()
srvConf := &service.Config{
Name: "cvpmd",
DisplayName: "CVPM Daemon",
Description: "Computer Vision Package Manager[Daemon]",
Arguments: []string{"daemon", "run"},
UserName: realUsername,
}
return srvConf
}
// cvpm daemon install -> install the background daemon service
func InstallService() {
srvConfig := getCVPMDConfig()
dae := &sol{}
s, err := service.New(dae, srvConfig)
if err != nil {
log.Fatal(err)
}
err = s.Install()
if err != nil {
log.Fatal(err)
}
err = s.Start()
if err != nil {
log.Fatal(err)
}
}
// cvpm daemon uninstall -> uninstall the background daemon service
func UninstallService() {
srvConfig := getCVPMDConfig()
dae := &sol{}
s, err := service.New(dae, srvConfig)
if err != nil {
log.Fatal(err)
}
err = s.Stop()
if err != nil {
log.Fatal(err)
}
err = s.Uninstall()
if err != nil {
log.Fatal(err)
}
}
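// handleServiceAction is a hypothetical usage sketch (not part of the original
// CLI wiring): it dispatches the install/uninstall helpers above from a plain
// string argument. The real project routes these calls through its
// command-line framework instead.
func handleServiceAction(action string) {
	switch action {
	case "install":
		InstallService()
	case "uninstall":
		UninstallService()
	default:
		log.Fatalf("unknown service action: %q", action)
	}
}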
| [
"\"SUDO_USER\""
]
| []
| [
"SUDO_USER"
]
| [] | ["SUDO_USER"] | go | 1 | 0 | |
astropy/utils/iers/tests/test_iers.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import warnings
from pathlib import Path
import pytest
import numpy as np
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.iers import iers
from astropy import units as u
from astropy.table import QTable
from astropy.time import Time, TimeDelta
TRAVIS = os.environ.get('TRAVIS', False)
FILE_NOT_FOUND_ERROR = getattr(__builtins__, 'FileNotFoundError', OSError)
try:
iers.IERS_A.open('finals2000A.all') # check if IERS_A is available
except OSError:
HAS_IERS_A = False
else:
HAS_IERS_A = True
IERS_A_EXCERPT = get_pkg_data_filename(os.path.join('data', 'iers_a_excerpt'))
def setup_module():
# Need auto_download so that IERS_B won't be loaded and cause tests to
# fail. Files to be downloaded are handled appropriately in the tests.
iers.conf.auto_download = True
def teardown_module():
# This setting is to be consistent with astropy/conftest.py
iers.conf.auto_download = False
class TestBasic():
"""Basic tests that IERS_B returns correct values"""
@pytest.mark.parametrize('iers_cls', (iers.IERS_B, iers.IERS))
def test_simple(self, iers_cls):
"""Test the default behaviour for IERS_B and IERS."""
# Arguably, IERS itself should not be used at all, but it used to
# provide IERS_B by default so we check that it continues to do so.
# Eventually, IERS should probably be deprecated.
iers_cls.close()
assert iers_cls.iers_table is None
iers_tab = iers_cls.open()
assert iers_cls.iers_table is not None
assert iers_cls.iers_table is iers_tab
assert isinstance(iers_tab, QTable)
assert isinstance(iers_tab, iers.IERS_B)
assert (iers_tab['UT1_UTC'].unit / u.second).is_unity()
assert (iers_tab['PM_x'].unit / u.arcsecond).is_unity()
assert (iers_tab['PM_y'].unit / u.arcsecond).is_unity()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc = iers_tab.ut1_utc(jd1, jd2)
assert isinstance(ut1_utc, u.Quantity)
assert (ut1_utc.unit / u.second).is_unity()
# IERS files change at the 0.1 ms level; see gh-6981
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
# should be future-proof; surely we've moved to another planet by then
with pytest.raises(IndexError):
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.)
# also check it returns the right status
ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status2 == iers.FROM_IERS_B)
ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status4 == iers.TIME_BEYOND_IERS_RANGE
# check it works via Time too
t = Time(jd1, jd2, format='jd', scale='utc')
ut1_utc3 = iers_tab.ut1_utc(t)
assert_quantity_allclose(ut1_utc3, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
def test_open_filename(self):
iers.IERS_B.close()
iers.IERS_B.open(iers.IERS_B_FILE)
assert iers.IERS_B.iers_table is not None
assert isinstance(iers.IERS_B.iers_table, QTable)
iers.IERS_B.close()
with pytest.raises(FILE_NOT_FOUND_ERROR):
iers.IERS_B.open('surely this does not exist')
def test_open_network_url(self):
iers.IERS_A.close()
iers.IERS_A.open(Path(IERS_A_EXCERPT).as_uri())
assert iers.IERS_A.iers_table is not None
assert isinstance(iers.IERS_A.iers_table, QTable)
iers.IERS_A.close()
class TestIERS_AExcerpt():
def test_simple(self):
# Test the IERS A reader. It is also a regression test that ensures
# values do not get overridden by IERS B; see #4933.
iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)
assert (iers_tab['UT1_UTC'].unit / u.second).is_unity()
assert 'P' in iers_tab['UT1Flag']
assert 'I' in iers_tab['UT1Flag']
assert 'B' in iers_tab['UT1Flag']
assert np.all((iers_tab['UT1Flag'] == 'I') |
(iers_tab['UT1Flag'] == 'P') |
(iers_tab['UT1Flag'] == 'B'))
assert (iers_tab['dX_2000A'].unit / u.marcsec).is_unity()
assert (iers_tab['dY_2000A'].unit / u.marcsec).is_unity()
assert 'P' in iers_tab['NutFlag']
assert 'I' in iers_tab['NutFlag']
assert 'B' in iers_tab['NutFlag']
assert np.all((iers_tab['NutFlag'] == 'P') |
(iers_tab['NutFlag'] == 'I') |
(iers_tab['NutFlag'] == 'B'))
assert (iers_tab['PM_x'].unit / u.arcsecond).is_unity()
assert (iers_tab['PM_y'].unit / u.arcsecond).is_unity()
assert 'P' in iers_tab['PolPMFlag']
assert 'I' in iers_tab['PolPMFlag']
assert 'B' in iers_tab['PolPMFlag']
assert np.all((iers_tab['PolPMFlag'] == 'P') |
(iers_tab['PolPMFlag'] == 'I') |
(iers_tab['PolPMFlag'] == 'B'))
t = Time([57053., 57054., 57055.], format='mjd')
ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
assert_quantity_allclose(ut1_utc,
[-0.4916557, -0.4925323, -0.4934373] * u.s,
atol=0.1*u.ms)
dcip_x, dcip_y, status = iers_tab.dcip_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
print(dcip_x)
print(dcip_y)
assert_quantity_allclose(dcip_x,
[-0.086, -0.093, -0.087] * u.marcsec,
atol=1.*u.narcsec)
assert_quantity_allclose(dcip_y,
[0.094, 0.081, 0.072] * u.marcsec,
atol=1*u.narcsec)
pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
assert_quantity_allclose(pm_x,
[0.003734, 0.004581, 0.004623] * u.arcsec,
atol=0.1*u.marcsec)
assert_quantity_allclose(pm_y,
[0.310824, 0.313150, 0.315517] * u.arcsec,
atol=0.1*u.marcsec)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
@pytest.mark.skipif('not HAS_IERS_A')
class TestIERS_A():
def test_simple(self):
"""Test that open() by default reads a 'finals2000A.all' file."""
# Ensure we remove any cached table (gh-5131).
iers.IERS_A.close()
iers_tab = iers.IERS_A.open()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status == iers.FROM_IERS_B)
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status2 == iers.TIME_BEYOND_IERS_RANGE
tnow = Time.now()
ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)
assert status3 == iers.FROM_IERS_A_PREDICTION
assert ut1_utc3 != 0.
class TestIERS_Auto():
def setup_class(self):
"""Set up useful data for the tests.
"""
self.N = 40
self.ame = 30.0
self.iers_a_file_1 = get_pkg_data_filename(
os.path.join('data', 'finals2000A-2016-02-30-test'))
self.iers_a_file_2 = get_pkg_data_filename(
os.path.join('data', 'finals2000A-2016-04-30-test'))
self.iers_a_url_1 = Path(self.iers_a_file_1).as_uri()
self.iers_a_url_2 = Path(self.iers_a_file_2).as_uri()
self.t = Time.now() + TimeDelta(10, format='jd') * np.arange(self.N)
def teardown_method(self, method):
"""Run this after every test.
"""
iers.IERS_Auto.close()
def test_interpolate_error_formatting(self):
"""Regression test: make sure the error message in
IERS_Auto._check_interpolate_indices() is formatted correctly.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('iers_auto_url_mirror', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', self.ame):
with pytest.raises(ValueError) as err:
iers_table = iers.IERS_Auto.open()
with warnings.catch_warnings():
# Ignoring this if it comes up -- IERS_Auto predictive
# values are older than 30.0 days but downloading the
# latest table did not find newer values
warnings.simplefilter('ignore', iers.IERSStaleWarning)
iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert str(err.value) == iers.INTERPOLATE_ERROR.format(self.ame)
def test_auto_max_age_none(self):
"""Make sure that iers.INTERPOLATE_ERROR's advice about setting
auto_max_age = None actually works.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', None):
iers_table = iers.IERS_Auto.open()
delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert isinstance(delta, np.ndarray)
assert delta.shape == (self.N,)
assert_quantity_allclose(delta, np.array([-0.2246227]*self.N)*u.s)
def test_auto_max_age_minimum(self):
"""Check that the minimum auto_max_age is enforced.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', 5.0):
with pytest.raises(ValueError) as err:
iers_table = iers.IERS_Auto.open()
_ = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert str(err.value) == 'IERS auto_max_age configuration value must be larger than 10 days'
def test_no_auto_download(self):
with iers.conf.set_temp('auto_download', False):
t = iers.IERS_Auto.open()
assert type(t) is iers.IERS_B
@pytest.mark.remote_data
def test_simple(self):
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
dat = iers.IERS_Auto.open()
assert dat['MJD'][0] == 57359.0 * u.d
assert dat['MJD'][-1] == 57539.0 * u.d
# Pretend we are accessing at a time 7 days after start of predictive data
predictive_mjd = dat.meta['predictive_mjd']
dat._time_now = Time(predictive_mjd, format='mjd') + 7 * u.d
# Look at times before and after the test file begins. 0.1292905 is
# the IERS-B value from MJD=57359. The value in
# finals2000A-2016-02-30-test has been replaced at this point.
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.2246227)
# Now pretend we are accessing at time 60 days after start of predictive data.
# There will be a warning when downloading the file doesn't give new data
# and an exception when extrapolating into the future with insufficient data.
dat._time_now = Time(predictive_mjd, format='mjd') + 60 * u.d
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
with pytest.warns(iers.IERSStaleWarning, match='IERS_Auto predictive '
'values are older') as warns, \
pytest.raises(ValueError, match='interpolating from IERS_Auto '
'using predictive values'):
dat.ut1_utc(Time(60000, format='mjd').jd)
assert len(warns) == 1
# Warning only if we are getting return status
with pytest.warns(iers.IERSStaleWarning, match='IERS_Auto '
'predictive values are older') as warns:
dat.ut1_utc(Time(60000, format='mjd').jd, return_status=True)
assert len(warns) == 1
# Now set auto_max_age = None which says that we don't care how old the
# available IERS-A file is. There should be no warnings or exceptions.
with iers.conf.set_temp('auto_max_age', None):
dat.ut1_utc(Time(60000, format='mjd').jd)
# Now point to a later file with same values but MJD increased by
# 60 days and see that things work. dat._time_now is still the same value
# as before, i.e. right around the start of predictive values for the new file.
# (In other words this is like downloading the latest file online right now).
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_2):
# Look at times before and after the test file begins. This forces a new download.
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.3)
# Now the time range should be different.
assert dat['MJD'][0] == 57359.0 * u.d
assert dat['MJD'][-1] == (57539.0 + 60) * u.d
@pytest.mark.remote_data
def test_IERS_B_parameters_loading_into_IERS_Auto():
A = iers.IERS_Auto.open()
B = iers.IERS_B.open()
ok_A = A["MJD"] <= B["MJD"][-1]
assert not np.all(ok_A), "IERS B covers all of IERS A: should not happen"
# We only overwrite IERS_B values in the IERS_A table that were already
# there in the first place. Better take that into account.
ok_A &= np.isfinite(A["UT1_UTC_B"])
i_B = np.searchsorted(B["MJD"], A["MJD"][ok_A])
assert np.all(np.diff(i_B) == 1), "Valid region not contiguous"
assert np.all(A["MJD"][ok_A] == B["MJD"][i_B])
# Check that values are copied correctly. Since units are not
# necessarily the same, we use allclose with very strict tolerance.
for name in ("UT1_UTC", "PM_x", "PM_y", "dX_2000A", "dY_2000A"):
assert_quantity_allclose(
A[name][ok_A], B[name][i_B], rtol=1e-15,
err_msg=("Bug #9206 IERS B parameter {} not copied over "
"correctly to IERS Auto".format(name)))
# Issue with FTP, rework test into previous one when it's fixed
@pytest.mark.skipif("TRAVIS", reason="Flaky on Travis CI")
@pytest.mark.remote_data
def test_iers_a_dl():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert 'UT1_UTC_A' in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_a_dl_mirror():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL_MIRROR, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert 'UT1_UTC_A' in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_b_dl():
iersb_tab = iers.IERS_B.open(iers.IERS_B_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersb_tab) > 0
assert 'UT1_UTC' in iersb_tab.colnames
finally:
iers.IERS_B.close()
| []
| []
| [
"TRAVIS"
]
| [] | ["TRAVIS"] | python | 1 | 0 | |
avalon/pipeline.py | """Core pipeline functionality"""
import os
import sys
import json
import errno
import types
import shutil
import getpass
import logging
import weakref
import inspect
import traceback
import importlib
from collections import OrderedDict
from . import (
io,
lib,
Session,
_registered_host,
_registered_root,
_registered_config,
_registered_plugins,
_registered_plugin_paths,
_registered_event_handlers,
)
from .vendor import six
self = sys.modules[__name__]
self._is_installed = False
self._config = None
self.data = {}
log = logging.getLogger(__name__)
AVALON_CONTAINER_ID = "pyblish.avalon.container"
class IncompatibleLoaderError(ValueError):
"""Error when Loader is incompatible with a representation."""
pass
def install(host):
"""Install `host` into the running Python session.
Arguments:
host (module): A Python module containing the Avalon
avalon host-interface.
"""
io.install()
missing = list()
for key in ("AVALON_PROJECT", "AVALON_ASSET"):
if key not in Session:
missing.append(key)
assert not missing, (
"%s missing from environment, %s" % (
", ".join(missing),
json.dumps(Session, indent=4, sort_keys=True)
))
project = Session["AVALON_PROJECT"]
log.info("Activating %s.." % project)
config = find_config()
# Optional host install function
if hasattr(host, "install"):
host.install(config)
register_host(host)
register_config(config)
config.install()
self._is_installed = True
self._config = config
log.info("Successfully installed Avalon!")
def find_config():
log.info("Finding configuration for project..")
config = Session["AVALON_CONFIG"]
if not config:
raise EnvironmentError("No configuration found in "
"the project nor environment")
log.info("Found %s, loading.." % config)
return importlib.import_module(config)
def uninstall():
"""Undo all of what `install()` did"""
config = registered_config()
try:
registered_host().uninstall(config)
except AttributeError:
pass
try:
config.uninstall()
except AttributeError:
pass
deregister_host()
deregister_config()
io.uninstall()
log.info("Successfully uninstalled Avalon!")
def is_installed():
"""Return state of installation
Returns:
True if installed, False otherwise
"""
return self._is_installed
def publish():
"""Shorthand to publish from within host"""
from pyblish import util
return util.publish()
@lib.log
class Loader(list):
"""Load representation into host application
Arguments:
context (dict): avalon-core:context-1.0
name (str, optional): Use pre-defined name
namespace (str, optional): Use pre-defined namespace
.. versionadded:: 4.0
This class was introduced
"""
families = list()
representations = list()
order = 0
def __init__(self, context):
template = context["project"]["config"]["template"]["publish"]
data = {
key: value["name"]
for key, value in context.items()
}
data["root"] = registered_root()
data["silo"] = context["asset"]["silo"]
fname = template.format(**data)
self.fname = fname
def load(self, context, name=None, namespace=None, options=None):
"""Load asset via database
Arguments:
context (dict): Full parenthood of representation to load
name (str, optional): Use pre-defined name
namespace (str, optional): Use pre-defined namespace
options (dict, optional): Additional settings dictionary
"""
raise NotImplementedError("Loader.load() must be "
"implemented by subclass")
def update(self, container, representation):
"""Update `container` to `representation`
Arguments:
container (avalon-core:container-1.0): Container to update,
from `host.ls()`.
representation (dict): Update the container to this representation.
"""
raise NotImplementedError("Loader.update() must be "
"implemented by subclass")
def remove(self, container):
"""Remove a container
Arguments:
container (avalon-core:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted
"""
raise NotImplementedError("Loader.remove() must be "
"implemented by subclass")
@lib.log
class Creator(object):
"""Determine how assets are created"""
label = None
family = None
defaults = None
def __init__(self, name, asset, options=None, data=None):
self.name = name # For backwards compatibility
self.options = options
# Default data
self.data = OrderedDict()
self.data["id"] = "pyblish.avalon.instance"
self.data["family"] = self.family
self.data["asset"] = asset
self.data["subset"] = name
self.data["active"] = True
self.data.update(data or {})
def process(self):
pass
@lib.log
class Action(object):
"""A custom action available"""
name = None
label = None
icon = None
color = None
order = 0
def is_compatible(self, session):
"""Return whether the class is compatible with the Session."""
return True
def process(self, session, **kwargs):
pass
class InventoryAction(object):
"""A custom action for the scene inventory tool
If registered the action will be visible in the Right Mouse Button menu
under the submenu "Actions".
"""
label = None
icon = None
color = None
order = 0
@staticmethod
def is_compatible(container):
"""Override function in a custom class
This method is specifically used to ensure the action can operate on
the container.
Args:
container(dict): the data of a loaded asset, see host.ls()
Returns:
bool
"""
return bool(container.get("objectName"))
def process(self, containers):
"""Override function in a custom class
This method will receive all containers even those which are
incompatible. It is advised to create a small filter along the lines
of this example:
valid_containers = [c for c in containers if self.is_compatible(c)]
The return value will need to be a True-ish value to trigger
the data_changed signal in order to refresh the view.
You can return a list of container names to trigger GUI to select
treeview items.
You can return a dict to carry extra GUI options. For example:
{
"objectNames": [container names...],
"options": {"mode": "toggle",
"clear": False}
}
Currently workable GUI options are:
- clear (bool): Clear current selection before selecting by action.
Default `True`.
- mode (str): selection mode, use one of these:
"select", "deselect", "toggle". Default is "select".
Args:
containers (list): list of dictionaries
Return:
bool, list or dict
"""
return True
class Application(Action):
"""Default application launcher
This is a convenience application Action that can launch the application
when "config" refers to a parsed application `.toml`.
"""
config = None
def is_compatible(self, session):
required = ["AVALON_PROJECTS",
"AVALON_PROJECT",
"AVALON_SILO",
"AVALON_ASSET",
"AVALON_TASK"]
missing = [x for x in required if x not in session]
if missing:
self.log.debug("Missing keys: %s" % (missing,))
return False
return True
def environ(self, session):
"""Build application environment"""
session = session.copy()
session["AVALON_APP"] = self.config["application_dir"]
session["AVALON_APP_NAME"] = self.name
# Compute work directory
project = io.find_one({"type": "project"})
template = project["config"]["template"]["work"]
workdir = _format_work_template(template, session)
session["AVALON_WORKDIR"] = workdir
# Construct application environment from .toml config
app_environment = self.config.get("environment", {})
for key, value in app_environment.copy().items():
if isinstance(value, list):
# Treat list values as paths, e.g. PYTHONPATH=[]
app_environment[key] = os.pathsep.join(value)
elif isinstance(value, six.string_types):
if lib.PY2:
# Protect against unicode in the environment
encoding = sys.getfilesystemencoding()
app_environment[key] = value.encode(encoding)
else:
app_environment[key] = value
else:
log.error(
"%s: Unsupported environment reference in %s for %s"
% (value, self.name, key)
)
# Build environment
env = os.environ.copy()
env.update(session)
app_environment = self._format(app_environment, **env)
env.update(app_environment)
return env
def initialize(self, environment):
"""Initialize work directory"""
# Create working directory
workdir = environment["AVALON_WORKDIR"]
workdir_existed = os.path.exists(workdir)
if not workdir_existed:
os.makedirs(workdir)
self.log.info("Creating working directory '%s'" % workdir)
# Create default directories from app configuration
default_dirs = self.config.get("default_dirs", [])
default_dirs = self._format(default_dirs, **environment)
if default_dirs:
self.log.debug("Creating default directories..")
for dirname in default_dirs:
try:
os.makedirs(os.path.join(workdir, dirname))
self.log.debug(" - %s" % dirname)
except OSError as e:
# An already existing default directory is fine.
if e.errno == errno.EEXIST:
pass
else:
raise
# Perform application copy
for src, dst in self.config.get("copy", {}).items():
dst = os.path.join(workdir, dst)
# Expand env vars
src, dst = self._format([src, dst], **environment)
try:
self.log.info("Copying %s -> %s" % (src, dst))
shutil.copy(src, dst)
except OSError as e:
self.log.error("Could not copy application file: %s" % e)
self.log.error(" - %s -> %s" % (src, dst))
def launch(self, environment):
executable = lib.which(self.config["executable"])
if executable is None:
raise ValueError(
"'%s' not found on your PATH\n%s"
% (self.config["executable"], os.getenv("PATH"))
)
args = self.config.get("args", [])
return lib.launch(
executable=executable,
args=args,
environment=environment,
cwd=environment["AVALON_WORKDIR"]
)
def process(self, session, **kwargs):
"""Process the full Application action"""
environment = self.environ(session)
if kwargs.get("initialize", True):
self.initialize(environment)
if kwargs.get("launch", True):
return self.launch(environment)
def _format(self, original, **kwargs):
"""Utility recursive dict formatting that logs the error clearly."""
try:
return lib.dict_format(original, **kwargs)
except KeyError as e:
log.error(
"One of the {variables} defined in the application "
"definition wasn't found in this session.\n"
"The variable was %s " % e
)
log.error(json.dumps(kwargs, indent=4, sort_keys=True))
raise ValueError(
"This is typically a bug in the pipeline, "
"ask your developer.")
def discover(superclass):
"""Find and return subclasses of `superclass`"""
registered = _registered_plugins.get(superclass, list())
plugins = dict()
# Include plug-ins from registered paths
for path in _registered_plugin_paths.get(superclass, list()):
for module in lib.modules_from_path(path):
for plugin in plugin_from_module(superclass, module):
if plugin.__name__ in plugins:
print("Duplicate plug-in found: %s" % plugin)
continue
plugins[plugin.__name__] = plugin
for plugin in registered:
if plugin.__name__ in plugins:
print("Warning: Overwriting %s" % plugin.__name__)
plugins[plugin.__name__] = plugin
return sorted(plugins.values(), key=lambda Plugin: Plugin.__name__)
def plugin_from_module(superclass, module):
"""Return plug-ins from module
Arguments:
superclass (superclass): Superclass of subclasses to look for
module (types.ModuleType): Imported module from which to
parse valid Avalon plug-ins.
Returns:
List of plug-ins, or empty list if none is found.
"""
types = list()
def recursive_bases(klass):
r = []
bases = klass.__bases__
r.extend(bases)
for base in bases:
r.extend(recursive_bases(base))
return r
for name in dir(module):
# It could be anything at this point
obj = getattr(module, name)
if not inspect.isclass(obj):
continue
# These are subclassed from nothing, not even `object`
if not len(obj.__bases__) > 0:
continue
# Use string comparison rather than `issubclass`
# in order to support reloading of this module.
bases = recursive_bases(obj)
if not any(base.__name__ == superclass.__name__ for base in bases):
continue
types.append(obj)
return types
def on(event, callback):
"""Call `callback` on `event`
Register `callback` to be run when `event` occurs.
Example:
>>> def on_init():
... print("Init happened")
...
>>> on("init", on_init)
>>> del on_init
Arguments:
event (str): Name of event
callback (callable): Any callable
"""
if event not in _registered_event_handlers:
_registered_event_handlers[event] = weakref.WeakSet()
events = _registered_event_handlers[event]
events.add(callback)
def before(event, callback):
"""Convenience to `on()` for before-events"""
on("before_" + event, callback)
def after(event, callback):
"""Convenience to `on()` for after-events"""
on("after_" + event, callback)
def emit(event, args=None):
"""Trigger an `event`
Example:
>>> def on_init():
... print("Init happened")
...
>>> on("init", on_init)
>>> emit("init")
Init happened
>>> del on_init
Arguments:
event (str): Name of event
args (list, optional): List of arguments passed to callback
"""
callbacks = _registered_event_handlers.get(event, set())
args = args or list()
for callback in callbacks:
try:
callback(*args)
except Exception:
log.warning(traceback.format_exc())
def register_plugin(superclass, obj):
"""Register an individual `obj` of type `superclass`
Arguments:
superclass (type): Superclass of plug-in
obj (object): Subclass of `superclass`
"""
if superclass not in _registered_plugins:
_registered_plugins[superclass] = list()
if obj not in _registered_plugins[superclass]:
_registered_plugins[superclass].append(obj)
def register_plugin_path(superclass, path):
"""Register a directory of one or more plug-ins
Arguments:
superclass (type): Superclass of plug-ins to look for during discovery
path (str): Absolute path to directory in which to discover plug-ins
"""
if superclass not in _registered_plugin_paths:
_registered_plugin_paths[superclass] = list()
path = os.path.normpath(path)
if path not in _registered_plugin_paths[superclass]:
_registered_plugin_paths[superclass].append(path)
def registered_plugin_paths():
"""Return all currently registered plug-in paths"""
# Prohibit editing in-place
duplicate = {
superclass: paths[:]
for superclass, paths in _registered_plugin_paths.items()
}
return duplicate
def deregister_plugin(superclass, plugin):
"""Oppsite of `register_plugin()`"""
_registered_plugins[superclass].remove(plugin)
def deregister_plugin_path(superclass, path):
"""Oppsite of `register_plugin_path()`"""
_registered_plugin_paths[superclass].remove(path)
def register_root(path):
"""Register currently active root"""
log.info("Registering root: %s" % path)
_registered_root["_"] = path
def registered_root():
"""Return currently registered root"""
return os.path.normpath(
_registered_root["_"] or
Session.get("AVALON_PROJECTS") or ""
)
def register_host(host):
"""Register a new host for the current process
Arguments:
host (ModuleType): A module implementing the
Host API interface. See the Host API
documentation for details on what is
required, or browse the source code.
"""
signatures = {
"ls": []
}
_validate_signature(host, signatures)
_registered_host["_"] = host
def register_config(config):
"""Register a new config for the current process
Arguments:
config (ModuleType): A module implementing the Config API.
"""
signatures = {
"install": [],
"uninstall": [],
}
_validate_signature(config, signatures)
_registered_config["_"] = config
def _validate_signature(module, signatures):
# Required signatures for each member
missing = list()
invalid = list()
success = True
for member in signatures:
if not hasattr(module, member):
missing.append(member)
success = False
else:
attr = getattr(module, member)
signature = inspect.getargspec(attr)[0]
required_signature = signatures[member]
assert isinstance(signature, list)
assert isinstance(required_signature, list)
if not all(member in signature
for member in required_signature):
invalid.append({
"member": member,
"signature": ", ".join(signature),
"required": ", ".join(required_signature)
})
success = False
if not success:
report = list()
if missing:
report.append(
"Incomplete interface for module: '%s'\n"
"Missing: %s" % (module, ", ".join(
"'%s'" % member for member in missing))
)
if invalid:
report.append(
"'%s': One or more members were found, but didn't "
"have the right argument signature." % module.__name__
)
for member in invalid:
report.append(
" Found: {member}({signature})".format(**member)
)
report.append(
" Expected: {member}({required})".format(**member)
)
raise ValueError("\n".join(report))
def deregister_config():
"""Undo `register_config()`"""
_registered_config["_"] = None
def registered_config():
"""Return currently registered config"""
return _registered_config["_"]
def registered_host():
"""Return currently registered host"""
return _registered_host["_"]
def deregister_host():
_registered_host["_"] = default_host()
def default_host():
"""A default host, in place of anything better
This may be considered a reference for the
interface a host must implement. It also ensures
that the system runs, even when nothing is there
to support it.
"""
host = types.ModuleType("defaultHost")
def ls():
return list()
host.__dict__.update({
"ls": ls
})
return host
def debug_host():
"""A debug host, useful to debugging features that depend on a host"""
host = types.ModuleType("debugHost")
def ls():
containers = [
{
"representation": "ee-ft-a-uuid1",
"schema": "avalon-core:container-1.0",
"name": "Bruce01",
"objectName": "Bruce01_node",
"namespace": "_bruce01_",
"version": 3,
},
{
"representation": "aa-bc-s-uuid2",
"schema": "avalon-core:container-1.0",
"name": "Bruce02",
"objectName": "Bruce01_node",
"namespace": "_bruce02_",
"version": 2,
}
]
for container in containers:
yield container
host.__dict__.update({
"ls": ls
})
return host
def create(name, asset, family, options=None, data=None):
"""Create a new instance
Associate nodes with a subset and family. These nodes are later
validated, according to their `family`, and integrated into the
shared environment, relative their `subset`.
Data relative to each family, along with default data, are imprinted
into the resulting objectSet. This data is later used by extractors
and finally asset browsers to help identify the origin of the asset.
Arguments:
name (str): Name of subset
asset (str): Name of asset
family (str): Name of family
options (dict, optional): Additional options from GUI
data (dict, optional): Additional data from GUI
Raises:
NameError if `subset` already exists
KeyError on invalid dynamic property
RuntimeError on host error
Returns:
Name of instance
"""
host = registered_host()
plugins = list()
for Plugin in discover(Creator):
has_family = family == Plugin.family
if not has_family:
continue
Plugin.log.info(
"Creating '%s' with '%s'" % (name, Plugin.__name__)
)
try:
plugin = Plugin(name, asset, options, data)
with host.maintained_selection():
print("Running %s" % plugin)
instance = plugin.process()
except Exception as e:
log.warning(e)
continue
plugins.append(plugin)
assert plugins, "No Creator plug-ins were run, this is a bug"
return instance
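def _example_create_instance(asset_name):
    """Hypothetical sketch, not part of the Avalon API: create a new instance
    for `asset_name`. The subset name, family and options used here are
    illustrative placeholders; a real config registers its own Creator
    plug-ins for the families it supports."""
    return create(name="modelMain",
                  asset=asset_name,
                  family="avalon.model",
                  options={"useSelection": True})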
def get_representation_context(representation):
"""Return parenthood context for representation.
Args:
representation (str or io.ObjectId or dict): The representation id
or full representation as returned by the database.
Returns:
dict: The full representation context.
"""
assert representation is not None, "This is a bug"
if isinstance(representation, (six.string_types, io.ObjectId)):
representation = io.find_one(
{"_id": io.ObjectId(str(representation))})
version, subset, asset, project = io.parenthood(representation)
assert all([representation, version, subset, asset, project]), (
"This is a bug"
)
context = {
"project": project,
"asset": asset,
"subset": subset,
"version": version,
"representation": representation,
}
return context
def update_current_task(task=None, asset=None, app=None):
"""Update active Session to a new task work area.
This updates the live Session to a different `asset`, `task` or `app`.
Args:
task (str): The task to set.
asset (str): The asset to set.
app (str): The app to set.
Returns:
dict: The changed key, values in the current Session.
"""
mapping = {
"AVALON_ASSET": asset,
"AVALON_TASK": task,
"AVALON_APP": app,
}
changed = {key: value for key, value in mapping.items() if value}
if not changed:
return
# Update silo when asset changed
if "AVALON_ASSET" in changed:
asset_document = io.find_one({"name": changed["AVALON_ASSET"],
"type": "asset"},
projection={"silo": True})
assert asset_document, "Asset must exist"
changed["AVALON_SILO"] = asset_document["silo"]
# Compute work directory (with the temporary changed session so far)
project = io.find_one({"type": "project"},
projection={"config.template.work": True})
template = project["config"]["template"]["work"]
_session = Session.copy()
_session.update(changed)
changed["AVALON_WORKDIR"] = _format_work_template(template, _session)
# Update the full session in one go to avoid half updates
Session.update(changed)
# Update the environment
os.environ.update(changed)
# Emit session change
emit("taskChanged", changed.copy())
return changed
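def _example_change_task():
    """Hypothetical sketch, not part of the Avalon API: jump the live Session
    to another asset/task and return the keys that changed. The asset and
    task names are illustrative placeholders."""
    return update_current_task(task="modeling", asset="characterA")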
def _format_work_template(template, session=None):
"""Return a formatted configuration template with a Session.
Note: This *cannot* format the templates for published files since the
session does not hold the context for a published file. Instead use
`get_representation_path` to parse the full path to a published file.
Args:
template (str): The template to format.
session (dict, Optional): The Session to use. If not provided use the
currently active global Session.
Returns:
str: The fully formatted path.
"""
if session is None:
session = Session
return template.format(**{
"root": registered_root(),
"project": session["AVALON_PROJECT"],
"silo": session["AVALON_SILO"],
"asset": session["AVALON_ASSET"],
"task": session["AVALON_TASK"],
"app": session["AVALON_APP"],
"user": session.get("AVALON_USER", getpass.getuser())
})
def _make_backwards_compatible_loader(Loader):
"""Convert a old-style Loaders with `process` method to new-style Loader
This will make a dynamic class inheriting the old-style loader together
with a BackwardsCompatibleLoader. This backwards compatible loader will
expose `load`, `remove` and `update` in the same old way for Maya loaders.
The `load` method will then call `process()` just like before.
"""
# Assume a new-style loader when no `process` method is exposed;
# in that case we don't swap the loader for a backwards compatible one.
if not hasattr(Loader, "process"):
return Loader
log.warning("Making loader backwards compatible: %s", Loader.__name__)
from avalon.maya.compat import BackwardsCompatibleLoader
return type(Loader.__name__, (BackwardsCompatibleLoader, Loader), {})
def load(Loader, representation, namespace=None, name=None, options=None,
**kwargs):
"""Use Loader to load a representation.
Args:
Loader (Loader): The loader class to trigger.
representation (str or io.ObjectId or dict): The representation id
or full representation as returned by the database.
namespace (str, Optional): The namespace to assign. Defaults to None.
name (str, Optional): The name to assign. Defaults to subset name.
options (dict, Optional): Additional options to pass on to the loader.
Returns:
The return of the `loader.load()` method.
Raises:
IncompatibleLoaderError: When the loader is not compatible with
the representation.
"""
Loader = _make_backwards_compatible_loader(Loader)
context = get_representation_context(representation)
# Ensure the Loader is compatible for the representation
if not is_compatible_loader(Loader, context):
raise IncompatibleLoaderError("Loader {} is incompatible with "
"{}".format(Loader.__name__,
context["subset"]["name"]))
# Ensure options is a dictionary when no explicit options provided
if options is None:
options = kwargs.get("data", dict()) # "data" for backward compat
assert isinstance(options, dict), "Options must be a dictionary"
# Fallback to subset when name is None
if name is None:
name = context["subset"]["name"]
log.info(
"Running '%s' on '%s'" % (Loader.__name__, context["asset"]["name"])
)
loader = Loader(context)
return loader.load(context, name, namespace, options)
def _get_container_loader(container):
"""Return the Loader corresponding to the container"""
loader = container["loader"]
for Plugin in discover(Loader):
# TODO: Ensure the loader is valid
if Plugin.__name__ == loader:
return Plugin
def remove(container):
"""Remove a container"""
Loader = _get_container_loader(container)
if not Loader:
raise RuntimeError("Can't remove container. See log for details.")
Loader = _make_backwards_compatible_loader(Loader)
loader = Loader(get_representation_context(container["representation"]))
return loader.remove(container)
def update(container, version=-1):
"""Update a container"""
# Determine the target version, starting from the container's 'representation'
current_representation = io.find_one({
"_id": io.ObjectId(container["representation"])
})
assert current_representation is not None, "This is a bug"
current_version, subset, asset, project = io.parenthood(
current_representation)
if version == -1:
new_version = io.find_one({
"type": "version",
"parent": subset["_id"]
}, sort=[("name", -1)])
else:
new_version = io.find_one({
"type": "version",
"parent": subset["_id"],
"name": version,
})
assert new_version is not None, "This is a bug"
new_representation = io.find_one({
"type": "representation",
"parent": new_version["_id"],
"name": current_representation["name"]
})
# Run update on the Loader for this container
Loader = _get_container_loader(container)
if not Loader:
raise RuntimeError("Can't update container. See log for details.")
Loader = _make_backwards_compatible_loader(Loader)
loader = Loader(get_representation_context(container["representation"]))
return loader.update(container, new_representation)
def switch(container, representation):
"""Switch a container to representation
Args:
container (dict): container information
representation (dict): representation data from document
Returns:
function call
"""
# Get the Loader for this container
Loader = _get_container_loader(container)
if not Loader:
raise RuntimeError("Can't switch container. See log for details.")
if not hasattr(Loader, "switch"):
# Backwards compatibility: not every Loader class supports "switch".
# It might be better to have "switch" raise NotImplementedError
# on the Loader base class instead.
raise RuntimeError("Loader '{}' does not support 'switch'".format(
Loader.label
))
# Get the new representation to switch to
new_representation = io.find_one({
"type": "representation",
"_id": representation["_id"],
})
new_context = get_representation_context(new_representation)
assert is_compatible_loader(Loader, new_context), ("Must be compatible "
"Loader")
Loader = _make_backwards_compatible_loader(Loader)
loader = Loader(new_context)
return loader.switch(container, new_representation)
def get_representation_path(representation):
"""Get filename from representation document
Args:
representation(dict): representation document from the database
Returns:
str: fullpath of the representation
"""
version_, subset, asset, project = io.parenthood(representation)
template_publish = project["config"]["template"]["publish"]
return template_publish.format(**{
"root": registered_root(),
"project": project["name"],
"asset": asset["name"],
"silo": asset["silo"],
"subset": subset["name"],
"version": version_["name"],
"representation": representation["name"],
"user": Session.get("AVALON_USER", getpass.getuser()),
"app": Session.get("AVALON_APP", ""),
"task": Session.get("AVALON_TASK", "")
})
def is_compatible_loader(Loader, context):
"""Return whether a loader is compatible with a context.
This checks the version's families and the representation for the given
Loader.
Returns:
bool
"""
families = context["version"]["data"]["families"]
representation = context["representation"]
has_family = ("*" in Loader.families or
any(family in Loader.families for family in families))
has_representation = ("*" in Loader.representations or
representation["name"] in Loader.representations)
return has_family and has_representation
def loaders_from_representation(loaders, representation):
"""Return all compatible loaders for a representation."""
context = get_representation_context(representation)
return [l for l in loaders if is_compatible_loader(l, context)]
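def _example_discover_loaders(plugin_dir, representation_id):
    """Hypothetical sketch, not part of the Avalon API: register a directory
    of Loader plug-ins, then return the ones compatible with the given
    representation id. Both arguments are illustrative placeholders."""
    register_plugin_path(Loader, plugin_dir)
    return loaders_from_representation(discover(Loader), representation_id)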
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
pkg/sql/executor.go | // Copyright 2019 The SQLFlow Authors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sql
import (
"bytes"
"database/sql"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"strings"
"sync"
"time"
pb "sqlflow.org/sqlflow/pkg/server/proto"
"sqlflow.org/sqlflow/pkg/sql/codegen/tensorflow"
"sqlflow.org/sqlflow/pkg/sql/codegen/xgboost"
)
// Run executes a SQL query and returns a stream of rows or messages
func Run(slct string, db *DB, modelDir string, session *pb.Session) *PipeReader {
splittedSQL, err := splitExtendedSQL(slct)
if err != nil {
rd, wr := Pipe()
// return the lexer error message to client side
go func() {
defer wr.Close()
wr.Write(err)
}()
return rd
}
if len(splittedSQL) == 2 {
return runExtendedSQL(slct, db, modelDir, session)
}
return runStandardSQL(slct, db)
}
// splitExtendedSQL splits an extended select statement into
// its select clause and the rest. For example,
//
// input:
// "select ... train ... with ..."
// output:
// ["select ...", "train ... with ..."].
//
// input:
// "select ... predict ... using ..."
// output:
// ["select ...", "predict ... using ..."].
//
// input:
// "select ..."
// output:
// ["select ..."]
func splitExtendedSQL(slct string) ([]string, error) {
l := newLexer(slct)
var n sqlSymType
var typ []int
var pos []int
for {
t := l.Lex(&n)
if t < 0 {
return []string{}, fmt.Errorf("Lex: Unknown problem %s", slct[0-t:])
}
if t == 0 {
break
}
typ = append(typ, t)
pos = append(pos, l.pos)
}
for i := 1; i < len(typ)-2; i++ {
if (typ[i] == TRAIN && typ[i+1] == IDENT && typ[i+2] == WITH) ||
(typ[i] == PREDICT && typ[i+1] == IDENT && typ[i+2] == USING) ||
(typ[i] == PREDICT && typ[i+1] == IDENT && typ[i+2] == WITH) ||
(typ[i] == ANALYZE && typ[i+1] == IDENT && typ[i+2] == WITH) ||
(typ[i] == ANALYZE && typ[i+1] == IDENT && typ[i+2] == USING) {
return []string{slct[:pos[i-1]], slct[pos[i-1]:]}, nil
}
}
return []string{slct}, nil
}
// SplitMultipleSQL returns a list of SQL statements if the input contains multiple
// SQL statements separated by ;
func SplitMultipleSQL(statements string) ([]string, error) {
l := newLexer(statements)
var n sqlSymType
var sqlList []string
splitPos := 0
for {
t := l.Lex(&n)
if t < 0 {
return []string{}, fmt.Errorf("Lex: Unknown problem %s", statements[0-t:])
}
if t == 0 {
if len(sqlList) == 0 {
// NOTE: this line supports executing a SQL statement without a trailing ";"
sqlList = append(sqlList, statements)
}
break
}
if t == ';' {
splited := statements[splitPos:l.pos]
splited = strings.TrimSpace(splited)
sqlList = append(sqlList, splited)
splitPos = l.pos
}
}
return sqlList, nil
}
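// runSQLBatch is a hypothetical usage sketch (not part of the original file):
// it splits a batch of statements with SplitMultipleSQL and hands each one to
// Run. The caller is expected to drain and close the returned readers.
func runSQLBatch(batch string, db *DB, modelDir string, session *pb.Session) ([]*PipeReader, error) {
	stmts, err := SplitMultipleSQL(batch)
	if err != nil {
		return nil, err
	}
	readers := make([]*PipeReader, 0, len(stmts))
	for _, stmt := range stmts {
		readers = append(readers, Run(stmt, db, modelDir, session))
	}
	return readers, nil
}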
// TODO(weiguo): isQuery is a hacky way to decide which API to call:
// https://golang.org/pkg/database/sql/#DB.Exec .
// We will need to extend our parser to be a full SQL parser in the future.
func isQuery(slct string) bool {
s := strings.ToUpper(strings.TrimSpace(slct))
has := strings.Contains
if strings.HasPrefix(s, "SELECT") && !has(s, "INTO") {
return true
}
if strings.HasPrefix(s, "SHOW") && (has(s, "CREATE") || has(s, "DATABASES") || has(s, "TABLES")) {
return true
}
if strings.HasPrefix(s, "DESCRIBE") {
return true
}
return false
}
func runStandardSQL(slct string, db *DB) *PipeReader {
if isQuery(slct) {
return runQuery(slct, db)
}
return runExec(slct, db)
}
// query runs slct and writes the retrieved rows into pipe wr.
func query(slct string, db *DB, wr *PipeWriter) error {
defer func(startAt time.Time) {
log.Debugf("runQuery %v finished, elapsed:%v", slct, time.Since(startAt))
}(time.Now())
rows, err := db.Query(slct)
if err != nil {
return fmt.Errorf("runQuery failed: %v", err)
}
defer rows.Close()
columns, err := rows.Columns()
if err != nil {
return fmt.Errorf("failed to get columns: %v", err)
}
columnTypes, err := rows.ColumnTypes()
if err != nil {
return fmt.Errorf("failed to get columnTypes: %v", err)
}
header := make(map[string]interface{})
header["columnNames"] = columns
if e := wr.Write(header); e != nil {
return e
}
for rows.Next() {
if e := parseRow(columns, columnTypes, rows, wr); e != nil {
return e
}
}
return nil
}
// parseRow calls rows.Scan to retrieve the current row, and converts
// each cell value from interface{} to a concrete value. It then
// writes the converted row into wr.
func parseRow(columns []string, columnTypes []*sql.ColumnType, rows *sql.Rows, wr *PipeWriter) error {
// Since we don't know the table schema in advance, we create
// a slice of empty interface and add column types at
// runtime. Some databases support dynamic types between rows,
// such as sqlite's affinity. So we move columnTypes inside
// the row.Next() loop.
count := len(columns)
values := make([]interface{}, count)
for i, ct := range columnTypes {
v, e := createByType(ct.ScanType())
if e != nil {
return e
}
values[i] = v
}
if err := rows.Scan(values...); err != nil {
return err
}
row := make([]interface{}, count)
for i, val := range values {
v, e := parseVal(val)
if e != nil {
return e
}
row[i] = v
}
if e := wr.Write(row); e != nil {
return e
}
return nil
}
// runQuery creates a pipe before starting a goroutine that executes
// query, which runs slct and writes retrieved rows to a pipe.
// runQuery returns the read end of the pipe. The caller doesn't have
// to close the pipe because the query goroutine will close it after
// data retrieval.
func runQuery(slct string, db *DB) *PipeReader {
// FIXME(tony): how to deal with large tables?
// TODO(tony): test on null table elements
rd, wr := Pipe()
go func() {
defer wr.Close()
if e := query(slct, db, wr); e != nil {
log.Errorf("runQuery error:%v", e)
if e != ErrClosedPipe {
if err := wr.Write(e); err != nil {
log.Errorf("runQuery error(piping):%v", err)
}
}
}
}()
return rd
}
func runExec(slct string, db *DB) *PipeReader {
rd, wr := Pipe()
go func() {
defer wr.Close()
err := func() error {
defer func(startAt time.Time) {
log.Debugf("runEexc %v finished, elapsed:%v", slct, time.Since(startAt))
}(time.Now())
res, e := db.Exec(slct)
if e != nil {
return fmt.Errorf("runExec failed: %v", e)
}
affected, e := res.RowsAffected()
if e != nil {
return fmt.Errorf("failed to get affected row number: %v", e)
}
if affected > 1 {
return wr.Write(fmt.Sprintf("%d rows affected", affected))
}
// gomaxcompute does not return affected rows number
if affected < 0 {
return wr.Write("OK")
}
return wr.Write(fmt.Sprintf("%d row affected", affected))
}()
if err != nil {
log.Errorf("runExec error:%v", err)
if err != ErrClosedPipe {
if err := wr.Write(err); err != nil {
log.Errorf("runExec error(piping):%v", err)
}
}
}
}()
return rd
}
func isUnsupervisedLearning(pr *extendedSelect) bool {
// TODO(Yancey1989): It's an immature way to determine whether it's an unsupervised learning model or not.
if pr.label == "" {
return true
}
return false
}
func runExtendedSQL(slct string, db *DB, modelDir string, session *pb.Session) *PipeReader {
rd, wr := Pipe()
go func() {
defer wr.Close()
err := func() error {
defer func(startAt time.Time) {
log.Debugf("runExtendedSQL %v finished, elapsed:%v", slct, time.Since(startAt))
}(time.Now())
pr, e := newParser().Parse(slct)
if e != nil {
return e
}
// NOTE: the temporary directory must be in a host directory
// which can be mounted to Docker containers. If I don't
// specify the "/tmp" prefix, ioutil.TempDir would by default
// generate a directory in /private/tmp for macOS, which
// cannot be mounted by Docker into the container. For more
// details, please refer to
// https://docs.docker.com/docker-for-mac/osxfs/#namespaces.
cwd, e := ioutil.TempDir("/tmp", "sqlflow")
if e != nil {
return e
}
defer os.RemoveAll(cwd)
if pr.train {
if os.Getenv("SQLFLOW_submitter") == "elasticdl" {
return elasticDLTrain(wr, pr, db, cwd, session, nil)
}
var ds *trainAndValDataset
if !isUnsupervisedLearning(pr) {
// TODO(weiguo): fix the hard-coded 0.8
if ds, e = newTrainAndValDataset(db, pr.standardSelect.String(), pr.standardSelect.tables[0], 0.8); e != nil {
return e
}
defer releaseTrainAndValDataset(db, ds)
}
// FIXME(weiguo): temporary branch to alps
if os.Getenv("SQLFLOW_submitter") == "alps" {
return alpsTrain(wr, pr, db, cwd, session, ds)
}
return train(wr, pr, db, cwd, modelDir, slct, session, ds)
}
if pr.analyze {
return analyze(wr, pr, db, cwd, modelDir)
}
// FIXME(weiguo): temporary branch to alps
if os.Getenv("SQLFLOW_submitter") == "alps" {
return alpsPred(wr, pr, db, cwd, session)
} else if os.Getenv("SQLFLOW_submitter") == "elasticdl" {
return elasticDLPredict(wr, pr, db, cwd, session, nil)
}
return pred(wr, pr, db, cwd, modelDir, session)
}()
if err != nil {
log.Errorf("runExtendedSQL error:%v", err)
if err != ErrClosedPipe {
if err := wr.Write(err); err != nil {
log.Errorf("runExtendedSQL error(piping):%v", err)
}
}
}
}()
return rd
}
type logChanWriter struct {
wr *PipeWriter
m sync.Mutex
buf bytes.Buffer
prev string
}
func (cw *logChanWriter) Write(p []byte) (n int, err error) {
// Both cmd.Stdout and cmd.Stderr are writing to cw
cw.m.Lock()
defer cw.m.Unlock()
n, err = cw.buf.Write(p)
if err != nil {
return n, err
}
for {
line, err := cw.buf.ReadString('\n')
cw.prev = cw.prev + line
// ReadString returns err != nil if and only if the returned data
// does not end in delim.
if err != nil {
break
}
if err := cw.wr.Write(cw.prev); err != nil {
return len(cw.prev), err
}
cw.prev = ""
}
return n, nil
}
func (cw *logChanWriter) Close() {
if len(cw.prev) > 0 {
cw.wr.Write(cw.prev)
cw.prev = ""
}
}
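// Illustrative sketch (not part of the original file): logChanWriter is
// line-buffered, so only complete lines reach the pipe and a trailing partial
// line is held back until the next newline or Close. For example:
//
//	cw := &logChanWriter{wr: wr}
//	cw.Write([]byte("epoch 1 done\nepo")) // pipes "epoch 1 done\n", keeps "epo"
//	cw.Write([]byte("ch 2 done\n"))       // pipes "epoch 2 done\n"
//	cw.Close()                            // flushes any remaining partial line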
func train(wr *PipeWriter, tr *extendedSelect, db *DB, cwd string, modelDir string, slct string, session *pb.Session, ds *trainAndValDataset) error {
_, e := verify(tr, db)
if e != nil {
return e
}
var program bytes.Buffer
if isXGBoostModel(tr.estimator) {
ir, err := generateTrainIR(tr, db.String())
if err != nil {
return err
}
err = InferFeatureColumns(ir)
if err != nil {
return err
}
code, err := xgboost.Train(ir)
if err != nil {
return err
}
program.WriteString(code)
} else {
ir, err := generateTrainIR(tr, db.String())
if err != nil {
return err
}
err = InferFeatureColumns(ir)
if err != nil {
return err
}
// TODO(typhoonzero): change to use validation clause to fill in ir.ValidationSelect
// Clustering model will have ds == nil
if ds == nil {
ir.ValidationSelect = ir.Select
} else {
ir.ValidationSelect = fmt.Sprintf("SELECT * FROM %s", ds.validation)
}
code, err := tensorflow.Train(ir)
if err != nil {
return err
}
program.WriteString(code)
}
cw := &logChanWriter{wr: wr}
var buf bytes.Buffer
buf.WriteString(fmt.Sprintf("\n==========Program======\n%s\n=======Program Output===========\n", program.String()))
w := io.MultiWriter(cw, &buf)
defer cw.Close()
cmd := sqlflowCmd(cwd, db.driverName)
cmd.Stdin = &program
cmd.Stdout = w
cmd.Stderr = w
if e := cmd.Run(); e != nil {
return fmt.Errorf("predict failed: %v\n %s", e, buf.String())
}
m := model{workDir: cwd, TrainSelect: slct}
if modelDir != "" {
return m.saveTar(modelDir, tr.save)
}
return m.save(db, tr.save)
}
func loadModelMeta(pr *extendedSelect, db *DB, cwd, modelDir, modelName string) (*extendedSelect, fieldTypes, error) {
var m *model
var e error
if modelDir != "" {
m, e = loadTar(modelDir, cwd, modelName)
} else {
m, e = load(db, modelName, cwd)
}
if e != nil {
return nil, nil, fmt.Errorf("load %v", e)
}
// Parse the training SELECT statement used to train
// the model for the prediction.
tr, e := newParser().Parse(m.TrainSelect)
if e != nil {
return nil, nil, fmt.Errorf("parse: TrainSelect %v raise %v", m.TrainSelect, e)
}
if e := verifyColumnNameAndType(tr, pr, db); e != nil {
return nil, nil, fmt.Errorf("verifyColumnNameAndType: %v", e)
}
pr.trainClause = tr.trainClause
fts, e := verify(pr, db)
if e != nil {
return nil, nil, fmt.Errorf("verify: %v", e)
}
return pr, fts, nil
}
func pred(wr *PipeWriter, pr *extendedSelect, db *DB, cwd string, modelDir string, session *pb.Session) error {
pr, _, e := loadModelMeta(pr, db, cwd, modelDir, pr.model)
if e != nil {
return fmt.Errorf("loadModelMeta %v", e)
}
var program bytes.Buffer
if isXGBoostModel(pr.estimator) {
ir, err := generatePredictIR(pr, db.String(), cwd, modelDir)
if err != nil {
return err
}
code, err := xgboost.Pred(ir, session)
if err != nil {
return err
}
err = createPredictionTable(pr, db, session)
if err != nil {
return err
}
program.WriteString(code)
} else {
ir, err := generatePredictIR(pr, db.String(), cwd, modelDir)
if err != nil {
return err
}
err = InferFeatureColumns(ir.TrainIR)
if err != nil {
return err
}
code, err := tensorflow.Pred(ir, session)
if err != nil {
return err
}
err = createPredictionTable(pr, db, session)
if err != nil {
return err
}
program.WriteString(code)
}
var buf bytes.Buffer
buf.WriteString(fmt.Sprintf("\n==========Program======\n%s\n=======Program Output===========\n", program.String()))
cw := &logChanWriter{wr: wr}
w := io.MultiWriter(cw, &buf)
defer cw.Close()
cmd := sqlflowCmd(cwd, db.driverName)
cmd.Env = os.Environ()
cmd.Stdin = &program
cmd.Stdout = w
cmd.Stderr = w
if e := cmd.Run(); e != nil {
return fmt.Errorf("predict failed: %v\n %s", e, buf.String())
}
return nil
}
func analyze(wr *PipeWriter, pr *extendedSelect, db *DB, cwd, modelDir string) error {
cmd := exec.Command("python", "-u")
cmd.Dir = cwd
ir, err := generateAnalyzeIR(pr, db.String(), cwd, modelDir)
if err != nil {
return err
}
if !strings.HasPrefix(strings.ToUpper(ir.TrainIR.Estimator), `XGBOOST.`) {
return fmt.Errorf("unsupported model %s", ir.TrainIR.Estimator)
}
code, err := xgboost.Analyze(ir)
if err != nil {
return err
}
var program bytes.Buffer
program.WriteString(code)
cmd.Stdin = &program
if _, err := cmd.CombinedOutput(); err != nil {
return err
}
imgFile, err := os.Open(path.Join(cwd, "summary.png"))
if err != nil {
return err
}
defer imgFile.Close()
imgBytes, err := ioutil.ReadAll(imgFile)
if err != nil {
return err
}
imgBase64Str := base64.StdEncoding.EncodeToString(imgBytes)
img2html := fmt.Sprintf("<div align='center'><img src='data:image/png;base64,%s' /></div>", imgBase64Str)
wr.Write(img2html)
return nil
}
// Create prediction table with appropriate column type.
// If prediction table already exists, it will be overwritten.
func createPredictionTable(predParsed *extendedSelect, db *DB, session *pb.Session) error {
tableName, columnName, e := parseTableColumn(predParsed.into)
if e != nil {
return fmt.Errorf("invalid predParsed.into, %v", e)
}
dropStmt := fmt.Sprintf("drop table if exists %s;", tableName)
if _, e := db.Exec(dropStmt); e != nil {
return fmt.Errorf("failed executing %s: %q", dropStmt, e)
}
fts, e := verify(predParsed, db)
if e != nil {
return e
}
var b bytes.Buffer
fmt.Fprintf(&b, "create table %s (", tableName)
for _, c := range predParsed.columns["feature_columns"] {
name, err := getExpressionFieldName(c)
if err != nil {
return err
}
typ, ok := fts.get(name)
if !ok {
return fmt.Errorf("createPredictionTable: Cannot find type of field %s", name)
}
stype, e := universalizeColumnType(db.driverName, typ)
if e != nil {
return e
}
fmt.Fprintf(&b, "%s %s, ", name, stype)
}
// TODO(Yancey1989): For the current implementation, the prediction result column
// type is derived from the pred-select-statement; a better way would be to derive
// the result column type from the prediction result itself.
typ, ok := fts.get(columnName)
if !ok {
// NOTE(typhoonzero): Clustering models may not have a label in the select statement; default to the INT type
typ = "INT"
}
stype, e := universalizeColumnType(db.driverName, typ)
if e != nil {
return e
}
if db.driverName == "hive" {
fmt.Fprintf(&b, "%s %s) ROW FORMAT DELIMITED FIELDS TERMINATED BY \"\\001\" STORED AS TEXTFILE;", columnName, stype)
} else {
fmt.Fprintf(&b, "%s %s);", columnName, stype)
}
createStmt := b.String()
if _, e := db.Exec(createStmt); e != nil {
return fmt.Errorf("failed executing %s: %q", createStmt, e)
}
return nil
}
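// Illustrative sketch (assumption, not part of the original file): for a
// hypothetical `PREDICT iris.predict.class` clause with two feature columns,
// the statements built above have roughly this shape (actual column types come
// from universalizeColumnType and depend on the driver):
//
//	drop table if exists iris.predict;
//	create table iris.predict (sepal_length FLOAT, sepal_width FLOAT, class INT);
//
// On Hive, the CREATE statement additionally ends with
// `ROW FORMAT DELIMITED FIELDS TERMINATED BY "\001" STORED AS TEXTFILE`.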
// -------------------------- utilities --------------------------------------
func isXGBoostModel(estimator string) bool {
return strings.HasPrefix(strings.ToUpper(estimator), `XGBOOST.`)
}
func enableIR() bool {
return os.Getenv("SQLFLOW_codegen") == "ir"
}
func parseTableColumn(s string) (string, string, error) {
pos := strings.LastIndex(s, ".")
if pos == -1 || pos == len(s)-1 {
return "", "", fmt.Errorf("can not separate %s to table and column", s)
}
return s[:pos], s[pos+1:], nil
}
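// exampleParseTableColumn is an illustrative sketch, not part of the original
// file: it only demonstrates how parseTableColumn splits on the last dot.
// "iris.predict.class" is a made-up input value.
func exampleParseTableColumn() {
	table, column, err := parseTableColumn("iris.predict.class")
	if err != nil {
		log.Errorf("parseTableColumn example failed: %v", err)
		return
	}
	// table == "iris.predict", column == "class"
	log.Debugf("table=%s column=%s", table, column)
}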
| [
"\"SQLFLOW_submitter\"",
"\"SQLFLOW_submitter\"",
"\"SQLFLOW_submitter\"",
"\"SQLFLOW_submitter\"",
"\"SQLFLOW_codegen\""
]
| []
| [
"SQLFLOW_submitter",
"SQLFLOW_codegen"
]
| [] | ["SQLFLOW_submitter", "SQLFLOW_codegen"] | go | 2 | 0 | |
scripts/floating_ip_test_resource.py | import fixtures
import testtools
import os
from connections import ContrailConnections
from contrail_test_init import *
from vn_test import *
from vm_test import *
from quantum_test import *
from vnc_api_test import *
from nova_test import *
from testresources import OptimisingTestSuite, TestResource
class SolnSetup(fixtures.Fixture):
def __init__(self, test_resource):
super(SolnSetup, self).__init__()
self.test_resource = test_resource
def setUp(self):
super(SolnSetup, self).setUp()
if 'PARAMS_FILE' in os.environ:
self.ini_file = os.environ.get('PARAMS_FILE')
else:
self.ini_file = 'params.ini'
self.inputs = self.useFixture(ContrailTestInit(self.ini_file))
self.connections = ContrailConnections(self.inputs)
self.quantum_fixture = self.connections.quantum_fixture
self.nova_fixture = self.connections.nova_fixture
self.vnc_lib = self.connections.vnc_lib
self.logger = self.inputs.logger
self.setup_common_objects()
return self
# end setUp
def setup_common_objects(self):
(self.vn1_name, self.vn1_subnets) = ("vn1", ["11.1.1.0/24"])
(self.vn2_name, self.vn2_subnets) = ("vn2", ["22.1.1.0/24"])
(self.fvn_public_name, self.fvn_public_subnets) = (
"fip_vn_public", ['10.204.219.16/28'])
(self.fvn1_name, self.fvn1_subnets) = ("fip_vn1", ['100.1.1.0/24'])
(self.fvn2_name, self.fvn2_subnets) = ("fip_vn2", ['200.1.1.0/24'])
(self.fvn3_name, self.fvn3_subnets) = ("fip_vn3", ['170.1.1.0/29'])
(self.vn1_vm1_name, self.vn1_vm2_name) = ('vn1_vm1', 'vn1_vm2')
(self.vn2_vm1_name, self.vn2_vm2_name) = ('vn2_vm1', 'vn2_vm2')
(self.fvn_public_vm1_name) = ('fvn_public_vm1')
(self.fvn1_vm1_name) = ('fvn1_vm1')
(self.fvn2_vm1_name) = ('fvn2_vm1')
(self.fvn3_vm1_name) = ('fvn3_vm1')
(self.vn1_vm1_traffic_name) = 'VN1_VM1_traffic'
(self.fvn1_vm1_traffic_name) = 'FVN1_VM1_traffic'
# Get all compute host
host_list = []
for host in self.inputs.compute_ips:
host_list.append(self.inputs.host_data[host]['name'])
compute_1 = host_list[0]
compute_2 = host_list[0]
if len(host_list) > 1:
compute_1 = host_list[0]
compute_2 = host_list[1]
# Configure 6 VNs, 4 of them being Floating-VN
self.vn1_fixture = self.useFixture(
VNFixture(project_name=self.inputs.project_name,
connections=self.connections, inputs=self.inputs, vn_name=self.vn1_name, subnets=self.vn1_subnets))
self.vn2_fixture = self.useFixture(
VNFixture(project_name=self.inputs.project_name,
connections=self.connections, inputs=self.inputs, vn_name=self.vn2_name, subnets=self.vn2_subnets))
self.fvn_public_fixture = self.useFixture(VNFixture(
project_name=self.inputs.project_name, connections=self.connections, inputs=self.inputs, vn_name=self.fvn_public_name, subnets=self.fvn_public_subnets))
self.fvn1_fixture = self.useFixture(
VNFixture(project_name=self.inputs.project_name,
connections=self.connections, inputs=self.inputs, vn_name=self.fvn1_name, subnets=self.fvn1_subnets))
self.fvn2_fixture = self.useFixture(
VNFixture(project_name=self.inputs.project_name,
connections=self.connections, inputs=self.inputs, vn_name=self.fvn2_name, subnets=self.fvn2_subnets))
self.fvn3_fixture = self.useFixture(
VNFixture(project_name=self.inputs.project_name,
connections=self.connections, inputs=self.inputs, vn_name=self.fvn3_name, subnets=self.fvn3_subnets))
# Configure 2 VMs in VN1, 2 VMs in VN2, 1 VM in FVN_PUBLIC, 1 VM in
# FVN1,FVN2 and FVN3 each
self.vn1_vm1_fixture = self.useFixture(
VMFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_obj=self.vn1_fixture.obj, vm_name=self.vn1_vm1_name, node_name=compute_1))
self.vn1_vm2_fixture = self.useFixture(
VMFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_obj=self.vn1_fixture.obj, vm_name=self.vn1_vm2_name, node_name=compute_2))
self.vn2_vm1_fixture = self.useFixture(
VMFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_obj=self.vn2_fixture.obj, vm_name=self.vn2_vm1_name, node_name=compute_2))
self.vn2_vm2_fixture = self.useFixture(
VMFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_obj=self.vn2_fixture.obj, vm_name=self.vn2_vm2_name, node_name=compute_1))
self.fvn_public_vm1_fixture = self.useFixture(VMFixture(
project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.fvn_public_fixture.obj, vm_name=self.fvn_public_vm1_name))
self.fvn1_vm1_fixture = self.useFixture(
VMFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_obj=self.fvn1_fixture.obj, vm_name=self.fvn1_vm1_name, node_name=compute_2))
self.fvn2_vm1_fixture = self.useFixture(
VMFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_obj=self.fvn2_fixture.obj, vm_name=self.fvn2_vm1_name, node_name=compute_1))
self.fvn3_vm1_fixture = self.useFixture(
VMFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_obj=self.fvn3_fixture.obj, vm_name=self.fvn3_vm1_name, node_name=compute_2))
self.fvn1_vm1_traffic_fixture = self.useFixture(
VMFixture(
project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.fvn1_fixture.obj,
flavor='contrail_flavor_small', image_name='ubuntu-traffic', vm_name=self.fvn1_vm1_traffic_name, node_name=compute_2))
self.vn1_vm1_traffic_fixture = self.useFixture(
VMFixture(
project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.vn1_fixture.obj,
flavor='contrail_flavor_small', image_name='ubuntu-traffic', vm_name=self.vn1_vm1_traffic_name, node_name=compute_1))
# end setup_common_objects
def tearDown(self):
print "Tearing down resources"
super(SolnSetup, self).cleanUp()
def dirtied(self):
self.test_resource.dirtied(self)
class _SolnSetupResource(TestResource):
def make(self, dependencyresource):
base_setup = SolnSetup(self)
base_setup.setUp()
return base_setup
# end make
def clean(self, base_setup):
print "Am cleaning up here"
# super(_SolnSetupResource,self).clean()
base_setup.tearDown()
# end
SolnSetupResource = _SolnSetupResource()
| []
| []
| [
"PARAMS_FILE"
]
| [] | ["PARAMS_FILE"] | python | 1 | 0 | |
pkg/config/config.go | // Copyright (c) arkade author(s) 2020. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
package config
import (
"fmt"
"os"
"path"
)
func GetUserDir() string {
home := os.Getenv("HOME")
root := fmt.Sprintf("%s/.arkade/", home)
return root
}
func InitUserDir() (string, error) {
home := os.Getenv("HOME")
root := fmt.Sprintf("%s/.arkade/", home)
if len(home) == 0 {
return home, fmt.Errorf("env-var HOME not set")
}
binPath := path.Join(root, "/bin/")
err := os.MkdirAll(binPath, 0700)
if err != nil {
return binPath, err
}
helmPath := path.Join(root, "/.helm/")
helmErr := os.MkdirAll(helmPath, 0700)
if helmErr != nil {
return helmPath, helmErr
}
return root, nil
}
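// exampleInitUserDir is an illustrative sketch, not part of the original file:
// it shows the expected call sequence before writing binaries under ~/.arkade.
func exampleInitUserDir() {
	root, err := InitUserDir()
	if err != nil {
		fmt.Println("unable to initialise the arkade home directory:", err)
		return
	}
	fmt.Println("arkade home:", root)
	fmt.Println("downloaded binaries go in:", path.Join(root, "bin"))
}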
| [
"\"HOME\"",
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
experimental/rules_python_external/extract_wheels/__init__.py | """extract_wheels
extract_wheels resolves and fetches artifacts transitively from the Python Package Index (PyPI) based on a
requirements.txt. It generates the required BUILD files to consume these packages as Python libraries.
Under the hood, it depends on the `pip wheel` command to do resolution, download, and compilation into wheels.
"""
import argparse
import glob
import os
import subprocess
import sys
import json
from experimental.rules_python_external.extract_wheels.lib import bazel, requirements
def configure_reproducible_wheels() -> None:
"""Modifies the environment to make wheel building reproducible.
Wheels created from sdists are not reproducible by default. We can however workaround this by
patching in some configuration with environment variables.
"""
# wheel, by default, enables debug symbols in GCC. This incidentally captures the build path in the .so file
# We can override this behavior by disabling debug symbols entirely.
# https://github.com/pypa/pip/issues/6505
if "CFLAGS" in os.environ:
os.environ["CFLAGS"] += " -g0"
else:
os.environ["CFLAGS"] = "-g0"
# set SOURCE_DATE_EPOCH to 1980 so that we can use python wheels
# https://github.com/NixOS/nixpkgs/blob/master/doc/languages-frameworks/python.section.md#python-setuppy-bdist_wheel-cannot-create-whl
if "SOURCE_DATE_EPOCH" not in os.environ:
os.environ["SOURCE_DATE_EPOCH"] = "315532800"
# Python wheel metadata files can be unstable.
# See https://bitbucket.org/pypa/wheel/pull-requests/74/make-the-output-of-metadata-files/diff
if "PYTHONHASHSEED" not in os.environ:
os.environ["PYTHONHASHSEED"] = "0"
def main() -> None:
"""Main program.
Exits zero on successful program termination, non-zero otherwise.
"""
configure_reproducible_wheels()
parser = argparse.ArgumentParser(
description="Resolve and fetch artifacts transitively from PyPI"
)
parser.add_argument(
"--requirements",
action="store",
required=True,
help="Path to requirements.txt from where to install dependencies",
)
parser.add_argument(
"--repo",
action="store",
required=True,
help="The external repo name to install dependencies. In the format '@{REPO_NAME}'",
)
parser.add_argument(
"--extra_pip_args", action="store", help="Extra arguments to pass down to pip.",
)
parser.add_argument(
"--pip_data_exclude",
action="store",
help="Additional data exclusion parameters to add to the pip packages BUILD file.",
)
parser.add_argument(
"--enable_implicit_namespace_pkgs",
action="store_true",
help="Disables conversion of implicit namespace packages into pkg-util style packages.",
)
args = parser.parse_args()
pip_args = [sys.executable, "-m", "pip", "wheel", "-r", args.requirements]
if args.extra_pip_args:
pip_args += json.loads(args.extra_pip_args)["args"]
# Assumes any errors are logged by pip so do nothing. This command will fail if pip fails
subprocess.run(pip_args, check=True)
extras = requirements.parse_extras(args.requirements)
if args.pip_data_exclude:
pip_data_exclude = json.loads(args.pip_data_exclude)["exclude"]
else:
pip_data_exclude = []
targets = [
'"%s%s"'
% (
args.repo,
bazel.extract_wheel(
whl, extras, pip_data_exclude, args.enable_implicit_namespace_pkgs
),
)
for whl in glob.glob("*.whl")
]
with open("requirements.bzl", "w") as requirement_file:
requirement_file.write(
bazel.generate_requirements_file_contents(args.repo, targets)
)
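# Illustrative sketch (assumption, not part of the original file): in normal use
# this module is invoked by the repository rule that generates requirements.bzl,
# but the CLI can be exercised directly with something like:
#
#   python -m experimental.rules_python_external.extract_wheels \
#       --requirements requirements.txt \
#       --repo "@py_deps" \
#       --extra_pip_args '{"args": ["--index-url", "https://pypi.org/simple"]}'
#
# which runs `pip wheel -r requirements.txt`, unpacks every downloaded .whl via
# bazel.extract_wheel, and writes a requirements.bzl mapping requirements to
# "@py_deps//..." targets. The repo name and index URL above are made up.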
| []
| []
| [
"CFLAGS",
"PYTHONHASHSEED",
"SOURCE_DATE_EPOCH"
]
| [] | ["CFLAGS", "PYTHONHASHSEED", "SOURCE_DATE_EPOCH"] | python | 3 | 0 | |
curio/kernel.py | # curio/kernel.py
#
# Main execution kernel.
#
# Curio is based on a few overarching design principles that drive the code
# you'll find here.
#
# 1. Environmental Isolation.
#
# Curio strictly separates the environment of async and synchronous
# programming. All functionality related to async operation is
# placed in async-function definitions. Async functions request
# the services of the kernel using low-level yield statements
# (traps). The kernel is an opaque black-box from the perspective
# of synchronous code. There is only one available
# operation--run(coro) which runs a new task. There are no other
# mechanisms available for interacting with the kernel from
# synchronous code. A good analogy might be the distinction
# between user and protected mode in an OS. User programs run in
# user-mode and the operating system kernel runs in protected mode.
# The same thing happens here. User programs in Curio can only run
# in async functions. Those programs can request the services of
# the kernel. However, they're not granted any further access than
# that (there is no API surface or anything that can be used).
#
# 2. Microkernels
#
# The low-level kernel is meant to be small, fast, and minimally
# featureful. In fact, almost nothing interesting happens in the
# kernel. Instead, almost every useful part of Curio gets
# implemented in async functions found elsewhere. If you're trying
# to add new features to Curio, don't add them to the kernel. Think
# about how to create objects and functions that operate at the
# async-function level instead. See files such as sync.py or
# queue.py for examples.
#
# 3. Decoupling
#
# No part of Curio has direct linkage to the Kernel class (it's
# not imported or used anywhere else in the code base). If you want,
# you can make a completely custom Kernel object and have the
# rest of Curio run on it. You just need to make sure you implement
# the required traps. This is in contrast to libraries such as
# asyncio where many parts of the implementation are required to
# carry a reference to the underlying event loop.
__all__ = ['Kernel', 'run' ]
# -- Standard Library
import socket
import time
import os
import errno
from selectors import DefaultSelector, EVENT_READ, EVENT_WRITE
from collections import deque
# Logger where uncaught exceptions from crashed tasks are logged
import logging
log = logging.getLogger(__name__)
# -- Curio
from .errors import *
from .task import Task
from .traps import _read_wait
from . import meta
from .debug import _create_debuggers
from .timequeue import TimeQueue
from .activation import Activation
class Kernel(object):
'''
Curio run-time kernel. The selector argument specifies a
different I/O selector. The debug argument specifies a list of
debugger objects to apply. For example:
from curio.debug import schedtrace, traptrace
k = Kernel(debug=[schedtrace, traptrace])
Use the kernel run() method to submit work to the kernel.
'''
def __init__(self, *, selector=None, debug=None, activations=None):
# Functions to call at shutdown
self._shutdown_funcs = []
# I/O Selector setup
self._selector = selector if selector else DefaultSelector()
self._call_at_shutdown(self._selector.close)
# Task table
self._tasks = {}
# Coroutine runner function (created upon first call to run())
self._runner = None
# Activations
self._activations = activations if activations else []
# Debugging (activations in disguise)
if debug:
self._activations.extend(_create_debuggers(debug))
def __del__(self):
if self._shutdown_funcs is not None:
raise RuntimeError(
'Curio kernel not properly terminated. Please use Kernel.run(shutdown=True)')
def __enter__(self):
return self
def __exit__(self, ty, val, tb):
if self._shutdown_funcs is not None:
self.run(shutdown=True)
def _call_at_shutdown(self, func):
self._shutdown_funcs.append(func)
# ----------
# Submit a new task to the kernel
def run(self, corofunc=None, *args, shutdown=False):
if self._shutdown_funcs is None:
raise RuntimeError("Can't run a kernel that's been shut down or crashed. Create a new kernel.")
coro = meta.instantiate_coroutine(corofunc, *args) if corofunc else None
with meta.running():
# Make the kernel runtime environment (if needed)
if not self._runner:
self._runner = self._make_kernel_runtime()
ret_val = ret_exc = None
# Run the supplied coroutine (if any)
if coro or not shutdown:
task = self._runner(coro)
if task:
ret_exc = task.exception
ret_val = task.result if not ret_exc else None
del task
# If shutdown has been requested, run the shutdown process
if shutdown:
# For "reasons" related to task scheduling, the task
# of shutting down all remaining tasks is best managed
# by launching a task dedicated to carrying out the task (sic)
async def _shutdown_tasks(tocancel):
for task in tocancel:
await task.cancel()
tocancel = sorted(self._tasks.values(), key=lambda t: t.id, reverse=True)
self._runner(_shutdown_tasks(tocancel))
assert not self._tasks, "New tasks created during shutdown"
self._runner = None
# Call registered shutdown functions
for func in self._shutdown_funcs:
func()
self._shutdown_funcs = None
if ret_exc:
raise ret_exc
else:
return ret_val
# ------------------------------------------------------------
# Kernel runtime
#
# This function creates the kernel execution environment. It
# returns a single function (a closure) that executes a coroutine.
#
# At first glance, this function is going to look giant and
# insane. It is implementing the kernel runtime as a self-contained
# black box. There is no external API. The only possible
# communication is via traps defined in curio/traps.py.
# It's best to think of this as a "program within a program".
def _make_kernel_runtime(kernel):
# Motto: "What happens in the kernel stays in the kernel"
# ---- Kernel State
current = None # Currently running task
selector = kernel._selector # Event selector
ready = deque() # Ready queue
tasks = kernel._tasks # Task table
sleepq = TimeQueue() # Sleeping task queue
wake_queue = deque() # Thread wake queue
_activations = []
# ---- Bound methods
selector_register = selector.register
selector_unregister = selector.unregister
selector_modify = selector.modify
selector_select = selector.select
selector_getkey = selector.get_key
ready_popleft = ready.popleft
ready_append = ready.append
time_monotonic = time.monotonic
# ------------------------------------------------------------
# In-kernel task used for processing futures.
#
# Internal task that monitors the loopback socket--allowing the kernel to
# awake for non-I/O events.
# Loop-back sockets
notify_sock = None
wait_sock = None
async def _kernel_task():
wake_queue_popleft = wake_queue.popleft
while True:
await _read_wait(wait_sock)
data = wait_sock.recv(1000)
# Process any waking tasks. These are tasks that have
# been awakened externally to the event loop (e.g., by
# separate threads, Futures, etc.)
while wake_queue:
task, future = wake_queue_popleft()
# If the future associated with wakeup no longer
# matches the future stored on the task, wakeup is
# abandoned. It means that a timeout or
# cancellation event occurred in the time interval
# between the call to wake() and the
# subsequent processing of the waking task
if future and task.future is not future:
continue
task.future = None
task.state = 'READY'
task.cancel_func = None
ready_append(task)
# Force the kernel to wake, possibly scheduling a task to run.
# This method is called by threads running concurrently to the
# curio kernel. For example, it's triggered upon completion of
# Futures created by thread pools and processes. It's inherently
# dangerous for any kind of operation on the kernel to be
# performed by a separate thread. Thus, the *only* thing that
# happens here is that the task gets appended to a deque and a
# notification message is written to the kernel notification
# socket. append() and pop() operations on deques are thread safe
# and do not need additional locking. See
# https://docs.python.org/3/library/collections.html#collections.deque
# ----------
def wake(task=None, future=None):
if task:
wake_queue.append((task, future))
notify_sock.send(b'\x00')
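# Illustrative sketch (not part of the original file): the typical caller of
# wake() is the done-callback registered in trap_future_wait further below,
#
#     future.add_done_callback(lambda fut, task=current: wake(task, fut))
#
# so a worker thread finishing a Future only appends (task, future) to
# wake_queue and writes one byte to notify_sock; the in-kernel _kernel_task
# reading wait_sock then moves the task back onto the ready queue.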
def init_loopback():
nonlocal notify_sock, wait_sock
notify_sock, wait_sock = socket.socketpair()
wait_sock.setblocking(False)
notify_sock.setblocking(False)
kernel._call_at_shutdown(notify_sock.close)
kernel._call_at_shutdown(wait_sock.close)
# ------------------------------------------------------------
# Task management functions.
#
# Create a new task. Putting it on the ready queue
def new_task(coro):
task = Task(coro)
tasks[task.id] = task
reschedule_task(task)
for a in _activations:
a.created(task)
return task
# Reschedule a task, putting it back on the ready queue.
def reschedule_task(task):
ready_append(task)
task.state = 'READY'
task.cancel_func = None
# Suspend the current task
def suspend_task(state, cancel_func):
nonlocal current
current.state = state
current.cancel_func = cancel_func
# Unregister previous I/O request. Discussion follows:
#
# When a task performs I/O, it registers itself with the underlying
# I/O selector. When the task is reawakened, it unregisters itself
# and prepares to run. However, in many network applications, the
# task will perform a small amount of work and then go to sleep on
# exactly the same I/O resource that it was waiting on before. For
# example, a client handling task in a server will often spend most
# of its time waiting for incoming data on a single socket.
#
# Instead of always unregistering the task from the selector, we
# can defer the unregistration process until after the task goes
# back to sleep again. If it happens to be sleeping on the same
# resource as before, there's no need to unregister it--it will
# still be registered from the last I/O operation.
#
# The code here performs the unregister step for a task that
# ran, but is now sleeping for a *different* reason than repeating the
# prior I/O operation. There is coordination with code in _trap_io().
if current._last_io:
unregister_event(*current._last_io)
current._last_io = None
current = None
# Check if task has pending cancellation
def check_cancellation():
if current.allow_cancel and current.cancel_pending:
current._trap_result = current.cancel_pending
current.cancel_pending = None
return True
else:
return False
# Set a timeout or sleep event on the current task
def set_timeout(clock, sleep_type='timeout'):
if clock is None:
sleepq.cancel((current.id, sleep_type), getattr(current, sleep_type))
else:
sleepq.push((current.id, sleep_type), clock)
setattr(current, sleep_type, clock)
# ------------------------------------------------------------
# I/O Support functions
#
def register_event(fileobj, event, task):
try:
key = selector_getkey(fileobj)
mask, (rtask, wtask) = key.events, key.data
if event == EVENT_READ and rtask:
raise ReadResourceBusy(f"Multiple tasks can't wait to read on the same file descriptor {fileobj}")
if event == EVENT_WRITE and wtask:
raise WriteResourceBusy(f"Multiple tasks can't wait to write on the same file descriptor {fileobj}")
selector_modify(fileobj, mask | event,
(task, wtask) if event == EVENT_READ else (rtask, task))
except KeyError:
selector_register(fileobj, event,
(task, None) if event == EVENT_READ else (None, task))
def unregister_event(fileobj, event):
key = selector_getkey(fileobj)
mask, (rtask, wtask) = key.events, key.data
mask &= ~event
if not mask:
selector_unregister(fileobj)
else:
selector_modify(fileobj, mask,
(None, wtask) if event == EVENT_READ else (rtask, None))
# ------------------------------------------------------------
# Traps
#
# These implement the low-level functionality that is
# triggered by user-level code. They are never invoked directly
# and there is no public API outside the kernel. Instead,
# coroutines use a statement such as
#
# yield ('_trap_io', sock, EVENT_READ, 'READ_WAIT')
#
# to invoke a specific trap.
# ------------------------------------------------------------
# ----------------------------------------
# Wait for I/O
def trap_io(fileobj, event, state):
if check_cancellation():
return
# See comment about deferred unregister in run(). If the requested
# I/O operation is *different* than the last I/O operation that was
# performed by the task, we need to unregister the last I/O resource used
# and register a new one with the selector.
if current._last_io != (fileobj, event):
if current._last_io:
unregister_event(*current._last_io)
try:
register_event(fileobj, event, current)
except CurioError as e:
current._trap_result = e
return
# This step indicates that we have managed any deferred I/O management
# for the task. Otherwise, I/O will be unregistered.
current._last_io = None
suspend_task(state, lambda: unregister_event(fileobj, event))
# ----------------------------------------
# Return tasks currently waiting on a file obj.
def trap_io_waiting(fileobj):
try:
key = selector_getkey(fileobj)
rtask, wtask = key.data
rtask = rtask if rtask and rtask.cancel_func else None
wtask = wtask if wtask and wtask.cancel_func else None
current._trap_result = (rtask, wtask)
except KeyError:
current._trap_result = (None, None)
# ----------------------------------------
# Wait on a Future
def trap_future_wait(future, event):
if check_cancellation():
return
current.future = future
# Discussion: Each task records the future that it is
# currently waiting on. The completion callback below only
# attempts to wake the task if its stored Future is exactly
# the same one that was stored above. Due to support for
# cancellation and timeouts, it's possible that a task might
# abandon its attempt to wait for a Future and go on to
# perform other operations, including waiting for different
# Future in the future (got it?). However, a running thread
# or process still might go on to eventually complete the
# earlier work. In that case, it will trigger the callback,
# find that the task's current Future is now different, and
# discard the result.
future.add_done_callback(lambda fut, task=current: wake(task, fut))
# An optional threading.Event object can be passed and set to
# start a worker thread. This makes it possible to have a lock-free
# Future implementation where worker threads only start after the
# callback function has been set above.
if event:
event.set()
suspend_task('FUTURE_WAIT',
lambda task=current:
setattr(task, 'future', future.cancel() and None))
# ----------------------------------------
# Add a new task to the kernel
def trap_spawn(coro):
task = new_task(coro)
task.parentid = current.id
current._trap_result = task
# ----------------------------------------
# Cancel a task
def trap_cancel_task(task, exc=TaskCancelled, val=None):
if task.cancelled:
return
task.cancelled = True
# Cancelling a task also cancels any currently pending timeout.
# If a task is being cancelled, the delivery of a timeout is
# somewhat immaterial--the task is already being cancelled.
task.timeout = None
# Set the cancellation exception
if isinstance(exc, BaseException):
task.cancel_pending = exc
else:
task.cancel_pending = exc(exc.__name__ if val is None else val)
# If the task doesn't allow the delivery of a cancellation exception right now
# we're done. It's up to the task to check for it later
if not task.allow_cancel:
return
# If the task doesn't have a cancellation function set, it means the task
# is on the ready-queue. It's not safe to deliver a cancellation exception
# to it right now. Instead, we simply return. It will get cancelled
# the next time it performs a blocking operation
if not task.cancel_func:
return
# Cancel and reschedule the task
task.cancel_func()
task._trap_result = task.cancel_pending
reschedule_task(task)
task.cancel_pending = None
# ----------------------------------------
# Wait on a scheduler primitive
def trap_sched_wait(sched, state):
if check_cancellation():
return
suspend_task(state, sched._kernel_suspend(current))
# ----------------------------------------
# Reschedule one or more tasks from a scheduler primitive
def trap_sched_wake(sched, n):
tasks = sched._kernel_wake(n)
for task in tasks:
reschedule_task(task)
# ----------------------------------------
# Return the current value of the kernel clock
def trap_clock():
current._trap_result = time_monotonic()
# ----------------------------------------
# Sleep for a specified period. Returns value of monotonic clock.
# absolute flag indicates whether or not an absolute or relative clock
# interval has been provided
def trap_sleep(clock, absolute):
nonlocal current
if check_cancellation():
return
if clock == 0:
reschedule_task(current)
current = None
return
# We used to have a special case where sleep periods <= 0 would
# simply reschedule the task to the end of the ready queue without
# actually putting it on the sleep queue first. But this meant
# that if a task looped while calling sleep(0), it would allow
# other *ready* tasks to run, but block ever checking for I/O or
# timeouts, so sleeping tasks would never wake up. That's not what
# we want; sleep(0) should mean "please give other stuff a chance
# to run". So now we always go through the whole sleep machinery.
if not absolute:
clock += time_monotonic()
set_timeout(clock, 'sleep')
suspend_task('TIME_SLEEP',
lambda task=current: (sleepq.cancel((task.id, 'sleep'), task.sleep), setattr(task, 'sleep', None)))
# ----------------------------------------
# Set a timeout to be delivered to the calling task
def trap_set_timeout(timeout):
old_timeout = current.timeout
if timeout is None:
# If no timeout period is given, leave the current timeout in effect
pass
else:
set_timeout(timeout)
if old_timeout and current.timeout > old_timeout:
current.timeout = old_timeout
current._trap_result = old_timeout
# ----------------------------------------
# Clear a previously set timeout
def trap_unset_timeout(previous):
# Here's an evil corner case. Suppose the previous timeout in effect
# has already expired? If so, then we need to arrange for a timeout
# to be generated. However, this has to happen on the *next* blocking
# call, not on this trap. That's because the "unset" timeout feature
# is usually done in the finalization stage of the previous timeout
# handling. If we were to raise a TaskTimeout here, it would get mixed
# up with the prior timeout handling and all manner of head-explosion
# will occur.
set_timeout(None)
current._trap_result = now = time_monotonic()
if previous and previous >= 0 and previous < now:
# Perhaps create a TaskTimeout pending exception here.
set_timeout(previous)
else:
set_timeout(previous)
current.timeout = previous
# But there's one other evil corner case. It's possible that
# a timeout could be reset while a TaskTimeout exception
# is pending. If that happens, it means that the task has
# left the timeout block. We should probably take away the
# pending exception.
if isinstance(current.cancel_pending, TaskTimeout):
current.cancel_pending = None
# ----------------------------------------
# Return the running kernel
def trap_get_kernel():
current._trap_result = kernel
# ----------------------------------------
# Return the currently running task
def trap_get_current():
current._trap_result = current
# ------------------------------------------------------------
# Final setup.
# ------------------------------------------------------------
# Create the traps tables
kernel._traps = traps = { key:value for key, value in locals().items()
if key.startswith('trap_') }
# Initialize activations
kernel._activations = _activations = \
[ act() if (isinstance(act, type) and issubclass(act, Activation)) else act
for act in kernel._activations ]
for act in _activations:
act.activate(kernel)
# Initialize the loopback task (if not already initialized)
init_loopback()
task = new_task(_kernel_task())
task.daemon = True
# ------------------------------------------------------------
# Main Kernel Loop. Runs the supplied coroutine until it
# terminates. If no coroutine is supplied, it runs one cycle
# of the kernel.
# ------------------------------------------------------------
def kernel_run(coro):
nonlocal current
main_task = new_task(coro) if coro else None
del coro
while True:
# ------------------------------------------------------------
# I/O Polling/Waiting
# ------------------------------------------------------------
if ready or not main_task:
timeout = 0
else:
current_time = time_monotonic()
timeout = sleepq.next_deadline(current_time)
try:
events = selector_select(timeout)
except OSError as e:
# If there is nothing to select, windows throws an
# OSError, so just set events to an empty list.
if e.errno != getattr(errno, 'WSAEINVAL', None):
raise
events = []
# Reschedule tasks with completed I/O
for key, mask in events:
rtask, wtask = key.data
intfd = isinstance(key.fileobj, int)
if mask & EVENT_READ:
# Discussion: If the associated fileobj is *not* a
# bare integer file descriptor, we keep a record
# of the last I/O event in _last_io and leave the
# task registered on the event loop. If it
# performs the same I/O operation again, it will
# get a speed boost from not having to re-register
# its event. However, it's not safe to use this
# optimization with bare integer fds. These fds
# often get reused and there is a possibility that
# a fd will get closed and reopened on a different
# resource without it being detected by the
# kernel. For that case, it's critical that we not
# leave the fd on the event loop.
rtask._last_io = None if intfd else (key.fileobj, EVENT_READ)
reschedule_task(rtask)
mask &= ~EVENT_READ
rtask = None
if mask & EVENT_WRITE:
wtask._last_io = None if intfd else (key.fileobj, EVENT_WRITE)
reschedule_task(wtask)
mask &= ~EVENT_WRITE
wtask = None
# Unregister the task if fileobj is not an integer fd (see
# note above).
if intfd:
if mask:
selector_modify(key.fileobj, mask, (rtask, wtask))
else:
selector_unregister(key.fileobj)
# ------------------------------------------------------------
# Time handling (sleep/timeouts)
# ------------------------------------------------------------
current_time = time_monotonic()
for tm, (taskid, sleep_type) in sleepq.expired(current_time):
# When a task wakes, verify that the timeout value matches that stored
# on the task. If it differs, it means that the task completed its
# operation, was cancelled, or is no longer concerned with this
# sleep operation. In that case, we do nothing
task = tasks.get(taskid)
if task is None:
continue
if tm != getattr(task, sleep_type):
continue
setattr(task, sleep_type, None)
if sleep_type == 'sleep':
task._trap_result = current_time
reschedule_task(task)
# If cancellation is allowed and the task is blocked, reschedule it
elif task.allow_cancel and task.cancel_func:
task.cancel_func()
task._trap_result = TaskTimeout(current_time)
reschedule_task(task)
# Task is on the ready queue or can't be cancelled right now;
# mark it as pending cancellation
else:
task.cancel_pending = TaskTimeout(current_time)
# ------------------------------------------------------------
# Run ready tasks
# ------------------------------------------------------------
for _ in range(len(ready)):
active = current = ready_popleft()
for a in _activations:
a.running(active)
active.state = 'RUNNING'
active.cycles += 1
# The current task runs until it suspends or terminates
while current:
try:
trap = current._send(current._trap_result)
except BaseException as e:
# If any exception has occurred, the task is done.
current = None
# Wake all joining tasks and enter the terminated state.
for wtask in active.joining._kernel_wake(len(active.joining)):
reschedule_task(wtask)
active.terminated = True
active.state = 'TERMINATED'
del tasks[active.id]
active.timeout = None
# Normal termination (set the result)
if isinstance(e, StopIteration):
active.result = e.value
else:
# Abnormal termination (set an exception)
active.exception = e
if (active != main_task and not isinstance(e, (CancelledError, SystemExit))):
log.error('Task Crash: %r', active, exc_info=True)
if not isinstance(e, Exception):
raise
break
# Run the trap function. This is never supposed to raise
# an exception unless there's a fatal programming error in
# the kernel itself. Such errors cause Curio to die. They
# are not reported back to tasks.
current._trap_result = None
try:
traps[trap[0]](*trap[1:])
except:
# Disable any further use of the kernel on fatal crash.
kernel._shutdown_funcs = None
raise
# --- The active task has suspended
# Some tricky task/thread interactions require knowing when
# a coroutine has suspended. If suspend_func has been set,
# trigger it and clear.
if active.suspend_func:
active.suspend_func()
active.suspend_func = None
# Unregister any prior I/O listening
if active._last_io:
unregister_event(*active._last_io)
active._last_io = None
# Trigger scheduler activations (if any)
for a in _activations:
a.suspended(active)
if active.terminated:
a.terminated(active)
current = active = None
# If the main task has terminated, we're done.
if main_task:
if main_task.terminated:
main_task.joined = True
return main_task
else:
return None
return kernel_run
def run(corofunc, *args, with_monitor=False, selector=None,
debug=None, activations=None, **kernel_extra):
'''
Run the curio kernel with an initial task and execute until all
tasks terminate. Returns the task's final result (if any). This
is a convenience function that should primarily be used for
launching the top-level task of a curio-based application. It
creates an entirely new kernel, runs the given task to completion,
and concludes by shutting down the kernel, releasing all resources used.
Don't use this function if you're repeatedly launching a lot of
new tasks to run in curio. Instead, create a Kernel instance and
use its run() method instead.
'''
kernel = Kernel(selector=selector, debug=debug, activations=activations,
**kernel_extra)
# Check if a monitor has been requested
if with_monitor or 'CURIOMONITOR' in os.environ:
from .monitor import Monitor
m = Monitor(kernel)
kernel._call_at_shutdown(m.close)
kernel.run(m.start)
with kernel:
return kernel.run(corofunc, *args)
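# Illustrative sketch (not part of the original file): minimal end-to-end use of
# the convenience function above. curio.sleep is part of curio's public API and
# is not defined in this module.
#
#     import curio
#
#     async def main():
#         await curio.sleep(1)
#         return 'done'
#
#     if __name__ == '__main__':
#         print(curio.run(main))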
| []
| []
| []
| [] | [] | python | 0 | 0 | |
examples/Deeplab/experiments/deeplabv2.asppl.fix.andomscaleaug.mixup.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: hed.py
# Author: Yuxin Wu <[email protected]>
import cv2
import tensorflow as tf
from tensorflow.python import debug as tf_debug
import argparse
from six.moves import zip
import os
import numpy as np
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.utils.gpu import get_nr_gpu
from tensorpack.utils.segmentation import predict_slider, visualize_label, predict_scaler
from tensorpack.utils.stats import MIoUStatistics
from tensorpack.utils import logger
from tensorpack.tfutils import optimizer
from tensorpack.tfutils.summary import add_moving_summary, add_param_summary
import tensorpack.tfutils.symbolic_functions as symbf
from tqdm import tqdm
from tensorpack.dataflow.imgaug.misc import ReduceMean
from imagenet_utils import (
fbresnet_augmentor, get_imagenet_dataflow, ImageNetModel,
eval_on_ILSVRC12)
from resnet_model import (
preresnet_group, preresnet_basicblock, preresnet_bottleneck,
resnet_group, resnet_basicblock, resnet_bottleneck_deeplab, se_resnet_bottleneck,
resnet_backbone)
CLASS_NUM = 21
CROP_SIZE = 321
IGNORE_LABEL = 255
def my_softmax_cross_entropy_with_ignore_label(logits, label, class_num, mask):
"""
This function accepts logits rather than predictions, and is more numerically stable than
:func:`class_balanced_cross_entropy`.
"""
with tf.name_scope('softmax_cross_entropy_with_ignore_label'):
# tf.assert_equal(logits.shape[1], label.shape[1]) # shape assert
# TODO need assert here
raw_prediction = tf.reshape(logits, [-1, class_num])
label = tf.reshape(label, [-1, class_num])
mask = tf.reshape(mask,[-1])
indices = tf.squeeze(tf.where(tf.equal(mask, 1)), axis=1,name="indices_to_get")
gt = tf.gather(label, indices)
prediction = tf.gather(raw_prediction, indices)
# Pixel-wise softmax loss.
loss = tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=gt)
return loss
class Model(ModelDesc):
def _get_inputs(self):
## Set static shape so that tensorflow knows shape at compile time.
return [InputDesc(tf.float32, [None, CROP_SIZE, CROP_SIZE, 3], 'image'),
InputDesc(tf.int32, [None, CROP_SIZE, CROP_SIZE, CLASS_NUM], 'gt'),
InputDesc(tf.int32, [None, CROP_SIZE, CROP_SIZE], 'mask')]
def _build_graph(self, inputs):
def vgg16(image):
with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu):
def aspp_branch(input, rate):
input = AtrousConv2D('aspp{}_conv0'.format(rate), input, 1024, kernel_shape=3, rate=6)
input = Dropout('aspp{}_dropout0'.format(rate), input, 0.5)
input = Conv2D('aspp{}_conv1'.format(rate), input, 1024)
input = Dropout('aspp{}_dropout1'.format(rate), input, 0.5)
input = Conv2D('aspp{}_conv2'.format(rate), input, CLASS_NUM, nl=tf.identity)
return input
l = Conv2D('conv1_1', image, 64)
l = Conv2D('conv1_2', l, 64)
l = MaxPooling('pool1', l, shape=3, stride=2)
# 112
l = Conv2D('conv2_1', l, 128)
l = Conv2D('conv2_2', l, 128)
l = MaxPooling('pool2', l, shape=3, stride=2)
# 56
l = Conv2D('conv3_1', l, 256)
l = Conv2D('conv3_2', l, 256)
l = Conv2D('conv3_3', l, 256)
l = MaxPooling('pool3', l, shape=3, stride=2)
# 28
l = Conv2D('conv4_1', l, 512)
l = Conv2D('conv4_2', l, 512)
l = Conv2D('conv4_3', l, 512)
l = MaxPooling('pool4', l, shape=3, stride=1) # original VGG16 pooling is 2, here is 1
# 28
l = AtrousConv2D('conv5_1', l, 512, kernel_shape=3, rate=2)
l = AtrousConv2D('conv5_2', l, 512, kernel_shape=3, rate=2)
l = AtrousConv2D('conv5_3', l, 512, kernel_shape=3, rate=2)
l = MaxPooling('pool5', l, shape=3, stride=1)
# 28
dilation6 = aspp_branch(l, rate=6)
dilation12 = aspp_branch(l, rate=12)
dilation18 = aspp_branch(l, rate=18)
dilation24 = aspp_branch(l, rate=24)
predict = dilation6 + dilation12 + dilation18 + dilation24
return predict
def resnet101(image):
mode = 'resnet'
depth = 101
basicblock = preresnet_basicblock if mode == 'preact' else resnet_basicblock
bottleneck = {
'resnet': resnet_bottleneck_deeplab,
'preact': preresnet_bottleneck,
'se': se_resnet_bottleneck}[mode]
num_blocks, block_func = {
18: ([2, 2, 2, 2], basicblock),
34: ([3, 4, 6, 3], basicblock),
50: ([3, 4, 6, 3], bottleneck),
101: ([3, 4, 23, 3], bottleneck),
152: ([3, 8, 36, 3], bottleneck)
}[depth]
def get_logits(image):
with argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format="NHWC"):
return resnet_backbone(
image, num_blocks,
preresnet_group if mode == 'preact' else resnet_group, block_func)
return get_logits(image)
image, label, mask = inputs
#image = image - tf.constant([104, 116, 122], dtype='float32')
label = tf.identity(label, name="label")
mask = tf.identity(mask, name="label")
#predict = vgg16(image)
predict = resnet101(image)
costs = []
prob = tf.nn.softmax(predict, name='prob')
cost = my_softmax_cross_entropy_with_ignore_label(logits=predict, label=label,
class_num=CLASS_NUM, mask =mask)
prediction = tf.argmax(prob, axis=-1,name="prediction")
cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss
costs.append(cost)
if get_current_tower_context().is_training:
wd_w = tf.train.exponential_decay(2e-4, get_global_step_var(),
80000, 0.7, True)
wd_cost = tf.multiply(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='wd_cost')
costs.append(wd_cost)
add_param_summary(('.*/W', ['histogram'])) # monitor W
self.cost = tf.add_n(costs, name='cost')
add_moving_summary(costs + [self.cost])
def _get_optimizer(self):
lr = tf.get_variable('learning_rate', initializer=2.5e-4, trainable=False)
opt = tf.train.AdamOptimizer(lr, epsilon=2.5e-4)
return optimizer.apply_grad_processors(
opt, [gradproc.ScaleGradient(
[('aspp.*_conv/W', 10),('aspp.*_conv/b',20)])])
def get_data(name, data_dir, meta_dir, batch_size):
isTrain = name == 'train'
ds = dataset.PascalVOC12(data_dir, meta_dir, name, shuffle=True)
class RandomCropWithPadding(imgaug.ImageAugmentor):
def _get_augment_params(self, img):
self.h0 = img.shape[0]
self.w0 = img.shape[1]
if CROP_SIZE > self.h0:
top = (CROP_SIZE - self.h0) / 2
bottom = (CROP_SIZE - self.h0) - top
else:
top = 0
bottom = 0
if CROP_SIZE > self.w0:
left = (CROP_SIZE - self.w0) / 2
right = (CROP_SIZE - self.w0) - left
else:
left = 0
right = 0
new_shape = (top + bottom + self.h0, left + right + self.w0)
diffh = new_shape[0] - CROP_SIZE
assert diffh >= 0
crop_start_h = 0 if diffh == 0 else self.rng.randint(diffh)
diffw = new_shape[1] - CROP_SIZE
assert diffw >= 0
crop_start_w = 0 if diffw == 0 else self.rng.randint(diffw)
return (top, bottom, left, right, crop_start_h, crop_start_w)
def _augment(self, img, param):
top, bottom, left, right, crop_start_h, crop_start_w = param
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=IGNORE_LABEL)
assert crop_start_h + CROP_SIZE <= img.shape[0], crop_start_w + CROP_SIZE <= img.shape[1]
return img[crop_start_h:crop_start_h + CROP_SIZE, crop_start_w:crop_start_w + CROP_SIZE]
if isTrain:
shape_aug = [
imgaug.RandomResize(xrange=(0.7, 1.5), yrange=(0.7, 1.5),
aspect_ratio_thres=0.15),
]
else:
shape_aug = []
pass
ds = AugmentImageComponents(ds, shape_aug, (0, 1), copy=False, is_segmentation = True)
if isTrain:
shape_aug = [
RandomCropWithPadding(),
imgaug.Flip(horiz=True),
]
else:
shape_aug = []
ds = AugmentImageComponents(ds, shape_aug, (0, 1), copy=False, is_segmentation=False)
shape_aug = [ReduceMean()]
ds = AugmentImageComponent(ds, shape_aug)
def f(ds):
alpha = 1
images, labels = ds
x1, x2 = np.split(images, 2, axis=0)
y1, y2 = np.split(labels, 2, axis=0)
y1_flaten = y1.flatten()
y2_flaten = y2.flatten()
y1_255_index = np.where(y1_flaten == 255)
y2_255_index = np.where(y2_flaten == 255)
y1_flaten[y1_255_index[0]] = 0 #fake data
y2_flaten[y2_255_index[0]] = 0 #fake data
y1_onehot = np.eye(CLASS_NUM)[y1_flaten] # one hot coding
y2_onehot = np.eye(CLASS_NUM)[y2_flaten] # one hot coding
y1_onehot = np.reshape(y1_onehot,(batch_size, CROP_SIZE, CROP_SIZE, CLASS_NUM))
y2_onehot = np.reshape(y2_onehot, (batch_size, CROP_SIZE, CROP_SIZE, CLASS_NUM))
# mixup:
weight = np.random.beta(alpha, alpha, batch_size)
x_weight = weight.reshape(batch_size, 1, 1, 1)
y_weight = weight.reshape(batch_size, 1, 1, 1)
x = x1 * x_weight + x2 * (1 - x_weight)
# pixels whose label is 255 in either y1 or y2 are excluded from the loss via the mask below
y = y1_onehot * y_weight + y2_onehot * (1 - y_weight)
mask = np.ones((batch_size, CROP_SIZE, CROP_SIZE), dtype=np.int8)
mask[np.unravel_index(y1_255_index, (batch_size,CROP_SIZE,CROP_SIZE))] = 0
mask[np.unravel_index(y2_255_index, (batch_size, CROP_SIZE, CROP_SIZE))] = 0
return [x, y, mask]
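# Illustrative sketch (not part of the original file): with alpha = 1 the mixing
# weight w ~ Beta(1, 1) is uniform on [0, 1]. For one image pair and w = 0.3:
#   x = 0.3 * x1 + 0.7 * x2                     (pixel-wise blend of the images)
#   y = 0.3 * onehot(y1) + 0.7 * onehot(y2)     (soft one-hot labels per pixel)
# and every pixel that carried the ignore label 255 in either y1 or y2 gets
# mask = 0, so my_softmax_cross_entropy_with_ignore_label drops it from the loss.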
if isTrain:
ds = BatchData(ds, 2*batch_size)
ds = MapData(ds, f)
ds = PrefetchDataZMQ(ds, 2)
else:
ds = BatchData(ds, 1)
return ds
def view_data(data_dir, meta_dir, batch_size):
ds = RepeatedData(get_data('train',data_dir, meta_dir, batch_size), -1)
ds.reset_state()
for ims, labels in ds.get_data():
for im, label in zip(ims, labels):
#aa = visualize_label(label)
#pass
cv2.imshow("im", im / 255.0)
cv2.imshow("raw-label", label)
cv2.imshow("color-label", visualize_label(label))
cv2.waitKey(0)
def get_config(data_dir, meta_dir, batch_size):
logger.auto_set_dir()
dataset_train = get_data('train', data_dir, meta_dir, batch_size)
steps_per_epoch = dataset_train.size() * 8
dataset_val = get_data('val', data_dir, meta_dir, batch_size)
return TrainConfig(
dataflow=dataset_train,
callbacks=[
ModelSaver(),
ScheduledHyperParamSetter('learning_rate', [(2, 1e-4), (4, 1e-5), (6, 8e-6)]),
HumanHyperParamSetter('learning_rate'),
PeriodicTrigger(CalculateMIoU(CLASS_NUM), every_k_epochs=1),
ProgressBar(["cross_entropy_loss","cost","wd_cost"]),#uncomment it to debug for every step
#HookToCallback(tf_debug.LocalCLIDebugHook())
],
model=Model(),
steps_per_epoch=steps_per_epoch,
max_epoch=10,
)
def run(model_path, image_path, output):
pred_config = PredictConfig(
model=Model(),
session_init=get_model_loader(model_path),
input_names=['image'],
output_names=['output' + str(k) for k in range(1, 7)])
predictor = OfflinePredictor(pred_config)
im = cv2.imread(image_path)
assert im is not None
im = cv2.resize(
im, (im.shape[1] // 16 * 16, im.shape[0] // 16 * 16)
)[None, :, :, :].astype('float32')
outputs = predictor(im)
if output is None:
for k in range(6):
pred = outputs[k][0]
cv2.imwrite("out{}.png".format(
'-fused' if k == 5 else str(k + 1)), pred * 255)
else:
pred = outputs[5][0]
cv2.imwrite(output, pred * 255)
def proceed_validation(args, is_save = True, is_densecrf = False):
import cv2
ds = dataset.PascalVOC12(args.data_dir, args.meta_dir, "val")
ds = BatchData(ds, 1)
pred_config = PredictConfig(
model=Model(),
session_init=get_model_loader(args.load),
input_names=['image'],
output_names=['prob'])
predictor = OfflinePredictor(pred_config)
i = 0
stat = MIoUStatistics(CLASS_NUM)
logger.info("start validation....")
for image, label in tqdm(ds.get_data()):
label = np.squeeze(label)
image = np.squeeze(image)
prediction = predict_scaler(image, predictor, scales=[0.9, 1, 1.1], classes=CLASS_NUM, tile_size=CROP_SIZE, is_densecrf = is_densecrf)
prediction = np.argmax(prediction, axis=2)
stat.feed(prediction, label)
if is_save:
cv2.imwrite("result/{}.png".format(i), np.concatenate((image, visualize_label(label), visualize_label(prediction)), axis=1))
i += 1
logger.info("mIoU: {}".format(stat.mIoU))
logger.info("mean_accuracy: {}".format(stat.mean_accuracy))
logger.info("accuracy: {}".format(stat.accuracy))
class CalculateMIoU(Callback):
def __init__(self, nb_class):
self.nb_class = nb_class
def _setup_graph(self):
self.pred = self.trainer.get_predictor(
['image'], ['prob'])
def _before_train(self):
pass
def _trigger(self):
global args
self.val_ds = get_data('val', args.data_dir, args.meta_dir, args.batch_size)
self.val_ds.reset_state()
self.stat = MIoUStatistics(self.nb_class)
for image, label in tqdm(self.val_ds.get_data()):
label = np.squeeze(label)
image = np.squeeze(image)
prediction = predict_slider(image, self.pred, self.nb_class, tile_size=CROP_SIZE)
prediction = np.argmax(prediction, axis=2)
self.stat.feed(prediction, label)
self.trainer.monitors.put_scalar("mIoU", self.stat.mIoU)
self.trainer.monitors.put_scalar("mean_accuracy", self.stat.mean_accuracy)
self.trainer.monitors.put_scalar("accuracy", self.stat.accuracy)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--data_dir', default="/data_a/dataset/pascalvoc2012/VOC2012trainval/VOCdevkit/VOC2012",
help='dataset dir')
parser.add_argument('--meta_dir', default="pascalvoc12", help='meta dir')
parser.add_argument('--load', default="resnet101.npz", help='load model')
parser.add_argument('--view', help='view dataset', action='store_true')
parser.add_argument('--run', help='run model on images')
parser.add_argument('--batch_size', type=int, default = 8, help='batch_size')
parser.add_argument('--output', help='fused output filename. default to out-fused.png')
parser.add_argument('--validation', action='store_true', help='validate model on validation images')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.view:
view_data(args.data_dir,args.meta_dir,args.batch_size)
elif args.run:
run(args.load, args.run, args.output)
elif args.validation:
proceed_validation(args)
else:
config = get_config(args.data_dir,args.meta_dir,args.batch_size)
if args.load:
config.session_init = get_model_loader(args.load)
launch_train_with_config(
config,
SyncMultiGPUTrainer(max(get_nr_gpu(), 1)))
| [] | [] | ["CUDA_VISIBLE_DEVICES", "TENSORPACK_TRAIN_API"] | [] | ["CUDA_VISIBLE_DEVICES", "TENSORPACK_TRAIN_API"] | python | 2 | 0 | |
pkg/logging/logger.go | /*
Copyright 2018 Carmen Chan & Tony Yip
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logging
import (
"os"
"github.com/sirupsen/logrus"
)
var root = logrus.New()
func init() {
if os.Getenv("DEBUG") != "" {
root.SetLevel(logrus.DebugLevel)
}
}
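// GetRoot returns the shared logrus logger; it logs at debug level when the
// DEBUG environment variable is set to any non-empty value.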
func GetRoot() *logrus.Logger {
return root
}
| ["\"DEBUG\""] | [] | ["DEBUG"] | [] | ["DEBUG"] | go | 1 | 0 | |
script/release/release/images.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import base64
import json
import os
import docker
from enum import Enum
from .const import NAME
from .const import REPO_ROOT
from .utils import ScriptError
class Platform(Enum):
ALPINE = 'alpine'
DEBIAN = 'debian'
def __str__(self):
return self.value
class ImageManager(object):
def __init__(self, version, latest=False):
self.built_tags = []
self.docker_client = docker.APIClient(**docker.utils.kwargs_from_env())
self.version = version
self.latest = latest
if 'HUB_CREDENTIALS' in os.environ:
print('HUB_CREDENTIALS found in environment, issuing login')
credentials = json.loads(base64.urlsafe_b64decode(os.environ['HUB_CREDENTIALS']))
self.docker_client.login(
username=credentials['Username'], password=credentials['Password']
)
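        # Note (assumption based on the decode above): HUB_CREDENTIALS is expected
        # to hold a urlsafe-base64-encoded JSON object with "Username" and
        # "Password" keys, e.g. base64.urlsafe_b64encode(b'{"Username": "user", "Password": "pw"}');
        # the values here are placeholders, not real credentials.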
def _tag(self, image, existing_tag, new_tag):
existing_repo_tag = '{image}:{tag}'.format(image=image, tag=existing_tag)
new_repo_tag = '{image}:{tag}'.format(image=image, tag=new_tag)
self.docker_client.tag(existing_repo_tag, new_repo_tag)
self.built_tags.append(new_repo_tag)
def build_runtime_image(self, repository, platform):
git_sha = repository.write_git_sha()
compose_image_base_name = NAME
print('Building {image} image ({platform} based)'.format(
image=compose_image_base_name,
platform=platform
))
full_version = '{version}-{platform}'.format(version=self.version, platform=platform)
build_tag = '{image_base_image}:{full_version}'.format(
image_base_image=compose_image_base_name,
full_version=full_version
)
logstream = self.docker_client.build(
REPO_ROOT,
tag=build_tag,
buildargs={
'BUILD_PLATFORM': platform.value,
'GIT_COMMIT': git_sha,
},
decode=True
)
for chunk in logstream:
if 'error' in chunk:
raise ScriptError('Build error: {}'.format(chunk['error']))
if 'stream' in chunk:
print(chunk['stream'], end='')
self.built_tags.append(build_tag)
if platform == Platform.ALPINE:
self._tag(compose_image_base_name, full_version, self.version)
if self.latest:
self._tag(compose_image_base_name, full_version, platform)
if platform == Platform.ALPINE:
self._tag(compose_image_base_name, full_version, 'latest')
# Used for producing a test image for UCP
def build_ucp_test_image(self, repository):
print('Building test image (debian based for UCP e2e)')
git_sha = repository.write_git_sha()
compose_tests_image_base_name = NAME + '-tests'
ucp_test_image_tag = '{image}:{tag}'.format(
image=compose_tests_image_base_name,
tag=self.version
)
logstream = self.docker_client.build(
REPO_ROOT,
tag=ucp_test_image_tag,
target='build',
buildargs={
'BUILD_PLATFORM': Platform.DEBIAN.value,
'GIT_COMMIT': git_sha,
},
decode=True
)
for chunk in logstream:
if 'error' in chunk:
raise ScriptError('Build error: {}'.format(chunk['error']))
if 'stream' in chunk:
print(chunk['stream'], end='')
self.built_tags.append(ucp_test_image_tag)
self._tag(compose_tests_image_base_name, self.version, 'latest')
def build_images(self, repository):
self.build_runtime_image(repository, Platform.ALPINE)
self.build_runtime_image(repository, Platform.DEBIAN)
self.build_ucp_test_image(repository)
def check_images(self):
for name in self.built_tags:
try:
self.docker_client.inspect_image(name)
except docker.errors.ImageNotFound:
print('Expected image {} was not found'.format(name))
return False
return True
def push_images(self):
for name in self.built_tags:
print('Pushing {} to Docker Hub'.format(name))
logstream = self.docker_client.push(name, stream=True, decode=True)
for chunk in logstream:
if 'status' in chunk:
print(chunk['status'])
if 'error' in chunk:
raise ScriptError(
'Error pushing {name}: {err}'.format(name=name, err=chunk['error'])
)
| [] | [] | ["HUB_CREDENTIALS"] | [] | ["HUB_CREDENTIALS"] | python | 1 | 0 | |
packages/amplify-graphql-searchable-transformer/streaming-lambda/python_streaming_function.py | import base64
import json
import logging
import os
import time
import traceback
from urllib.parse import urlparse, quote
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest
from botocore.credentials import get_credentials
from botocore.endpoint import BotocoreHTTPSession
from botocore.session import Session
from boto3.dynamodb.types import TypeDeserializer
# The following parameters are required to configure the ES cluster
ES_ENDPOINT = os.environ['ES_ENDPOINT']
ES_REGION = os.environ['ES_REGION']
DEBUG = True if os.environ['DEBUG'] == "1" else False
ES_USE_EXTERNAL_VERSIONING = True if os.environ['ES_USE_EXTERNAL_VERSIONING'] == "true" else False
# ElasticSearch 6 deprecated having multiple mapping types in an index. Default to doc.
DOC_TYPE = 'doc'
ES_MAX_RETRIES = 3 # Max number of retries for exponential backoff
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if DEBUG else logging.INFO)
logger.info("Streaming to ElasticSearch")
# Custom JSON encoder:
# - converts Python sets to lists (sets are not JSON serializable)
class DDBTypesEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
# Subclass of boto's TypeDeserializer for DynamoDB to adjust for DynamoDB Stream format.
class StreamTypeDeserializer(TypeDeserializer):
def _deserialize_n(self, value):
return float(value)
def _deserialize_b(self, value):
return value # Already in Base64
class ES_Exception(Exception):
'''Capture status_code from request'''
status_code = 0
payload = ''
def __init__(self, status_code, payload):
self.status_code = status_code
self.payload = payload
Exception.__init__(
self, 'ES_Exception: status_code={}, payload={}'.format(status_code, payload))
# Low-level POST data to Amazon Elasticsearch Service generating a Sigv4 signed request
def post_data_to_es(payload, region, creds, host, path, method='POST', proto='https://'):
'''Post data to ES endpoint with SigV4 signed http headers'''
req = AWSRequest(method=method, url=proto + host +
quote(path), data=payload, headers={'Host': host, 'Content-Type': 'application/json'})
SigV4Auth(creds, 'es', region).add_auth(req)
http_session = BotocoreHTTPSession()
res = http_session.send(req.prepare())
if res.status_code >= 200 and res.status_code <= 299:
return res._content
else:
raise ES_Exception(res.status_code, res._content)
# High-level POST data to Amazon Elasticsearch Service with exponential backoff
# according to suggested algorithm: http://docs.aws.amazon.com/general/latest/gr/api-retries.html
def post_to_es(payload):
'''Post data to ES cluster with exponential backoff'''
# Get aws_region and credentials to post signed URL to ES
es_region = ES_REGION or os.environ['AWS_REGION']
session = Session({'region': es_region})
creds = get_credentials(session)
es_url = urlparse(ES_ENDPOINT)
# Extract the domain name in ES_ENDPOINT
es_endpoint = es_url.netloc or es_url.path
# Post data with exponential backoff
retries = 0
while retries < ES_MAX_RETRIES:
if retries > 0:
seconds = (2 ** retries) * .1
logger.debug('Waiting for %.1f seconds', seconds)
time.sleep(seconds)
try:
es_ret_str = post_data_to_es(
payload, es_region, creds, es_endpoint, '/_bulk')
logger.debug('Return from ES: %s', es_ret_str)
es_ret = json.loads(es_ret_str)
if es_ret['errors']:
logger.error(
'ES post unsuccessful, errors present, took=%sms', es_ret['took'])
# Filter errors
es_errors = [item for item in es_ret['items']
if item.get('index', {}).get('error')]
logger.error('List of items with errors: %s',
json.dumps(es_errors))
else:
logger.info('ES post successful, took=%sms', es_ret['took'])
break # Sending to ES was ok, break retry loop
except ES_Exception as e:
if (e.status_code >= 500) and (e.status_code <= 599):
retries += 1 # Candidate for retry
else:
raise # Stop retrying, re-raise exception
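# Note on the backoff schedule above: with ES_MAX_RETRIES = 3 a 5xx response is
# retried twice, sleeping (2 ** retries) * 0.1 seconds first (0.2s, then 0.4s);
# after the final failed attempt the function returns without raising.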
# Extracts the DynamoDB table from an ARN
# ex: arn:aws:dynamodb:eu-west-1:123456789012:table/table-name/stream/2015-11-13T09:23:17.104 should return 'table-name'
def get_table_name_from_arn(arn):
return arn.split(':')[5].split('/')[1]
# Compute a compound doc index from the key(s) of the object in lexicographic order: "k1=key_val1|k2=key_val2"
def compute_doc_index(keys_raw, deserializer, formatIndex=False):
index = []
for key in sorted(keys_raw):
if formatIndex:
index.append('{}={}'.format(
key, deserializer.deserialize(keys_raw[key])))
else:
index.append(deserializer.deserialize(keys_raw[key]))
return '|'.join(map(str,index))
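# Illustration (hypothetical keys, not taken from a real table): for
# Keys = {'pk': {'S': 'a'}, 'sk': {'N': '1'}} this returns 'a|1.0', and
# 'pk=a|sk=1.0' when formatIndex=True (numbers are deserialized to floats by
# StreamTypeDeserializer above).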
def _lambda_handler(event, context):
logger.debug('Event: %s', event)
records = event['Records']
ddb_deserializer = StreamTypeDeserializer()
es_actions = [] # Items to be added/updated/removed from ES - for bulk API
cnt_insert = cnt_modify = cnt_remove = 0
for record in records:
# Handle both native DynamoDB Streams or Streams data from Kinesis (for manual replay)
logger.debug('Record: %s', record)
if record.get('eventSource') == 'aws:dynamodb':
ddb = record['dynamodb']
ddb_table_name = get_table_name_from_arn(record['eventSourceARN'])
doc_seq = ddb['SequenceNumber']
elif record.get('eventSource') == 'aws:kinesis':
ddb = json.loads(base64.b64decode(record['kinesis']['data']))
ddb_table_name = ddb['SourceTable']
doc_seq = record['kinesis']['sequenceNumber']
else:
logger.error('Ignoring non-DynamoDB event sources: %s',
record.get('eventSource'))
continue
# Compute DynamoDB table, type and index for item
doc_table = ddb_table_name.lower()
doc_type = DOC_TYPE
doc_table_parts = doc_table.split('-')
doc_es_index_name = doc_table_parts[0] if len(doc_table_parts) > 0 else doc_table
# Dispatch according to event TYPE
event_name = record['eventName'].upper() # INSERT, MODIFY, REMOVE
logger.debug('doc_table=%s, event_name=%s, seq=%s',
doc_table, event_name, doc_seq)
# Treat events from a Kinesis stream as INSERTs
if event_name == 'AWS:KINESIS:RECORD':
event_name = 'INSERT'
is_ddb_insert_or_update = (event_name == 'INSERT') or (event_name == 'MODIFY')
is_ddb_delete = event_name == 'REMOVE'
image_name = 'NewImage' if is_ddb_insert_or_update else 'OldImage'
if image_name not in ddb:
logger.warning(
'Cannot process stream if it does not contain ' + image_name)
continue
logger.debug(image_name + ': %s', ddb[image_name])
# Deserialize DynamoDB type to Python types
doc_fields = ddb_deserializer.deserialize({'M': ddb[image_name]})
# Sync enabled APIs do soft delete. We need to delete the record in ES if _deleted field is set
if ES_USE_EXTERNAL_VERSIONING and event_name == 'MODIFY' and '_deleted' in doc_fields and doc_fields['_deleted']:
is_ddb_insert_or_update = False
is_ddb_delete = True
# Update counters
if event_name == 'INSERT':
cnt_insert += 1
elif event_name == 'MODIFY':
cnt_modify += 1
elif event_name == 'REMOVE':
cnt_remove += 1
else:
logger.warning('Unsupported event_name: %s', event_name)
logger.debug('Deserialized doc_fields: %s', doc_fields)
if ('Keys' in ddb):
doc_id = compute_doc_index(ddb['Keys'], ddb_deserializer)
else:
            logger.error('Cannot find keys in ddb record')
            continue
# If DynamoDB INSERT or MODIFY, send 'index' to ES
if is_ddb_insert_or_update:
# Generate ES payload for item
action = {'index': {'_index': doc_es_index_name,
'_type': doc_type,
'_id': doc_id}}
# Add external versioning if necessary
if ES_USE_EXTERNAL_VERSIONING and '_version' in doc_fields:
action['index'].update([
('version_type', 'external'),
('_version', doc_fields['_version'])
])
doc_fields.pop('_ttl', None)
doc_fields.pop('_version', None)
# Append ES Action line with 'index' directive
es_actions.append(json.dumps(action))
# Append JSON payload
es_actions.append(json.dumps(doc_fields, cls=DDBTypesEncoder))
# migration step remove old key if it exists
if ('id' in doc_fields) and (event_name == 'MODIFY') :
action = {'delete': {'_index': doc_es_index_name, '_type': doc_type,
'_id': compute_doc_index(ddb['Keys'], ddb_deserializer, True)}}
es_actions.append(json.dumps(action))
# If DynamoDB REMOVE, send 'delete' to ES
elif is_ddb_delete:
action = {'delete': {'_index': doc_es_index_name,
'_type': doc_type, '_id': doc_id}}
if ES_USE_EXTERNAL_VERSIONING and '_version' in doc_fields:
action['delete'].update([
('version_type', 'external'),
('_version', doc_fields['_version'])
])
# Action line with 'delete' directive
es_actions.append(json.dumps(action))
# Prepare bulk payload
es_actions.append('') # Add one empty line to force final \n
es_payload = '\n'.join(es_actions)
logger.info('Posting to ES: inserts=%s updates=%s deletes=%s, total_lines=%s, bytes_total=%s',
cnt_insert, cnt_modify, cnt_remove, len(es_actions) - 1, len(es_payload))
post_to_es(es_payload) # Post to ES with exponential backoff
# Global lambda handler - catches all exceptions to avoid dead letter in the DynamoDB Stream
def lambda_handler(event, context):
try:
return _lambda_handler(event, context)
except Exception:
logger.error(traceback.format_exc())
| [] | [] | ["ES_USE_EXTERNAL_VERSIONING", "AWS_REGION", "DEBUG", "ES_ENDPOINT", "ES_REGION"] | [] | ["ES_USE_EXTERNAL_VERSIONING", "AWS_REGION", "DEBUG", "ES_ENDPOINT", "ES_REGION"] | python | 5 | 0 | |
internal/config/config.go | package config
import (
"os"
"strings"
"github.com/CESARBR/knot-cloud-storage/pkg/logging"
"github.com/spf13/viper"
)
// Server represents the server configuration properties
type Server struct {
Port int
}
// Logger represents the logger configuration properties
type Logger struct {
Level string
Syslog bool
}
// RabbitMQ represents the rabbitmq configuration properties
type RabbitMQ struct {
URL string
}
// MongoDB represents the database configuration properties
type MongoDB struct {
Host string
Port int
Name string
}
// Things represents the things service configuration properties
type Things struct {
Host string
Port int
}
// Expiration represents the data TTL configuration
type Expiration struct {
Time int32
}
// Config represents the service configuration
type Config struct {
Server
Logger
RabbitMQ
MongoDB
Things
Expiration
}
func readFile(name string) {
logger := logging.NewLogrus("error", false).Get("Config")
viper.SetConfigName(name)
if err := viper.ReadInConfig(); err != nil {
logger.Fatalf("Error reading config file, %s", err)
}
}
// Load returns the service configuration
func Load() Config {
var configuration Config
logger := logging.NewLogrus("error", false).Get("Config")
viper.AddConfigPath("internal/config")
viper.SetConfigType("yaml")
readFile("default")
if os.Getenv("ENV") == "development" {
readFile("development")
if err := viper.MergeInConfig(); err != nil {
logger.Fatalf("Error reading config file, %s", err)
}
}
replacer := strings.NewReplacer(".", "_")
viper.SetEnvKeyReplacer(replacer)
viper.AutomaticEnv()
if err := viper.Unmarshal(&configuration); err != nil {
logger.Fatalf("Error unmarshalling configuration, %s", err)
}
return configuration
}
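// Note: because of the "." -> "_" replacer and viper.AutomaticEnv above, a nested
// key such as "mongodb.host" can be overridden through the MONGODB_HOST environment
// variable (the key name here is only an illustration).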
| ["\"ENV\""] | [] | ["ENV"] | [] | ["ENV"] | go | 1 | 0 | |
{{cookiecutter.project_slug}}/config/settings/local.py | """
Local settings
- Run in Debug mode
{% if cookiecutter.use_mailhog == 'y' and cookiecutter.use_docker == 'y' %}
- Use mailhog for emails
{% else %}
- Use console backend for emails
{% endif %}
- Add Django Debug Toolbar
- Add django-extensions as app
"""
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='CHANGEME!!!')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
{% if cookiecutter.use_mailhog == 'y' and cookiecutter.use_docker == 'y' %}
EMAIL_HOST = env('EMAIL_HOST', default='mailhog')
{% else %}
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
{% endif %}
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]
INSTALLED_APPS += ['debug_toolbar', ]
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
{% if cookiecutter.use_docker == 'y' %}
{# [cookiecutter-django] This is a workaround to flake8 "imported but unused" errors #}
import socket
import os
# trick to make django-debug-toolbar work when developing with Docker
if os.environ.get('USE_DOCKER') == 'yes':
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + '1']
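    # Note: ip[:-1] + '1' swaps the last character of the container IP for '1' to
    # approximate the Docker bridge gateway (an assumption that holds for typical
    # x.y.z.2-style addresses), so the toolbar also renders for requests from the host.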
{% endif %}
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['django_extensions', ]
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
{% if cookiecutter.use_celery == 'y' %}
########## CELERY
# In development, all tasks will be executed locally by blocking until the task returns
CELERY_ALWAYS_EAGER = True
########## END CELERY
{% endif %}
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| [] | [] | ["USE_DOCKER"] | [] | ["USE_DOCKER"] | python | 1 | 0 | |
kevals/solr.py | '''
Functions for interacting with Solr, as a Tracking Database.
NOTE: requires Solr > 7.3, as it uses 'add-distinct' updates.
See https://lucene.apache.org/solr/guide/7_3/updating-parts-of-documents.html
'''
import requests
import logging
import json
import os
logger = logging.getLogger(__name__)
class SolrKevalsDB():
def __init__(self, kevalsdb_url=os.environ.get('KEVALS_SOLR_URL', None), update_batch_size=1000):
if not kevalsdb_url:
raise Exception("You must supply a KEVALS_SOLR_URL!")
# Record settings:
self.kevalsdb_url = kevalsdb_url
self.batch_size = update_batch_size
# Set up the update configuration:
self.update_kevalsdb_url = self.kevalsdb_url + '/update?softCommit=true'
def _jsonl_doc_generator(self, input_reader):
for line in input_reader:
item = json.loads(line)
# And return
yield item
def _send_batch(self, batch, as_updates=True):
# Convert the plain dicts into Solr update documents:
updates = []
for item in batch:
# There must be an ID, so complain if there isn't.
if not 'id' in item:
raise Exception("You should supply an id for each update! This update has no ID: %s" % item )
# Turn into an update:
update_item = {}
for key in item:
if key == 'id':
update_item[key] = item[key]
elif key == '_version_':
# Do nothing, as we don't want to send that, because it'll cause conflicts on import.
pass
else:
# If we want to send updates, except those already arranged as updates (i.e. as dicts):
if as_updates and not isinstance(item[key], dict):
# Convert to 'set' updates:
update_item[key] = { 'set': item[key] }
else:
update_item[key] = item[key]
# Add the item to the set:
updates.append(update_item)
# And post the batch as updates:
self._send_update(updates)
def import_jsonl_reader(self, input_reader):
self.import_items_from(self._jsonl_doc_generator(input_reader))
def import_items(self, items):
self._send_batch(items)
def import_items_from(self, item_generator):
batch = []
for item in item_generator:
batch.append(item)
if len(batch) > self.batch_size:
self._send_batch(batch)
batch = []
# And send the final batch if there is one:
if len(batch) > 0:
self._send_batch(batch)
def default_query(self):
return '*:*'
def list(self, field_value=None, sort='timestamp_dt desc', limit=100):
# set solr search terms
solr_query_url = self.kevalsdb_url + '/query'
query_string = {
'q': self.default_query(),
'rows': limit,
'sort': sort
}
# Add optional fields:
if field_value:
if field_value[1] == '_NONE_' or field_value[1] == '':
query_string['q'] += ' AND -{}:[* TO *]'.format(field_value[0])
else:
query_string['q'] += ' AND {}:{}'.format(field_value[0], field_value[1])
        # query Solr and get the search response
logger.info("SolrTrackDB.list: %s %s" %(solr_query_url, query_string))
r = requests.post(url=solr_query_url, data=query_string)
if r.status_code == 200:
response = r.json()['response']
# return hits, if any:
if response['numFound'] > 0:
return response['docs']
else:
return []
else:
raise Exception("Solr returned an error! HTTP %i\n%s" %(r.status_code, r.text))
def get(self, id):
# set solr search terms
solr_query_url = self.kevalsdb_url + '/query'
query_string = {
'q':'id:"{}"'.format(id)
}
        # query Solr and get the search response
logger.info("SolrTrackDB.get: %s %s" %(solr_query_url, query_string))
r = requests.post(url=solr_query_url, data=query_string)
if r.status_code == 200:
response = r.json()['response']
# return hits, if any:
if response['numFound'] == 1:
return response['docs'][0]
else:
return None
else:
raise Exception("Solr returned an error! HTTP %i\n%s" %(r.status_code, r.text))
def _send_update(self, post_data):
# Covert the list of docs to JSONLines:
#post_data = ""
#for item in docs:
# post_data += ("%s\n" % json.dumps(item))
# Set up the POST and check it worked
post_headers = {'Content-Type': 'application/json'}
logger.info("SolrTrackDB.update: %s %s" %(self.update_kevalsdb_url, str(post_data)[0:1000]))
r = requests.post(url=self.update_kevalsdb_url, headers=post_headers, json=post_data)
if r.status_code == 200:
response = r.json()
else:
raise Exception("Solr returned an error! HTTP %i\n%s" %(r.status_code, r.text))
def _update_generator(self, ids, field, value, action):
for id in ids:
# Update TrackDB record for records based on ID:
yield { 'id': id, field: { action: value } }
def update(self, ids, field, value, action='add-distinct'):
self.import_items_from(self._update_generator(ids, field, value, action))
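# Minimal usage sketch (the URL, ids and field names below are illustrative only):
#
#   db = SolrKevalsDB(kevalsdb_url='http://localhost:8983/solr/kevals')
#   db.import_items([{'id': 'doc-1', 'status_s': 'new'}])
#   db.update(['doc-1'], 'tags_ss', 'important')  # uses 'add-distinct' by default
#
# Alternatively, set KEVALS_SOLR_URL in the environment and omit kevalsdb_url.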
| [] | [] | ["KEVALS_SOLR_URL"] | [] | ["KEVALS_SOLR_URL"] | python | 1 | 0 | |
swf/settings.py | # -*- coding:utf-8 -*-
# Copyright (c) 2013, Theo Crevon
# Copyright (c) 2013, Greg Leclercq
#
# See the file LICENSE for copying permission.
from __future__ import unicode_literals
import os
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
def from_stream(stream):
"""Retrieves AWS settings from a stream in INI format.
Example:
>>> from io import StringIO
>>> stream = StringIO('''
...
... [credentials]
... aws_access_key_id=KEY_ID
... aws_secret_access_key=SECRET
...
... [defaults]
... region=eu-west-1
...
... ''')
>>> settings = from_stream(stream)
>>> settings['aws_access_key_id'] == 'KEY_ID'
True
>>> settings['aws_secret_access_key'] == 'SECRET'
True
>>> settings['region'] == 'eu-west-1'
True
>>> stream = StringIO('''
...
... [credentials]
... aws_access_key_id=KEY_ID
... aws_secret_access_key=SECRET
...
... ''')
>>> settings = from_stream(stream)
>>> settings['aws_access_key_id'] == 'KEY_ID'
True
>>> settings['aws_secret_access_key'] == 'SECRET'
True
:param stream: of chars in INI format.
:type stream: stream.
:rtype: dict
    .. note:: some fields may be None.
"""
config = ConfigParser(allow_no_value=True)
if hasattr(config, 'read_file'):
config.read_file(stream)
else:
config.readfp(stream) # deprecated name
settings = {}
if config.has_section('credentials'):
settings.update({
'aws_access_key_id': config.get('credentials',
'aws_access_key_id'),
'aws_secret_access_key': config.get('credentials',
'aws_secret_access_key')
})
if config.has_section('defaults'):
settings['region'] = config.get('defaults', 'region')
return settings
def from_file(path):
"""Retrieves AWS settings from a file in INI format.
:param path: to file in INI format.
:type path: string.
:rtype: dict
    Returns `{}` if there is no file. Lets the underlying exception propagate if it
    cannot load the file (permission denied, file is a directory, etc...)
"""
if not os.path.exists(path):
return {}
with open(path) as stream:
return from_stream(stream)
def from_env():
"""Retrieves AWS settings from environment.
Supported environment variables are:
- `AWS_DEFAULT_REGION`
:rtype: dict
"""
hsh = {}
if "AWS_DEFAULT_REGION" in os.environ:
hsh["region"] = os.environ["AWS_DEFAULT_REGION"]
return hsh
def from_home(path='.swf'):
"""Retrieves settings from home environment
    If the HOME environment variable is set, search for settings in *path* under it.
:rtype: dict
"""
if 'HOME' in os.environ:
swf_path = os.path.join(os.environ['HOME'], path)
return from_file(swf_path)
return {}
def get(path='.swf'):
"""Retrieves settings from a file or the environment.
    First, it will try to retrieve settings from a *path* in the user's home
    directory. Otherwise, it tries to load the settings from the environment.
    If both return an empty dict, it will also return an empty dict.
:rtype: dict
"""
return from_home(path) or from_env()
def set(**settings):
"""Set settings"""
from swf.core import SETTINGS
SETTINGS.update({k: v for k, v in settings.items() if v is not None})
def clear():
"""Clear settings"""
from swf.core import SETTINGS
SETTINGS.clear()
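# Usage sketch (illustrative):
#
#   import swf.settings
#   conf = swf.settings.get()   # reads ~/.swf if present, otherwise the environment
#   swf.settings.set(**conf)    # pushes the values into swf.core.SETTINGS
#
# Note that from_env() currently only honours AWS_DEFAULT_REGION.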
| [] | [] | ["HOME", "AWS_DEFAULT_REGION"] | [] | ["HOME", "AWS_DEFAULT_REGION"] | python | 2 | 0 | |
NAS/auto-keras/inceptiontime.py | import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
print(curPath)
rootPath = curPath
for i in range(2):
rootPath = os.path.split(rootPath)[0]
print(rootPath)
sys.path.append(rootPath)
import tensorflow.keras as keras
from tensorflow.keras.utils import to_categorical
from utils.uts_classification.utils import readmts_uci_har,transform_labels
import autokeras as ak
import numpy as np
from NAS.logger import Logger
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
os.system("nvcc --version")
# Load the UCI_HAR_Dataset
file_name = '../../datasets/mts_data/UCI_HAR_Dataset'
x_train, y_train, x_test, y_test = readmts_uci_har(file_name)
data = np.concatenate((x_train, x_test),axis=0)
label = np.concatenate((y_train, y_test),axis=0)
N = data.shape[0]
ind = int(N*0.9)
x_train = data[:ind]
y_train = label[:ind]
x_test = data[ind:]
y_test = label[ind:]
y_train, y_test = transform_labels(y_train, y_test)
NUM_CLASSES = 6
y_train = to_categorical(y_train, NUM_CLASSES)
y_test = to_categorical(y_test, NUM_CLASSES)
# train_number = int(x_train.shape[0]/ 128) * 128
# x_train = x_train[0:train_number]
# y_train = y_train[0:train_number]
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
print(y_train[:3])
# Initialize the classifier.
input_node = ak.Input()
output_node = ak.InceptionTimeBlock()(input_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=2,
name='UCI_inceptiontime_Greedy_test',directory='nas_result')
# sys.stdout = Logger('nas_result/UCI_inceptiontime_Greedy_test/log', sys.stdout)
# sys.stderr = Logger('nas_result/UCI_inceptiontime_Greedy_test/log_file', sys.stderr) # redirect std err, if necessary
clf.tuner.search_space_summary()
# Search for the best model.
clf.fit(x_train, y_train, epochs=2, validation_split=0.1, batch_size=128, callbacks=[keras.callbacks.EarlyStopping(patience=10)],verbose=1)
clf.tuner.results_summary()
# Evaluate the best model on the testing data.
loss, accuracy, precision, recall, f1 = clf.evaluate(x_test, y_test)
print('*************************----best_model----*************************')
print('loss:', loss)
print('accuracy:', accuracy)
print('precision:', precision)
print('recall:', recall)
print('f1:', f1)
# Evaluate the best 10 models (only a convenience shortcut; it is recommended to retrain the models)
best_models = clf.tuner.get_best_models(num_models=10)
for i in range(2):
loss, accuracy, precision, recall, f1 = best_models[i][2].evaluate(x_test, y_test)
print('*************************----best_model_'+str(i)+'----*************************')
print('loss:', loss)
print('accuracy:', accuracy)
print('precision:', precision)
print('recall:', recall)
print('f1:', f1)
model = clf.export_model()
model.save('nas_result/UCI_inceptiontime_Greedy_test/bestmodel.h5')
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
components/cli/pkg/util/utils.go | /*
 * Copyright (c) 2019 WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
 * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package util
import (
"bufio"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/fatih/color"
"github.com/manifoldco/promptui"
"golang.org/x/crypto/ssh/terminal"
"gopkg.in/cheggaaa/pb.v1"
"github.com/cellery-io/sdk/components/cli/pkg/constants"
"github.com/cellery-io/sdk/components/cli/pkg/kubectl"
)
var Bold = color.New(color.Bold).SprintFunc()
var CyanBold = color.New(color.FgCyan).Add(color.Bold).SprintFunc()
var Faint = color.New(color.Faint).SprintFunc()
var Green = color.New(color.FgGreen).SprintfFunc()
var GreenBold = color.New(color.FgGreen).Add(color.Bold).SprintFunc()
var YellowBold = color.New(color.FgYellow).Add(color.Bold).SprintFunc()
var Red = color.New(color.FgRed).Add(color.Bold).SprintFunc()
func PrintWhatsNextMessage(action string, cmd string) {
fmt.Println()
fmt.Println(Bold("What's next?"))
fmt.Println("--------------------------------------------------------")
fmt.Printf("Execute the following command to %s:\n", action)
fmt.Println(" $ " + cmd)
fmt.Println("--------------------------------------------------------")
}
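// GetDuration renders the elapsed time between startTime and now as a
// human-readable string; for example, a start time 90 seconds in the past
// yields "1 minutes 30 seconds" (unit names are always pluralised here).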
func GetDuration(startTime time.Time) string {
duration := ""
var year, month, day, hour, min, sec int
currentTime := time.Now()
if startTime.Location() != currentTime.Location() {
currentTime = currentTime.In(startTime.Location())
}
if startTime.After(currentTime) {
startTime, currentTime = currentTime, startTime
}
startYear, startMonth, startDay := startTime.Date()
currentYear, currentMonth, currentDay := currentTime.Date()
startHour, startMinute, startSecond := startTime.Clock()
currentHour, currentMinute, currentSecond := currentTime.Clock()
year = int(currentYear - startYear)
month = int(currentMonth - startMonth)
day = int(currentDay - startDay)
hour = int(currentHour - startHour)
min = int(currentMinute - startMinute)
sec = int(currentSecond - startSecond)
// Normalize negative values
if sec < 0 {
sec += 60
min--
}
if min < 0 {
min += 60
hour--
}
if hour < 0 {
hour += 24
day--
}
if day < 0 {
// days in month:
t := time.Date(startYear, startMonth, 32, 0, 0, 0, 0, time.UTC)
day += 32 - t.Day()
month--
}
if month < 0 {
month += 12
year--
}
numOfTimeUnits := 0
if year > 0 {
duration += strconv.Itoa(year) + " years "
numOfTimeUnits++
}
if month > 0 {
duration += strconv.Itoa(month) + " months "
numOfTimeUnits++
}
if day > 0 {
duration += strconv.Itoa(day) + " days "
numOfTimeUnits++
}
if hour > 0 {
duration += strconv.Itoa(hour) + " hours "
numOfTimeUnits++
}
if min > 0 {
duration += strconv.Itoa(min) + " minutes "
numOfTimeUnits++
}
if sec > 0 {
duration += strconv.Itoa(sec) + " seconds"
numOfTimeUnits++
}
return duration
}
func ConvertStringToTime(timeString string) time.Time {
convertedTime, err := time.Parse(time.RFC3339, timeString)
if err != nil {
ExitWithErrorMessage("Error parsing time", err)
}
return convertedTime
}
func UserHomeDir() string {
if runtime.GOOS == "windows" {
home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
if home == "" {
home = os.Getenv("USERPROFILE")
}
return home
}
return os.Getenv("HOME")
}
func UserHomeCelleryDir() string {
return filepath.Join(UserHomeDir(), constants.CELLERY_HOME)
}
func CelleryInstallationDir() string {
celleryHome := ""
if runtime.GOOS == "darwin" {
celleryHome = constants.CELLERY_INSTALLATION_PATH_MAC
}
if runtime.GOOS == "linux" {
celleryHome = constants.CELLERY_INSTALLATION_PATH_UBUNTU
}
return celleryHome
}
func BallerinaInstallationDir() string {
ballerinaHome := ""
if runtime.GOOS == "darwin" {
ballerinaHome = constants.BALLERINA_INSTALLATION_PATH_MAC
}
if runtime.GOOS == "linux" {
ballerinaHome = constants.BALLERINA_INSTALLATION_PATH_UBUNTU
}
return ballerinaHome
}
func ExecuteCommand(cmd *exec.Cmd) error {
stderrReader, _ := cmd.StderrPipe()
stderrScanner := bufio.NewScanner(stderrReader)
go func() {
for stderrScanner.Scan() {
fmt.Println(stderrScanner.Text())
}
}()
err := cmd.Start()
if err != nil {
return err
}
err = cmd.Wait()
if err != nil {
return err
}
return nil
}
// RequestCredentials requests the credentials form the user and returns them
func RequestCredentials(credentialType string, usernameOverride string) (string, string, error) {
fmt.Println()
fmt.Println(YellowBold("?") + " " + credentialType + " credentials required")
var username string
var err error
if usernameOverride == "" {
// Requesting the username from the user
reader := bufio.NewReader(os.Stdin)
fmt.Print("Username: ")
username, err = reader.ReadString('\n')
if err != nil {
return "", "", err
}
} else {
username = usernameOverride
}
// Requesting the password from the user
fmt.Print("Password: ")
bytePassword, err := terminal.ReadPassword(0)
if err != nil {
return username, "", err
}
password := string(bytePassword)
fmt.Println()
return strings.TrimSpace(username), strings.TrimSpace(password), nil
}
// ExitWithErrorMessage prints an error message and exits the command
func ExitWithErrorMessage(message string, err error) {
fmt.Printf("\n\n\x1b[31;1m%s:\x1b[0m %v\n\n", message, err)
os.Exit(1)
}
// PrintSuccessMessage prints the standard command success message
func PrintSuccessMessage(message string) {
fmt.Println()
fmt.Printf("\n%s %s\n", GreenBold("\U00002714"), message)
}
func PrintWarningMessage(message string) {
fmt.Println()
fmt.Printf("%s\n", YellowBold("\U000026A0 "+message))
}
// RunMethodExists checks if the run method exists in ballerina file
func RunMethodExists(sourceFile string) (bool, error) {
sourceFileBytes, err := ioutil.ReadFile(sourceFile)
if err != nil {
return false, err
}
// Check whether run method exists
return regexp.MatchString(
`.*public(\s)+function(\s)+run(\s)*\((s)*cellery:ImageName(\s)+.+(\s)*,(\s)*map<cellery:ImageName>(\s)+.+(\s)*\)(\s)+returns(\s)+error\\?`,
string(sourceFileBytes))
}
// TestMethodExists checks if the test method exists in ballerina file
func TestMethodExists(sourceFile string) (bool, error) {
sourceFileBytes, err := ioutil.ReadFile(sourceFile)
if err != nil {
return false, err
}
// Check whether test method exists
return regexp.MatchString(
`.*public(\s)+function(\s)+test(\s)*\((s)*cellery:ImageName(\s)+.+(\s)*,(\s)*map<cellery:ImageName>(\s)+.+(\s)*\)(\s)+returns(\s)+error\\?`,
string(sourceFileBytes))
}
func ContainsInStringArray(array []string, item string) bool {
for _, element := range array {
if element == item {
return true
}
}
return false
}
func GetYesOrNoFromUser(question string, withBackOption bool) (bool, bool, error) {
var options []string
var isBackSelected = false
if withBackOption {
options = []string{"Yes", "No", constants.CELLERY_SETUP_BACK}
} else {
options = []string{"Yes", "No"}
}
prompt := promptui.Select{
Label: question,
Items: options,
}
_, result, err := prompt.Run()
if result == constants.CELLERY_SETUP_BACK {
isBackSelected = true
}
if err != nil {
return false, isBackSelected, fmt.Errorf("Prompt failed %v\n", err)
}
return result == "Yes", isBackSelected, nil
}
// OpenBrowser opens up the provided URL in a browser
func OpenBrowser(url string) error {
var cmd *exec.Cmd
switch runtime.GOOS {
case "openbsd":
fallthrough
case "linux":
cmd = exec.Command("xdg-open", url)
case "darwin":
cmd = exec.Command("open", url)
case "windows":
r := strings.NewReplacer("&", "^&")
cmd = exec.Command("cmd", "/c", "start", r.Replace(url))
}
if cmd != nil {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Start()
if err != nil {
log.Printf("Failed to open browser due to error %v", err)
return fmt.Errorf("Failed to open browser: " + err.Error())
}
err = cmd.Wait()
if err != nil {
log.Printf("Failed to wait for open browser command to finish due to error %v", err)
return fmt.Errorf("Failed to wait for open browser command to finish: " + err.Error())
}
return nil
} else {
return errors.New("unsupported platform")
}
}
func FormatBytesToString(size int64) string {
return pb.Format(size).To(pb.U_BYTES_DEC).String()
}
func MergeKubeConfig(newConfigFile string) error {
newConf, err := kubectl.ReadConfig(newConfigFile)
if err != nil {
return err
}
confFile, err := kubectl.DefaultConfigFile()
if err != nil {
return err
}
if _, err := os.Stat(confFile); err != nil {
if os.IsNotExist(err) {
// kube-config does not exist. Create a new one
confDir, err := kubectl.DefaultConfigDir()
if err != nil {
return err
}
// Check for .kube directory and create if not present
if _, err := os.Stat(confDir); os.IsNotExist(err) {
err = os.Mkdir(confDir, 0755)
if err != nil {
return err
}
}
return kubectl.WriteConfig(confFile, newConf)
} else {
return err
}
}
oldConf, err := kubectl.ReadConfig(confFile)
if err != nil {
return err
}
merged := kubectl.MergeConfig(oldConf, newConf)
return kubectl.WriteConfig(confFile, merged)
}
func IsCompleteSetupSelected() (bool, bool) {
var isCompleteSelected = false
var isBackSelected = false
cellTemplate := &promptui.SelectTemplates{
Label: "{{ . }}",
Active: "\U000027A4 {{ .| bold }}",
Inactive: " {{ . | faint }}",
Help: Faint("[Use arrow keys]"),
}
cellPrompt := promptui.Select{
Label: YellowBold("?") + " Select the type of runtime",
Items: []string{constants.BASIC, constants.COMPLETE, constants.CELLERY_SETUP_BACK},
Templates: cellTemplate,
}
_, value, err := cellPrompt.Run()
if err != nil {
ExitWithErrorMessage("Failed to select an option: %v", err)
}
if value == constants.CELLERY_SETUP_BACK {
isBackSelected = true
}
if value == constants.COMPLETE {
isCompleteSelected = true
}
return isCompleteSelected, isBackSelected
}
func IsLoadBalancerIngressTypeSelected() (bool, bool) {
var isLoadBalancerSelected = false
var isBackSelected = false
cellTemplate := &promptui.SelectTemplates{
Label: "{{ . }}",
Active: "\U000027A4 {{ .| bold }}",
Inactive: " {{ . | faint }}",
Help: Faint("[Use arrow keys]"),
}
cellPrompt := promptui.Select{
Label: YellowBold("?") + " Select ingress mode",
Items: []string{constants.INGRESS_MODE_NODE_PORT, constants.INGRESS_MODE_LOAD_BALANCER, constants.CELLERY_SETUP_BACK},
Templates: cellTemplate,
}
_, value, err := cellPrompt.Run()
if err != nil {
ExitWithErrorMessage("Failed to select an option: %v", err)
}
if value == constants.CELLERY_SETUP_BACK {
isBackSelected = true
}
if value == constants.INGRESS_MODE_LOAD_BALANCER {
isLoadBalancerSelected = true
}
return isLoadBalancerSelected, isBackSelected
}
func IsCommandAvailable(name string) bool {
cmd := exec.Command("/bin/sh", "-c", "command -v "+name)
if err := cmd.Run(); err != nil {
return false
}
return true
}
func CreateTempExecutableBalFile(file string, action string) (string, error) {
var ballerinaMain = ""
if action == "build" {
ballerinaMain = `
public function main(string action, cellery:ImageName iName, map<cellery:ImageName> instances) returns error? {
return build(iName);
}`
} else if action == "run" {
ballerinaMain = `
public function main(string action, cellery:ImageName iName, map<cellery:ImageName> instances) returns error? {
return run(iName, instances);
}`
} else if action == "test" {
ballerinaMain = `
public function main(string action, cellery:ImageName iName, map<cellery:ImageName> instances) returns error? {
return test(iName, instances);
}`
} else {
return "", errors.New("invalid action:" + action)
}
originalFilePath, _ := filepath.Abs(file)
input, err := ioutil.ReadFile(originalFilePath)
if err != nil {
return "", err
}
var newFileContent = string(input) + ballerinaMain
balFileName := filepath.Base(originalFilePath)
var newFileName = strings.Replace(balFileName, ".bal", "", 1) + "_" + action + ".bal"
originalFileDir := filepath.Dir(originalFilePath)
targetAbs := filepath.Join(originalFileDir, "target")
err = os.Mkdir(targetAbs, 0777)
if err != nil {
return "", err
}
targetFilePath := filepath.Join(targetAbs, newFileName)
err = ioutil.WriteFile(targetFilePath, []byte(newFileContent), 0644)
if err != nil {
return "", err
}
return targetFilePath, nil
}
func ConvertToAlphanumeric(input, replacement string) string {
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
ExitWithErrorMessage("Error making regex", err)
}
processedString := reg.ReplaceAllString(input, replacement)
return processedString
}
| ["\"HOMEDRIVE\"", "\"HOMEPATH\"", "\"USERPROFILE\"", "\"HOME\""] | [] | ["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"] | [] | ["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"] | go | 4 | 0 | |
script popolamento DB/env/lib/python3.7/site-packages/pip/_internal/utils/logging.py | from __future__ import absolute_import
import contextlib
import errno
import logging
import logging.handlers
import os
import sys
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.misc import ensure_dir
from pip._vendor.six import PY2
try:
import threading
except ImportError:
import dummy_threading as threading # type: ignore
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
_log_state = threading.local()
_log_state.indentation = 0
class BrokenStdoutLoggingError(Exception):
"""
Raised if BrokenPipeError occurs for the stdout stream while logging.
"""
pass
# BrokenPipeError does not exist in Python 2 and, in addition, manifests
# differently in Windows and non-Windows.
if WINDOWS:
# In Windows, a broken pipe can show up as EINVAL rather than EPIPE:
# https://bugs.python.org/issue19612
# https://bugs.python.org/issue30418
if PY2:
def _is_broken_pipe_error(exc_class, exc):
"""See the docstring for non-Windows Python 3 below."""
return (exc_class is IOError and
exc.errno in (errno.EINVAL, errno.EPIPE))
else:
# In Windows, a broken pipe IOError became OSError in Python 3.
def _is_broken_pipe_error(exc_class, exc):
"""See the docstring for non-Windows Python 3 below."""
return ((exc_class is BrokenPipeError) or # noqa: F821
(exc_class is OSError and
exc.errno in (errno.EINVAL, errno.EPIPE)))
elif PY2:
def _is_broken_pipe_error(exc_class, exc):
"""See the docstring for non-Windows Python 3 below."""
return (exc_class is IOError and exc.errno == errno.EPIPE)
else:
# Then we are in the non-Windows Python 3 case.
def _is_broken_pipe_error(exc_class, exc):
"""
Return whether an exception is a broken pipe error.
Args:
exc_class: an exception class.
exc: an exception instance.
"""
return (exc_class is BrokenPipeError) # noqa: F821
@contextlib.contextmanager
def indent_log(num=2):
"""
A context manager which will cause the log output to be indented for any
log messages emitted inside it.
"""
_log_state.indentation += num
try:
yield
finally:
_log_state.indentation -= num
def get_indentation():
return getattr(_log_state, 'indentation', 0)
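# Example (sketch): each active indent_log() context adds two spaces to the
# prefix produced by IndentingFormatter below, so
#
#   with indent_log():
#       logger.info("detail line")   # rendered indented under the parent message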
class IndentingFormatter(logging.Formatter):
def __init__(self, *args, **kwargs):
"""
A logging.Formatter obeying containing indent_log contexts.
:param add_timestamp: A bool indicating output lines should be prefixed
with their record's timestamp.
"""
self.add_timestamp = kwargs.pop("add_timestamp", False)
super(IndentingFormatter, self).__init__(*args, **kwargs)
def format(self, record):
"""
Calls the standard formatter, but will indent all of the log messages
by our current indentation level.
"""
formatted = super(IndentingFormatter, self).format(record)
prefix = ''
if self.add_timestamp:
prefix = self.formatTime(record, "%Y-%m-%dT%H:%M:%S ")
prefix += " " * get_indentation()
formatted = "".join([
prefix + line
for line in formatted.splitlines(True)
])
return formatted
def _color_wrap(*colors):
def wrapped(inp):
return "".join(list(colors) + [inp, colorama.Style.RESET_ALL])
return wrapped
class ColorizedStreamHandler(logging.StreamHandler):
# Don't build up a list of colors if we don't have colorama
if colorama:
COLORS = [
# This needs to be in order from highest logging level to lowest.
(logging.ERROR, _color_wrap(colorama.Fore.RED)),
(logging.WARNING, _color_wrap(colorama.Fore.YELLOW)),
]
else:
COLORS = []
def __init__(self, stream=None, no_color=None):
logging.StreamHandler.__init__(self, stream)
self._no_color = no_color
if WINDOWS and colorama:
self.stream = colorama.AnsiToWin32(self.stream)
def _using_stdout(self):
"""
Return whether the handler is using sys.stdout.
"""
if WINDOWS and colorama:
# Then self.stream is an AnsiToWin32 object.
return self.stream.wrapped is sys.stdout
return self.stream is sys.stdout
def should_color(self):
# Don't colorize things if we do not have colorama or if told not to
if not colorama or self._no_color:
return False
real_stream = (
self.stream if not isinstance(self.stream, colorama.AnsiToWin32)
else self.stream.wrapped
)
# If the stream is a tty we should color it
if hasattr(real_stream, "isatty") and real_stream.isatty():
return True
# If we have an ANSI term we should color it
if os.environ.get("TERM") == "ANSI":
return True
# If anything else we should not color it
return False
def format(self, record):
msg = logging.StreamHandler.format(self, record)
if self.should_color():
for level, color in self.COLORS:
if record.levelno >= level:
msg = color(msg)
break
return msg
# The logging module says handleError() can be customized.
def handleError(self, record):
exc_class, exc = sys.exc_info()[:2]
# If a broken pipe occurred while calling write() or flush() on the
# stdout stream in logging's Handler.emit(), then raise our special
# exception so we can handle it in main() instead of logging the
# broken pipe error and continuing.
if (exc_class and self._using_stdout() and
_is_broken_pipe_error(exc_class, exc)):
raise BrokenStdoutLoggingError()
return super(ColorizedStreamHandler, self).handleError(record)
class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler):
def _open(self):
ensure_dir(os.path.dirname(self.baseFilename))
return logging.handlers.RotatingFileHandler._open(self)
class MaxLevelFilter(logging.Filter):
def __init__(self, level):
self.level = level
def filter(self, record):
return record.levelno < self.level
def setup_logging(verbosity, no_color, user_log_file):
"""Configures and sets up all of the logging
Returns the requested logging level, as its integer value.
"""
# Determine the level to be logging at.
if verbosity >= 1:
level = "DEBUG"
elif verbosity == -1:
level = "WARNING"
elif verbosity == -2:
level = "ERROR"
elif verbosity <= -3:
level = "CRITICAL"
else:
level = "INFO"
level_number = getattr(logging, level)
# The "root" logger should match the "console" level *unless* we also need
# to log to a user log file.
include_user_log = user_log_file is not None
if include_user_log:
additional_log_file = user_log_file
root_level = "DEBUG"
else:
additional_log_file = "/dev/null"
root_level = level
# Disable any logging besides WARNING unless we have DEBUG level logging
# enabled for vendored libraries.
vendored_log_level = "WARNING" if level in ["INFO", "ERROR"] else "DEBUG"
# Shorthands for clarity
log_streams = {
"stdout": "ext://sys.stdout",
"stderr": "ext://sys.stderr",
}
handler_classes = {
"stream": "pip._internal.utils.logging.ColorizedStreamHandler",
"file": "pip._internal.utils.logging.BetterRotatingFileHandler",
}
logging.config.dictConfig({
"version": 1,
"disable_existing_loggers": False,
"filters": {
"exclude_warnings": {
"()": "pip._internal.utils.logging.MaxLevelFilter",
"level": logging.WARNING,
},
},
"formatters": {
"indent": {
"()": IndentingFormatter,
"format": "%(message)s",
},
"indent_with_timestamp": {
"()": IndentingFormatter,
"format": "%(message)s",
"add_timestamp": True,
},
},
"handlers": {
"console": {
"level": level,
"class": handler_classes["stream"],
"no_color": no_color,
"stream": log_streams["stdout"],
"filters": ["exclude_warnings"],
"formatter": "indent",
},
"console_errors": {
"level": "WARNING",
"class": handler_classes["stream"],
"no_color": no_color,
"stream": log_streams["stderr"],
"formatter": "indent",
},
"user_log": {
"level": "DEBUG",
"class": handler_classes["file"],
"filename": additional_log_file,
"delay": True,
"formatter": "indent_with_timestamp",
},
},
"root": {
"level": root_level,
"handlers": ["console", "console_errors"] + (
["user_log"] if include_user_log else []
),
},
"loggers": {
"pip._vendor": {
"level": vendored_log_level
}
},
})
return level_number
| [] | [] | ["TERM"] | [] | ["TERM"] | python | 1 | 0 | |
tests/base/test_context.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from itertools import chain
import os
from os.path import join, abspath
from tempfile import gettempdir
from unittest import TestCase, mock
import pytest
from conda.auxlib.collection import AttrDict
from conda.auxlib.ish import dals
from conda._vendor.toolz.itertoolz import concat
from conda.base.constants import PathConflict, ChannelPriority
from conda.base.context import context, reset_context, conda_tests_ctxt_mgmt_def_pol
from conda.common.compat import odict, iteritems
from conda.common.configuration import ValidationError, YamlRawParameter
from conda.common.io import env_var, env_vars
from conda.common.path import expand, win_path_backout
from conda.common.url import join_url, path_to_url
from conda.common.serialize import yaml_round_trip_load
from conda.core.package_cache_data import PackageCacheData
from conda.gateways.disk.create import mkdir_p, create_package_cache_directory
from conda.gateways.disk.delete import rm_rf
from conda.gateways.disk.permissions import make_read_only
from conda.gateways.disk.update import touch
from conda.models.channel import Channel
from conda.models.match_spec import MatchSpec
from conda.utils import on_win
from ..helpers import tempdir
class ContextCustomRcTests(TestCase):
def setUp(self):
string = dals("""
custom_channels:
darwin: https://some.url.somewhere/stuff
chuck: http://another.url:8080/with/path
custom_multichannels:
michele:
- https://do.it.with/passion
- learn_from_every_thing
steve:
- more-downloads
migrated_custom_channels:
darwin: s3://just/cant
chuck: file:///var/lib/repo/
migrated_channel_aliases:
- https://conda.anaconda.org
channel_alias: ftp://new.url:8082
conda-build:
root-dir: /some/test/path
proxy_servers:
http: http://user:[email protected]:8080
https: none
ftp:
sftp: ''
ftps: false
rsync: 'false'
aggressive_update_packages: []
channel_priority: false
""")
reset_context(())
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
def tearDown(self):
reset_context()
def test_migrated_custom_channels(self):
assert Channel('https://some.url.somewhere/stuff/darwin/noarch/a-mighty-fine.tar.bz2').canonical_name == 'darwin'
assert Channel('s3://just/cant/darwin/noarch/a-mighty-fine.tar.bz2').canonical_name == 'darwin'
assert Channel('s3://just/cant/darwin/noarch/a-mighty-fine.tar.bz2').urls() == [
'https://some.url.somewhere/stuff/darwin/noarch']
def test_old_channel_alias(self):
platform = context.subdir
cf_urls = ["ftp://new.url:8082/conda-forge/%s" % platform,
"ftp://new.url:8082/conda-forge/noarch"]
assert Channel('conda-forge').urls() == cf_urls
url = "https://conda.anaconda.org/conda-forge/osx-64/some-great-package.tar.bz2"
assert Channel(url).canonical_name == 'conda-forge'
assert Channel(url).base_url == 'ftp://new.url:8082/conda-forge'
assert Channel(url).urls() == [
'ftp://new.url:8082/conda-forge/osx-64',
'ftp://new.url:8082/conda-forge/noarch'
]
assert Channel("https://conda.anaconda.org/conda-forge/label/dev/linux-64/"
"some-great-package.tar.bz2").urls() == [
"ftp://new.url:8082/conda-forge/label/dev/linux-64",
"ftp://new.url:8082/conda-forge/label/dev/noarch",
]
def test_signing_metadata_url_base(self):
SIGNING_URL_BASE = "https://conda.example.com/pkgs"
string = f"signing_metadata_url_base: {SIGNING_URL_BASE}"
reset_context()
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
assert context.signing_metadata_url_base == SIGNING_URL_BASE
def test_signing_metadata_url_base_empty_default_channels(self):
string = dals("""
default_channels: []
""")
reset_context()
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
assert len(context.default_channels) is 0
assert context.signing_metadata_url_base is None
def test_client_ssl_cert(self):
string = dals("""
client_ssl_cert_key: /some/key/path
""")
reset_context()
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
pytest.raises(ValidationError, context.validate_configuration)
def test_conda_envs_path(self):
saved_envs_path = os.environ.get('CONDA_ENVS_PATH')
beginning = "C:" + os.sep if on_win else os.sep
path1 = beginning + os.sep.join(['my', 'envs', 'dir', '1'])
path2 = beginning + os.sep.join(['my', 'envs', 'dir', '2'])
try:
os.environ['CONDA_ENVS_PATH'] = path1
reset_context()
assert context.envs_dirs[0] == path1
os.environ['CONDA_ENVS_PATH'] = os.pathsep.join([path1, path2])
reset_context()
assert context.envs_dirs[0] == path1
assert context.envs_dirs[1] == path2
finally:
if saved_envs_path:
os.environ['CONDA_ENVS_PATH'] = saved_envs_path
else:
del os.environ['CONDA_ENVS_PATH']
def test_conda_bld_path(self):
conda_bld_path = join(gettempdir(), 'conda-bld')
conda_bld_url = path_to_url(conda_bld_path)
try:
mkdir_p(conda_bld_path)
with env_var('CONDA_BLD_PATH', conda_bld_path, stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert len(context.conda_build_local_paths) >= 1
assert context.conda_build_local_paths[0] == conda_bld_path
channel = Channel('local')
assert channel.channel_name == "local"
assert channel.channel_location is None
assert channel.platform is None
assert channel.package_filename is None
assert channel.auth is None
assert channel.token is None
assert channel.scheme is None
assert channel.canonical_name == "local"
assert channel.url() is None
urls = list(concat((
join_url(url, context.subdir),
join_url(url, 'noarch'),
) for url in context.conda_build_local_urls))
assert channel.urls() == urls
channel = Channel(conda_bld_url)
assert channel.canonical_name == "local"
assert channel.platform is None
assert channel.package_filename is None
assert channel.auth is None
assert channel.token is None
assert channel.scheme == "file"
assert channel.urls() == [
join_url(conda_bld_url, context.subdir),
join_url(conda_bld_url, 'noarch'),
]
assert channel.url() == join_url(conda_bld_url, context.subdir)
assert channel.channel_name.lower() == win_path_backout(conda_bld_path).lstrip('/').lower()
assert channel.channel_location == '' # location really is an empty string; all path information is in channel_name
assert channel.canonical_name == "local"
finally:
rm_rf(conda_bld_path)
def test_custom_multichannels(self):
assert context.custom_multichannels['michele'] == (
Channel('passion'),
Channel('learn_from_every_thing'),
)
def test_restore_free_channel(self):
assert 'https://repo.anaconda.com/pkgs/free' not in context.default_channels
with env_var("CONDA_RESTORE_FREE_CHANNEL", 'true', stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.default_channels.index('https://repo.anaconda.com/pkgs/free') == 1
def test_proxy_servers(self):
assert context.proxy_servers['http'] == 'http://user:[email protected]:8080'
assert context.proxy_servers['https'] is None
assert context.proxy_servers['ftp'] is None
assert context.proxy_servers['sftp'] == ''
assert context.proxy_servers['ftps'] == 'False'
assert context.proxy_servers['rsync'] == 'false'
def test_conda_build_root_dir(self):
assert context.conda_build['root-dir'] == "/some/test/path"
def test_clobber_enum(self):
with env_var("CONDA_PATH_CONFLICT", 'prevent', stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.path_conflict == PathConflict.prevent
def test_context_parameter_map(self):
all_parameter_names = context.list_parameters()
all_mapped_parameter_names = tuple(chain.from_iterable(context.category_map.values()))
unmapped_parameter_names = set(all_parameter_names) - set(all_mapped_parameter_names)
assert not unmapped_parameter_names, unmapped_parameter_names
assert len(all_parameter_names) == len(all_mapped_parameter_names)
def test_context_parameters_have_descriptions(self):
skip_categories = ('CLI-only', 'Hidden and Undocumented')
documented_parameter_names = chain.from_iterable((
parameter_names for category, parameter_names in iteritems(context.category_map)
if category not in skip_categories
))
from pprint import pprint
for name in documented_parameter_names:
description = context.get_descriptions()[name]
pprint(context.describe_parameter(name))
def test_local_build_root_custom_rc(self):
assert context.local_build_root == abspath("/some/test/path")
test_path_1 = join(os.getcwd(), 'test_path_1')
with env_var("CONDA_CROOT", test_path_1, stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.local_build_root == test_path_1
test_path_2 = join(os.getcwd(), 'test_path_2')
with env_var("CONDA_BLD_PATH", test_path_2, stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.local_build_root == test_path_2
def test_default_target_is_root_prefix(self):
assert context.target_prefix == context.root_prefix
def test_target_prefix(self):
with tempdir() as prefix:
mkdir_p(join(prefix, 'first', 'envs'))
mkdir_p(join(prefix, 'second', 'envs'))
create_package_cache_directory(join(prefix, 'first', 'pkgs'))
create_package_cache_directory(join(prefix, 'second', 'pkgs'))
envs_dirs = (join(prefix, 'first', 'envs'), join(prefix, 'second', 'envs'))
with env_var('CONDA_ENVS_DIRS', os.pathsep.join(envs_dirs), stack_callback=conda_tests_ctxt_mgmt_def_pol):
# with both dirs writable, choose first
reset_context((), argparse_args=AttrDict(name='blarg', func='create'))
assert context.target_prefix == join(envs_dirs[0], 'blarg')
# with first dir read-only, choose second
PackageCacheData._cache_.clear()
make_read_only(join(envs_dirs[0], '.conda_envs_dir_test'))
reset_context((), argparse_args=AttrDict(name='blarg', func='create'))
assert context.target_prefix == join(envs_dirs[1], 'blarg')
# if first dir is read-only but environment exists, choose first
PackageCacheData._cache_.clear()
mkdir_p(join(envs_dirs[0], 'blarg'))
touch(join(envs_dirs[0], 'blarg', 'history'))
reset_context((), argparse_args=AttrDict(name='blarg', func='create'))
assert context.target_prefix == join(envs_dirs[0], 'blarg')
def test_aggressive_update_packages(self):
assert context.aggressive_update_packages == tuple()
specs = ['certifi', 'openssl>=1.1']
with env_var('CONDA_AGGRESSIVE_UPDATE_PACKAGES', ','.join(specs), stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.aggressive_update_packages == tuple(MatchSpec(s) for s in specs)
def test_channel_priority(self):
assert context.channel_priority == ChannelPriority.DISABLED
def test_cuda_detection(self):
# confirm that CUDA detection doesn't raise exception
version = context.cuda_version
assert version is None or isinstance(version, str)
def test_cuda_override(self):
with env_var('CONDA_OVERRIDE_CUDA', '4.5'):
version = context.cuda_version
assert version == '4.5'
def test_cuda_override_none(self):
with env_var('CONDA_OVERRIDE_CUDA', ''):
version = context.cuda_version
assert version is None
def test_threads(self):
default_value = None
assert context.default_threads == default_value
assert context.repodata_threads == default_value
assert context.verify_threads == 1
assert context.execute_threads == 1
with env_var('CONDA_DEFAULT_THREADS', '3',
stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.default_threads == 3
assert context.verify_threads == 3
assert context.repodata_threads == 3
assert context.execute_threads == 3
with env_var('CONDA_VERIFY_THREADS', '3',
stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.default_threads == default_value
assert context.verify_threads == 3
assert context.repodata_threads == default_value
assert context.execute_threads == 1
with env_var('CONDA_REPODATA_THREADS', '3',
stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.default_threads == default_value
assert context.verify_threads == 1
assert context.repodata_threads == 3
assert context.execute_threads == 1
with env_var('CONDA_EXECUTE_THREADS', '3',
stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.default_threads == default_value
assert context.verify_threads == 1
assert context.repodata_threads == default_value
assert context.execute_threads == 3
with env_vars({'CONDA_EXECUTE_THREADS': '3',
'CONDA_DEFAULT_THREADS': '1'},
stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.default_threads == 1
assert context.verify_threads == 1
assert context.repodata_threads == 1
assert context.execute_threads == 3
def test_channels_defaults(self):
"""
Test when no channels are provided in the CLI.
"""
reset_context(())
assert context.channels == ('defaults',)
def test_channels_defaults_condarc(self):
"""
Test when no channels are provided in the CLI, but some are set in condarc.
"""
reset_context(())
string = dals("""
channels: ['defaults', 'conda-forge']
""")
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
assert context.channels == ('defaults', 'conda-forge')
def test_specify_channels_cli_adding_defaults_no_condarc(self):
"""
When channels haven't been specified in condarc, 'defaults'
should be present when a channel is specified in the CLI.
"""
reset_context((), argparse_args=AttrDict(channel=['conda-forge']))
assert context.channels == ('conda-forge', 'defaults')
def test_specify_channels_cli_condarc(self):
"""
When channels have been specified in condarc, those channels
should be used along with the one specified in the CLI.
"""
reset_context((), argparse_args=AttrDict(channel=['conda-forge']))
string = dals("""
channels: ['defaults', 'conda-forge']
""")
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
assert context.channels == ('defaults', 'conda-forge')
def test_specify_different_channels_cli_condarc(self):
"""
When channels have been specified in condarc, those channels
should be used along with the one specified in the CLI.
In this test, the channel given in the CLI is different from the one in condarc;
'defaults' should not be added.
"""
reset_context((), argparse_args=AttrDict(channel=['other']))
string = dals("""
channels: ['conda-forge']
""")
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
assert context.channels == ('conda-forge', 'other')
def test_specify_same_channels_cli_as_in_condarc(self):
"""
When channels have been specified in condarc, those channels
should be used along with the one specified in the CLI.
In this test, the channel given in the CLI is the same as the one in condarc;
'defaults' should not be added.
See https://github.com/conda/conda/issues/10732
"""
reset_context((), argparse_args=AttrDict(channel=['conda-forge']))
string = dals("""
channels: ['conda-forge']
""")
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
assert context.channels == ('conda-forge',)
def test_expandvars(self):
"""
Environment variables should be expanded in settings that have expandvars=True.
"""
def _get_expandvars_context(attr, config_expr, env_value):
with mock.patch.dict(os.environ, {"TEST_VAR": env_value}):
reset_context(())
string = f"{attr}: {config_expr}"
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
return getattr(context, attr)
ssl_verify = _get_expandvars_context("ssl_verify", "${TEST_VAR}", "yes")
assert ssl_verify
for attr, env_value in [
("client_ssl_cert", "foo"),
("client_ssl_cert_key", "foo"),
("channel_alias", "http://foo"),
]:
value = _get_expandvars_context(attr, "${TEST_VAR}", env_value)
assert value == env_value
for attr in [
"migrated_custom_channels",
"proxy_servers",
]:
value = _get_expandvars_context("proxy_servers", "{'x': '${TEST_VAR}'}", "foo")
assert value == {"x": "foo"}
for attr in [
"channels",
"default_channels",
"whitelist_channels",
]:
value = _get_expandvars_context(attr, "['${TEST_VAR}']", "foo")
assert value == ("foo",)
custom_channels = _get_expandvars_context("custom_channels", "{'x': '${TEST_VAR}'}", "http://foo")
assert custom_channels["x"].location == "foo"
custom_multichannels = _get_expandvars_context("custom_multichannels", "{'x': ['${TEST_VAR}']}", "http://foo")
assert len(custom_multichannels["x"]) == 1
assert custom_multichannels["x"][0].location == "foo"
envs_dirs = _get_expandvars_context("envs_dirs", "['${TEST_VAR}']", "/foo")
assert any("foo" in d for d in envs_dirs)
pkgs_dirs = _get_expandvars_context("pkgs_dirs", "['${TEST_VAR}']", "/foo")
assert any("foo" in d for d in pkgs_dirs)
class ContextDefaultRcTests(TestCase):
def test_subdirs(self):
assert context.subdirs == (context.subdir, 'noarch')
subdirs = ('linux-highest', 'linux-64', 'noarch')
with env_var('CONDA_SUBDIRS', ','.join(subdirs), stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.subdirs == subdirs
def test_local_build_root_default_rc(self):
if context.root_writable:
assert context.local_build_root == join(context.root_prefix, 'conda-bld')
else:
assert context.local_build_root == expand('~/conda-bld')
| []
| []
| [
"CONDA_ENVS_PATH"
]
| [] | ["CONDA_ENVS_PATH"] | python | 1 | 0 | |
api/client/info.go | package client
import (
"fmt"
"os"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/docker/engine"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/units"
"github.com/docker/docker/utils"
)
// 'docker info': display system-wide information.
func (cli *DockerCli) CmdInfo(args ...string) error {
cmd := cli.Subcmd("info", "", "Display system-wide information", true)
cmd.Require(flag.Exact, 0)
utils.ParseFlags(cmd, args, false)
body, _, err := readBody(cli.call("GET", "/info", nil, false))
if err != nil {
return err
}
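// Decode the response body into an engine.Env (remoteInfo) so individual fields can be queried below.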
out := engine.NewOutput()
remoteInfo, err := out.AddEnv()
if err != nil {
return err
}
if _, err := out.Write(body); err != nil {
log.Errorf("Error reading remote info: %s", err)
return err
}
out.Close()
if remoteInfo.Exists("Containers") {
fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers"))
}
if remoteInfo.Exists("Images") {
fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images"))
}
if remoteInfo.Exists("Driver") {
fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver"))
}
if remoteInfo.Exists("DriverStatus") {
var driverStatus [][2]string
if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
return err
}
for _, pair := range driverStatus {
fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
}
}
if remoteInfo.Exists("ExecutionDriver") {
fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver"))
}
if remoteInfo.Exists("KernelVersion") {
fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
}
if remoteInfo.Exists("OperatingSystem") {
fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem"))
}
if remoteInfo.Exists("NCPU") {
fmt.Fprintf(cli.out, "CPUs: %d\n", remoteInfo.GetInt("NCPU"))
}
if remoteInfo.Exists("MemTotal") {
fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(remoteInfo.GetInt64("MemTotal"))))
}
if remoteInfo.Exists("Name") {
fmt.Fprintf(cli.out, "Name: %s\n", remoteInfo.Get("Name"))
}
if remoteInfo.Exists("ID") {
fmt.Fprintf(cli.out, "ID: %s\n", remoteInfo.Get("ID"))
}
if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
if remoteInfo.Exists("Debug") {
fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug"))
}
fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "")
if remoteInfo.Exists("NFd") {
fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd"))
}
if remoteInfo.Exists("NGoroutines") {
fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines"))
}
if remoteInfo.Exists("SystemTime") {
t, err := remoteInfo.GetTime("SystemTime")
if err != nil {
log.Errorf("Error reading system time: %v", err)
} else {
fmt.Fprintf(cli.out, "System Time: %s\n", t.Format(time.UnixDate))
}
}
if remoteInfo.Exists("NEventsListener") {
fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
}
if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" {
fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1)
}
if initPath := remoteInfo.Get("InitPath"); initPath != "" {
fmt.Fprintf(cli.out, "Init Path: %s\n", initPath)
}
if root := remoteInfo.Get("DockerRootDir"); root != "" {
fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", root)
}
}
if remoteInfo.Exists("HttpProxy") {
fmt.Fprintf(cli.out, "Http Proxy: %s\n", remoteInfo.Get("HttpProxy"))
}
if remoteInfo.Exists("HttpsProxy") {
fmt.Fprintf(cli.out, "Https Proxy: %s\n", remoteInfo.Get("HttpsProxy"))
}
if remoteInfo.Exists("NoProxy") {
fmt.Fprintf(cli.out, "No Proxy: %s\n", remoteInfo.Get("NoProxy"))
}
if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
cli.LoadConfigFile()
u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username
if len(u) > 0 {
fmt.Fprintf(cli.out, "Username: %v\n", u)
fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress"))
}
}
if remoteInfo.Exists("MemoryLimit") && !remoteInfo.GetBool("MemoryLimit") {
fmt.Fprintf(cli.err, "WARNING: No memory limit support\n")
}
if remoteInfo.Exists("SwapLimit") && !remoteInfo.GetBool("SwapLimit") {
fmt.Fprintf(cli.err, "WARNING: No swap limit support\n")
}
if remoteInfo.Exists("IPv4Forwarding") && !remoteInfo.GetBool("IPv4Forwarding") {
fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n")
}
if remoteInfo.Exists("Labels") {
fmt.Fprintln(cli.out, "Labels:")
for _, attribute := range remoteInfo.GetList("Labels") {
fmt.Fprintf(cli.out, " %s\n", attribute)
}
}
return nil
}
| [
"\"DEBUG\"",
"\"DEBUG\""
]
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | go | 1 | 0 | |
go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go | /*
Copyright 2022 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vreplsuite
import (
"context"
"flag"
"fmt"
"os"
"path"
"regexp"
"strings"
"testing"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/schemadiff"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/test/endtoend/onlineddl"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
clusterInstance *cluster.LocalProcessCluster
vtParams mysql.ConnParams
evaluatedMysqlParams *mysql.ConnParams
hostname = "localhost"
keyspaceName = "ks"
cell = "zone1"
schemaChangeDirectory = ""
tableName = `onlineddl_test`
eventName = `onlineddl_test`
)
const (
testDataPath = "../../onlineddl/vrepl_suite/testdata"
defaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION"
)
type testTableSchema struct {
testName string
tableSchema string
}
var (
fromTestTableSchemas []*testTableSchema
toTestTableSchemas []*testTableSchema
autoIncrementRegexp = regexp.MustCompile(`(?i) auto_increment[\s]*[=]?[\s]*([0-9]+)`)
)
func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()
exitcode, err := func() (int, error) {
clusterInstance = cluster.NewCluster(cell, hostname)
schemaChangeDirectory = path.Join("/tmp", fmt.Sprintf("schema_change_dir_%d", clusterInstance.GetAndReserveTabletUID()))
defer os.RemoveAll(schemaChangeDirectory)
defer clusterInstance.Teardown()
if _, err := os.Stat(schemaChangeDirectory); os.IsNotExist(err) {
_ = os.Mkdir(schemaChangeDirectory, 0700)
}
clusterInstance.VtctldExtraArgs = []string{
"-schema_change_dir", schemaChangeDirectory,
"-schema_change_controller", "local",
"-schema_change_check_interval", "1",
}
clusterInstance.VtTabletExtraArgs = []string{
"-enable-lag-throttler",
"-throttle_threshold", "1s",
"-heartbeat_enable",
"-heartbeat_interval", "250ms",
"-migration_check_interval", "5s",
}
if err := clusterInstance.StartTopo(); err != nil {
return 1, err
}
// Start keyspace
keyspace := &cluster.Keyspace{
Name: keyspaceName,
}
// No need for replicas in this test suite
if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 0, false); err != nil {
return 1, err
}
vtgateInstance := clusterInstance.NewVtgateInstance()
// set the gateway we want to use
vtgateInstance.GatewayImplementation = "tabletgateway"
// Start vtgate
if err := vtgateInstance.Setup(); err != nil {
return 1, err
}
// ensure it is torn down during cluster TearDown
clusterInstance.VtgateProcess = *vtgateInstance
vtParams = mysql.ConnParams{
Host: clusterInstance.Hostname,
Port: clusterInstance.VtgateMySQLPort,
}
return m.Run(), nil
}()
if err != nil {
fmt.Printf("%v\n", err)
os.Exit(1)
} else {
os.Exit(exitcode)
}
}
func TestSchemaChange(t *testing.T) {
defer cluster.PanicHandler(t)
shards := clusterInstance.Keyspaces[0].Shards
require.Equal(t, 1, len(shards))
files, err := os.ReadDir(testDataPath)
require.NoError(t, err)
for _, f := range files {
if !f.IsDir() {
continue
}
// this is a test!
t.Run(f.Name(), func(t *testing.T) {
testSingle(t, f.Name())
})
}
}
func readTestFile(t *testing.T, testName string, fileName string) (content string, exists bool) {
filePath := path.Join(testDataPath, testName, fileName)
_, err := os.Stat(filePath)
if os.IsNotExist(err) {
return "", false
}
require.NoError(t, err)
b, err := os.ReadFile(filePath)
require.NoError(t, err)
return strings.TrimSpace(string(b)), true
}
// testSingle is the main testing function for a single test in the suite.
// It prepares the ground, creates the test data, runs a migration, checks the expected results/errors, and cleans up.
func testSingle(t *testing.T, testName string) {
if ignoreVersions, exists := readTestFile(t, testName, "ignore_versions"); exists {
// ignoreVersions is a regexp
re, err := regexp.Compile(ignoreVersions)
require.NoError(t, err)
rs := mysqlExec(t, "select @@version as ver", "")
row := rs.Named().Row()
require.NotNil(t, row)
mysqlVersion := row["ver"].ToString()
if re.MatchString(mysqlVersion) {
t.Skipf("Skipping test due to ignore_versions=%s", ignoreVersions)
return
}
}
if _, exists := readTestFile(t, testName, "expect_query_failure"); exists {
// VTGate failure is expected!
// irrelevant to this suite.
// We only want to test actual migrations
t.Skip("expect_query_failure found. Irrelevant to this suite")
return
}
if _, exists := readTestFile(t, testName, "expect_failure"); exists {
// irrelevant to this suite.
// We only want to test actual migrations
t.Skip("expect_failure found. Irrelevant to this suite")
return
}
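// Normalize server state: enforce the default sql_mode, disable the event scheduler, and drop leftovers from earlier tests.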
sqlModeQuery := fmt.Sprintf("set @@global.sql_mode='%s'", defaultSQLMode)
_ = mysqlExec(t, sqlModeQuery, "")
_ = mysqlExec(t, "set @@global.event_scheduler=0", "")
_ = mysqlExec(t, fmt.Sprintf("drop table if exists %s", tableName), "")
_ = mysqlExec(t, fmt.Sprintf("drop event if exists %s", eventName), "")
var fromCreateTable string
var toCreateTable string
{
// create
f := "create.sql"
_, exists := readTestFile(t, testName, f)
require.True(t, exists)
onlineddl.MysqlClientExecFile(t, mysqlParams(), testDataPath, testName, f)
// ensure test table has been created:
// read the create statement
fromCreateTable = getCreateTableStatement(t, tableName)
require.NotEmpty(t, fromCreateTable)
}
defer func() {
// destroy
f := "destroy.sql"
if _, exists := readTestFile(t, testName, f); exists {
onlineddl.MysqlClientExecFile(t, mysqlParams(), testDataPath, testName, f)
}
}()
// Run test
alterClause := "engine=innodb"
if content, exists := readTestFile(t, testName, "alter"); exists {
alterClause = content
}
alterStatement := fmt.Sprintf("alter table %s %s", tableName, alterClause)
// Run the DDL!
onlineddl.VtgateExecQuery(t, &vtParams, alterStatement, "")
// migration is complete
// read the table structure of modified table:
toCreateTable = getCreateTableStatement(t, tableName)
require.NotEmpty(t, toCreateTable)
if content, exists := readTestFile(t, testName, "expect_table_structure"); exists {
assert.Contains(t, toCreateTable, content, "expected SHOW CREATE TABLE to contain text in 'expect_table_structure' file")
}
fromTestTableSchemas = append(fromTestTableSchemas, &testTableSchema{
testName: testName,
tableSchema: fromCreateTable,
})
toTestTableSchemas = append(toTestTableSchemas, &testTableSchema{
testName: testName,
tableSchema: toCreateTable,
})
hints := &schemadiff.DiffHints{}
if strings.Contains(alterClause, "AUTO_INCREMENT") {
hints.AutoIncrementStrategy = schemadiff.AutoIncrementApplyAlways
}
t.Run("validate diff", func(t *testing.T) {
validateDiff(t, fromCreateTable, toCreateTable, hints)
})
}
// func TestRandomSchemaChanges(t *testing.T) {
// defer cluster.PanicHandler(t)
// hints := &schemadiff.DiffHints{AutoIncrementStrategy: schemadiff.AutoIncrementIgnore}
// // count := 20
// // for i := 0; i < count; i++ {
// // fromTestTableSchema := fromTestTableSchemas[rand.Intn(len(fromTestTableSchemas))]
// // toTestTableSchema := toTestTableSchemas[rand.Intn(len(toTestTableSchemas))]
// // testName := fmt.Sprintf("%s/%s", fromTestTableSchema.testName, toTestTableSchema.testName)
// // t.Run(testName, func(t *testing.T) {
// // validateDiff(t, fromTestTableSchema.tableSchema, toTestTableSchema.tableSchema, hints)
// // })
// // }
// for i := range rand.Perm(len(fromTestTableSchemas)) {
// fromTestTableSchema := fromTestTableSchemas[i]
// for j := range rand.Perm(len(toTestTableSchemas)) {
// toTestTableSchema := toTestTableSchemas[j]
// testName := fmt.Sprintf("%s:%s", fromTestTableSchema.testName, toTestTableSchema.testName)
// t.Run(testName, func(t *testing.T) {
// validateDiff(t, fromTestTableSchema.tableSchema, toTestTableSchema.tableSchema, hints)
// })
// }
// }
// }
func TestIgnoreAutoIncrementRegexp(t *testing.T) {
// validate the validation function we use in our tests...
tt := []struct {
statement string
expect string
}{
{
statement: "CREATE TABLE t(id int auto_increment primary key)",
expect: "CREATE TABLE t(id int auto_increment primary key)",
},
{
statement: "CREATE TABLE t(id int auto_increment primary key) auto_increment=3",
expect: "CREATE TABLE t(id int auto_increment primary key)",
},
{
statement: "CREATE TABLE t(id int auto_increment primary key) AUTO_INCREMENT=3 default charset=utf8",
expect: "CREATE TABLE t(id int auto_increment primary key) default charset=utf8",
},
{
statement: "CREATE TABLE t(id int auto_increment primary key) default charset=utf8 auto_increment=3",
expect: "CREATE TABLE t(id int auto_increment primary key) default charset=utf8",
},
{
statement: "CREATE TABLE t(id int auto_increment primary key) default charset=utf8 auto_increment=3 engine=innodb",
expect: "CREATE TABLE t(id int auto_increment primary key) default charset=utf8 engine=innodb",
},
}
for _, tc := range tt {
t.Run(tc.statement, func(t *testing.T) {
ignored := ignoreAutoIncrement(t, tc.statement)
assert.Equal(t, tc.expect, ignored)
})
}
}
func ignoreAutoIncrement(t *testing.T, createTable string) string {
result := autoIncrementRegexp.ReplaceAllString(createTable, "")
// sanity:
require.Contains(t, result, "CREATE TABLE")
require.Contains(t, result, ")")
return result
}
func validateDiff(t *testing.T, fromCreateTable string, toCreateTable string, hints *schemadiff.DiffHints) {
// turn the "from" and "to" create statement strings (which we just read via SHOW CREATE TABLE into sqlparser.CreateTable statement)
fromStmt, err := sqlparser.Parse(fromCreateTable)
require.NoError(t, err)
fromCreateTableStatement, ok := fromStmt.(*sqlparser.CreateTable)
require.True(t, ok)
toStmt, err := sqlparser.Parse(toCreateTable)
require.NoError(t, err)
toCreateTableStatement, ok := toStmt.(*sqlparser.CreateTable)
require.True(t, ok)
// The actual diff logic here!
diff, err := schemadiff.DiffTables(fromCreateTableStatement, toCreateTableStatement, hints)
assert.NoError(t, err)
// The diff can be empty or there can be an actual ALTER TABLE statement
diffedAlterQuery := ""
if diff != nil && !diff.IsEmpty() {
diffedAlterQuery = sqlparser.String(diff.Statement())
}
// Validate the diff! The way we do it is:
// Recreate the original table
// Alter the table directly using our evaluated diff (if empty we do nothing)
// Review the resulting table structure (via SHOW CREATE TABLE)
// Expect it to be identical to the structure generated by the suite earlier on (toCreateTable)
_ = mysqlExec(t, fmt.Sprintf("drop table if exists %s", tableName), "")
onlineddl.VtgateExecQuery(t, &vtParams, fromCreateTable, "")
if diffedAlterQuery != "" {
onlineddl.VtgateExecQuery(t, &vtParams, diffedAlterQuery, "")
}
resultCreateTable := getCreateTableStatement(t, tableName)
if hints.AutoIncrementStrategy == schemadiff.AutoIncrementIgnore {
toCreateTable = ignoreAutoIncrement(t, toCreateTable)
resultCreateTable = ignoreAutoIncrement(t, resultCreateTable)
}
// The actual validation test here:
assert.Equal(t, toCreateTable, resultCreateTable, "mismatched table structure. ALTER query was: %s", diffedAlterQuery)
// Also, let's see that our diff agrees there's no change:
resultStmt, err := sqlparser.Parse(resultCreateTable)
require.NoError(t, err)
resultCreateTableStatement, ok := resultStmt.(*sqlparser.CreateTable)
require.True(t, ok)
resultDiff, err := schemadiff.DiffTables(toCreateTableStatement, resultCreateTableStatement, hints)
assert.NoError(t, err)
assert.Nil(t, resultDiff)
}
func getTablet() *cluster.Vttablet {
return clusterInstance.Keyspaces[0].Shards[0].Vttablets[0]
}
func mysqlParams() *mysql.ConnParams {
if evaluatedMysqlParams != nil {
return evaluatedMysqlParams
}
evaluatedMysqlParams = &mysql.ConnParams{
Uname: "vt_dba",
UnixSocket: path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d", getTablet().TabletUID), "/mysql.sock"),
DbName: fmt.Sprintf("vt_%s", keyspaceName),
}
return evaluatedMysqlParams
}
// mysqlExec executes a SQL statement directly against MySQL; if expectError is non-empty, it asserts the returned error contains it
func mysqlExec(t *testing.T, sql string, expectError string) *sqltypes.Result {
t.Helper()
ctx := context.Background()
conn, err := mysql.Connect(ctx, mysqlParams())
require.Nil(t, err)
defer conn.Close()
qr, err := conn.ExecuteFetch(sql, 100000, true)
if expectError == "" {
require.NoError(t, err)
} else {
require.Error(t, err, "error should not be nil")
require.Contains(t, err.Error(), expectError, "Unexpected error")
}
return qr
}
// getCreateTableStatement returns the CREATE TABLE statement for a given table
func getCreateTableStatement(t *testing.T, tableName string) (statement string) {
queryResult, err := getTablet().VttabletProcess.QueryTablet(fmt.Sprintf("show create table %s", tableName), keyspaceName, true)
require.Nil(t, err)
assert.Equal(t, len(queryResult.Rows), 1)
assert.Equal(t, len(queryResult.Rows[0]), 2) // table name, create statement
statement = queryResult.Rows[0][1].ToString()
return statement
}
| [
"\"VTDATAROOT\""
]
| []
| [
"VTDATAROOT"
]
| [] | ["VTDATAROOT"] | go | 1 | 0 | |
tests/components/modbus/test_init.py | """The tests for the Modbus init.
This file is responsible for testing:
- pymodbus API
- Functionality of class ModbusHub
- Coverage 100%:
__init__.py
base_platform.py
const.py
modbus.py
"""
from datetime import timedelta
import logging
from unittest import mock
from pymodbus.exceptions import ModbusException
from pymodbus.pdu import ExceptionResponse, IllegalFunctionRequest
import pytest
import voluptuous as vol
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.modbus import number
from homeassistant.components.modbus.const import (
ATTR_ADDRESS,
ATTR_HUB,
ATTR_STATE,
ATTR_UNIT,
ATTR_VALUE,
CALL_TYPE_COIL,
CALL_TYPE_DISCRETE,
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
CALL_TYPE_WRITE_COIL,
CALL_TYPE_WRITE_COILS,
CALL_TYPE_WRITE_REGISTER,
CALL_TYPE_WRITE_REGISTERS,
CONF_BAUDRATE,
CONF_BYTESIZE,
CONF_INPUT_TYPE,
CONF_PARITY,
CONF_STOPBITS,
DEFAULT_SCAN_INTERVAL,
MODBUS_DOMAIN as DOMAIN,
SERVICE_WRITE_COIL,
SERVICE_WRITE_REGISTER,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
CONF_ADDRESS,
CONF_BINARY_SENSORS,
CONF_DELAY,
CONF_HOST,
CONF_METHOD,
CONF_NAME,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SENSORS,
CONF_TIMEOUT,
CONF_TYPE,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from .conftest import TEST_MODBUS_NAME, ReadResult
from tests.common import async_fire_time_changed
TEST_SENSOR_NAME = "testSensor"
TEST_ENTITY_ID = f"{SENSOR_DOMAIN}.{TEST_SENSOR_NAME}"
TEST_HOST = "modbusTestHost"
async def test_number_validator():
"""Test number validator."""
for value, value_type in [
(15, int),
(15.1, float),
("15", int),
("15.1", float),
(-15, int),
(-15.1, float),
("-15", int),
("-15.1", float),
]:
assert isinstance(number(value), value_type)
try:
number("x15.1")
except (vol.Invalid):
return
pytest.fail("Number not throwing exception")
@pytest.mark.parametrize(
"do_config",
[
{
CONF_TYPE: "tcp",
CONF_HOST: TEST_HOST,
CONF_PORT: 5501,
},
{
CONF_TYPE: "tcp",
CONF_HOST: TEST_HOST,
CONF_PORT: 5501,
CONF_NAME: TEST_MODBUS_NAME,
CONF_TIMEOUT: 30,
CONF_DELAY: 10,
},
{
CONF_TYPE: "udp",
CONF_HOST: TEST_HOST,
CONF_PORT: 5501,
},
{
CONF_TYPE: "udp",
CONF_HOST: TEST_HOST,
CONF_PORT: 5501,
CONF_NAME: TEST_MODBUS_NAME,
CONF_TIMEOUT: 30,
CONF_DELAY: 10,
},
{
CONF_TYPE: "rtuovertcp",
CONF_HOST: TEST_HOST,
CONF_PORT: 5501,
},
{
CONF_TYPE: "rtuovertcp",
CONF_HOST: TEST_HOST,
CONF_PORT: 5501,
CONF_NAME: TEST_MODBUS_NAME,
CONF_TIMEOUT: 30,
CONF_DELAY: 10,
},
{
CONF_TYPE: "serial",
CONF_BAUDRATE: 9600,
CONF_BYTESIZE: 8,
CONF_METHOD: "rtu",
CONF_PORT: "usb01",
CONF_PARITY: "E",
CONF_STOPBITS: 1,
},
{
CONF_TYPE: "serial",
CONF_BAUDRATE: 9600,
CONF_BYTESIZE: 8,
CONF_METHOD: "rtu",
CONF_PORT: "usb01",
CONF_PARITY: "E",
CONF_STOPBITS: 1,
CONF_NAME: TEST_MODBUS_NAME,
CONF_TIMEOUT: 30,
CONF_DELAY: 10,
},
{
CONF_TYPE: "tcp",
CONF_HOST: TEST_HOST,
CONF_PORT: 5501,
CONF_DELAY: 5,
},
[
{
CONF_TYPE: "tcp",
CONF_HOST: TEST_HOST,
CONF_PORT: 5501,
CONF_NAME: TEST_MODBUS_NAME,
},
{
CONF_TYPE: "tcp",
CONF_HOST: TEST_HOST,
CONF_PORT: 5501,
CONF_NAME: TEST_MODBUS_NAME + "2",
},
{
CONF_TYPE: "serial",
CONF_BAUDRATE: 9600,
CONF_BYTESIZE: 8,
CONF_METHOD: "rtu",
CONF_PORT: "usb01",
CONF_PARITY: "E",
CONF_STOPBITS: 1,
CONF_NAME: TEST_MODBUS_NAME + "3",
},
],
],
)
async def test_config_modbus(hass, caplog, do_config, mock_pymodbus):
"""Run configuration test for modbus."""
config = {DOMAIN: do_config}
caplog.set_level(logging.ERROR)
assert await async_setup_component(hass, DOMAIN, config) is True
await hass.async_block_till_done()
assert DOMAIN in hass.config.components
assert len(caplog.records) == 0
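# Keys used to build the parametrized write-service test cases below.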
VALUE = "value"
FUNC = "func"
DATA = "data"
SERVICE = "service"
@pytest.mark.parametrize(
"do_write",
[
{
DATA: ATTR_VALUE,
VALUE: 15,
SERVICE: SERVICE_WRITE_REGISTER,
FUNC: CALL_TYPE_WRITE_REGISTER,
},
{
DATA: ATTR_VALUE,
VALUE: [1, 2, 3],
SERVICE: SERVICE_WRITE_REGISTER,
FUNC: CALL_TYPE_WRITE_REGISTERS,
},
{
DATA: ATTR_STATE,
VALUE: False,
SERVICE: SERVICE_WRITE_COIL,
FUNC: CALL_TYPE_WRITE_COIL,
},
{
DATA: ATTR_STATE,
VALUE: [True, False, True],
SERVICE: SERVICE_WRITE_COIL,
FUNC: CALL_TYPE_WRITE_COILS,
},
],
)
async def test_pb_service_write(hass, do_write, caplog, mock_modbus):
"""Run test for service write_register."""
func_name = {
CALL_TYPE_WRITE_COIL: mock_modbus.write_coil,
CALL_TYPE_WRITE_COILS: mock_modbus.write_coils,
CALL_TYPE_WRITE_REGISTER: mock_modbus.write_register,
CALL_TYPE_WRITE_REGISTERS: mock_modbus.write_registers,
}
data = {
ATTR_HUB: TEST_MODBUS_NAME,
ATTR_UNIT: 17,
ATTR_ADDRESS: 16,
do_write[DATA]: do_write[VALUE],
}
await hass.services.async_call(DOMAIN, do_write[SERVICE], data, blocking=True)
assert func_name[do_write[FUNC]].called
assert func_name[do_write[FUNC]].call_args[0] == (
data[ATTR_ADDRESS],
data[do_write[DATA]],
)
mock_modbus.reset_mock()
for return_value in [
ExceptionResponse(0x06),
IllegalFunctionRequest(0x06),
ModbusException("fail write_"),
]:
caplog.set_level(logging.DEBUG)
func_name[do_write[FUNC]].return_value = return_value
await hass.services.async_call(DOMAIN, do_write[SERVICE], data, blocking=True)
assert func_name[do_write[FUNC]].called
assert caplog.messages[-1].startswith("Pymodbus:")
mock_modbus.reset_mock()
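# Build a one-sensor config for the requested platform, wire the mocked pymodbus reads to the given
# return value/exception, then advance time past the scan interval so the entity gets polled.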
async def _read_helper(hass, do_group, do_type, do_return, do_exception, mock_pymodbus):
config = {
DOMAIN: [
{
CONF_TYPE: "tcp",
CONF_HOST: TEST_HOST,
CONF_PORT: 5501,
CONF_NAME: TEST_MODBUS_NAME,
do_group: [
{
CONF_INPUT_TYPE: do_type,
CONF_NAME: TEST_SENSOR_NAME,
CONF_ADDRESS: 51,
CONF_SCAN_INTERVAL: 1,
}
],
}
]
}
mock_pymodbus.read_coils.side_effect = do_exception
mock_pymodbus.read_discrete_inputs.side_effect = do_exception
mock_pymodbus.read_input_registers.side_effect = do_exception
mock_pymodbus.read_holding_registers.side_effect = do_exception
mock_pymodbus.read_coils.return_value = do_return
mock_pymodbus.read_discrete_inputs.return_value = do_return
mock_pymodbus.read_input_registers.return_value = do_return
mock_pymodbus.read_holding_registers.return_value = do_return
now = dt_util.utcnow()
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
assert await async_setup_component(hass, DOMAIN, config) is True
await hass.async_block_till_done()
now = now + timedelta(seconds=DEFAULT_SCAN_INTERVAL + 60)
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
@pytest.mark.parametrize(
"do_return,do_exception,do_expect",
[
[ReadResult([7]), None, "7"],
[IllegalFunctionRequest(0x99), None, STATE_UNAVAILABLE],
[ExceptionResponse(0x99), None, STATE_UNAVAILABLE],
[ReadResult([7]), ModbusException("fail read_"), STATE_UNAVAILABLE],
],
)
@pytest.mark.parametrize(
"do_type",
[CALL_TYPE_REGISTER_HOLDING, CALL_TYPE_REGISTER_INPUT],
)
async def test_pb_read_value(
hass, caplog, do_type, do_return, do_exception, do_expect, mock_pymodbus
):
"""Run test for different read."""
# The purpose of this test is to exercise the special
# return values from pymodbus
# (ExceptionResponse, IllegalFunctionRequest)
# as well as raised exceptions.
# We "hijack" binary_sensor and sensor in order
# to make a proper black-box test.
await _read_helper(
hass, CONF_SENSORS, do_type, do_return, do_exception, mock_pymodbus
)
# Check state
entity_id = f"{SENSOR_DOMAIN}.{TEST_SENSOR_NAME}"
assert hass.states.get(entity_id).state
@pytest.mark.parametrize(
"do_return,do_exception,do_expect",
[
[ReadResult([0x01]), None, STATE_ON],
[IllegalFunctionRequest(0x99), None, STATE_UNAVAILABLE],
[ExceptionResponse(0x99), None, STATE_UNAVAILABLE],
[ReadResult([7]), ModbusException("fail read_"), STATE_UNAVAILABLE],
],
)
@pytest.mark.parametrize("do_type", [CALL_TYPE_DISCRETE, CALL_TYPE_COIL])
async def test_pb_read_state(
hass, caplog, do_type, do_return, do_exception, do_expect, mock_pymodbus
):
"""Run test for different read."""
# The purpose of this test is to exercise the special
# return values from pymodbus
# (ExceptionResponse, IllegalFunctionRequest)
# as well as raised exceptions.
# We "hijack" binary_sensor and sensor in order
# to make a proper black-box test.
await _read_helper(
hass, CONF_BINARY_SENSORS, do_type, do_return, do_exception, mock_pymodbus
)
# Check state
entity_id = f"{BINARY_SENSOR_DOMAIN}.{TEST_SENSOR_NAME}"
state = hass.states.get(entity_id).state
assert state == do_expect
async def test_pymodbus_constructor_fail(hass, caplog):
"""Run test for failing pymodbus constructor."""
config = {
DOMAIN: [
{
CONF_TYPE: "tcp",
CONF_HOST: TEST_HOST,
CONF_PORT: 5501,
}
]
}
with mock.patch(
"homeassistant.components.modbus.modbus.ModbusTcpClient"
) as mock_pb:
caplog.set_level(logging.ERROR)
mock_pb.side_effect = ModbusException("test no class")
assert await async_setup_component(hass, DOMAIN, config) is True
await hass.async_block_till_done()
assert len(caplog.records) == 1
assert caplog.records[0].levelname == "ERROR"
assert mock_pb.called
async def test_pymodbus_connect_fail(hass, caplog, mock_pymodbus):
"""Run test for failing pymodbus constructor."""
config = {
DOMAIN: [
{
CONF_TYPE: "tcp",
CONF_HOST: TEST_HOST,
CONF_PORT: 5501,
}
]
}
caplog.set_level(logging.ERROR)
mock_pymodbus.connect.side_effect = ModbusException("test connect fail")
mock_pymodbus.close.side_effect = ModbusException("test connect fail")
assert await async_setup_component(hass, DOMAIN, config) is True
await hass.async_block_till_done()
assert len(caplog.records) == 1
assert caplog.records[0].levelname == "ERROR"
async def test_delay(hass, mock_pymodbus):
"""Run test for startup delay."""
# The purpose of this test is to verify the startup delay.
# We "hijack" a binary_sensor to make a proper black-box test.
test_delay = 15
test_scan_interval = 5
entity_id = f"{BINARY_SENSOR_DOMAIN}.{TEST_SENSOR_NAME}"
config = {
DOMAIN: [
{
CONF_TYPE: "tcp",
CONF_HOST: TEST_HOST,
CONF_PORT: 5501,
CONF_NAME: TEST_MODBUS_NAME,
CONF_DELAY: test_delay,
CONF_BINARY_SENSORS: [
{
CONF_INPUT_TYPE: CALL_TYPE_COIL,
CONF_NAME: f"{TEST_SENSOR_NAME}",
CONF_ADDRESS: 52,
CONF_SCAN_INTERVAL: test_scan_interval,
},
],
}
]
}
mock_pymodbus.read_coils.return_value = ReadResult([0x01])
now = dt_util.utcnow()
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
assert await async_setup_component(hass, DOMAIN, config) is True
await hass.async_block_till_done()
# pass first scan_interval
start_time = now
now = now + timedelta(seconds=(test_scan_interval + 1))
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
stop_time = start_time + timedelta(seconds=(test_delay + 1))
step_timedelta = timedelta(seconds=1)
while now < stop_time:
now = now + step_timedelta
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
now = now + step_timedelta + timedelta(seconds=2)
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ON
| []
| []
| []
| [] | [] | python | null | null | null |
share/ci/appimage.py | import common as c
from config import *
import os
import sys
import subprocess as sub
import shutil
from glob import glob
if len(sys.argv) > 1 and sys.argv[1] == 'glibc_version': # subcommand
sub.run('ldd --version | head -n 1 | grep -Po "\\d\\.\\d\\d"', shell=True)
exit(0)
tag = os.environ.get('TAG', '')
artifact_name = '{}-{}{}.AppImage'.format(app_name, app_version, tag)
if len(sys.argv) > 1 and sys.argv[1] == 'artifact_name': # subcommand
c.print(artifact_name)
exit(0)
artifact_path = os.path.abspath(artifact_name)
c.print('>> Making appimage')
base_url = 'https://github.com/probonopd/linuxdeployqt/releases/download'
continuous_url = base_url + '/continuous/linuxdeployqt-continuous-x86_64.AppImage'
tagged_url = base_url + '/6/linuxdeployqt-6-x86_64.AppImage'
linuxdeployqt_url = tagged_url
linuxdeployqt_original = os.path.basename(linuxdeployqt_url)
c.download(linuxdeployqt_url, linuxdeployqt_original)
c.run('chmod a+x {}'.format(linuxdeployqt_original))
linuxdeployqt_bin = os.path.abspath('linuxdeployqt')
c.symlink(linuxdeployqt_original, linuxdeployqt_bin)
os.chdir(build_dir)
install_dir = os.path.abspath('appdir')
c.recreate_dir(install_dir)
c.run('make INSTALL_ROOT={0} DESTDIR={0} install'.format(install_dir))
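# Inside a container, extract the AppImage and use the unpacked AppRun instead of executing it directly.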
if c.is_inside_docker():
c.run('{} --appimage-extract'.format(linuxdeployqt_bin))
linuxdeployqt_bin = os.path.abspath('squashfs-root/AppRun')
os.environ['LD_LIBRARY_PATH'] = dependencies_dir + '/lib'
os.environ['VERSION'] = app_version
# debug flags: -unsupported-bundle-everything -unsupported-allow-new-glibc
flags = '' if os.getenv("DEBUG") is None else '-unsupported-allow-new-glibc'
additional_files = glob(ssl_dir + '/lib/lib*.so.*') + \
glob('/usr/lib/x86_64-linux-gnu/nss/*') + \
glob(dependencies_dir + '/lib/libtesseract-*.so')
out_lib_dir = install_dir + '/usr/lib'
os.makedirs(out_lib_dir, exist_ok=True)
for f in additional_files:
c.print('>> Copying {} to {}'.format(f, out_lib_dir))
shutil.copy(f, out_lib_dir)
c.run('{} {}/usr/share/applications/*.desktop {} -appimage -qmake={}/bin/qmake'.format(
linuxdeployqt_bin, install_dir, flags, qt_dir))
c.run('mv {}-{}*.AppImage "{}"'.format(app_name, app_version, artifact_path))
bin_path = install_dir + '/usr/bin/' + bin_name
c.print('>> Md5 {} {}'.format(bin_path, c.md5sum(bin_path)))
| []
| []
| [
"VERSION",
"TAG",
"LD_LIBRARY_PATH",
"DEBUG"
]
| [] | ["VERSION", "TAG", "LD_LIBRARY_PATH", "DEBUG"] | python | 4 | 0 | |
examples/trials/mnist-keras/mnist-keras.py | # Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import logging
import os
import keras
import numpy as np
from keras import backend as K
from keras.callbacks import TensorBoard
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential
import nni
LOG = logging.getLogger('mnist_keras')
K.set_image_data_format('channels_last')
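# NNI provides the trial output directory; TensorBoard logs are written there.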
TENSORBOARD_DIR = os.environ['NNI_OUTPUT_DIR']
H, W = 28, 28
NUM_CLASSES = 10
def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):
'''
Create simple convolutional model
'''
layers = [
Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
Conv2D(64, (3, 3), activation='relu'),
MaxPooling2D(pool_size=(2, 2)),
Flatten(),
Dense(100, activation='relu'),
Dense(num_classes, activation='softmax')
]
model = Sequential(layers)
if hyper_params['optimizer'] == 'Adam':
optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])
else:
optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])
return model
def load_mnist_data(args):
'''
Load MNIST dataset
'''
mnist_path = os.path.join(os.environ.get('NNI_OUTPUT_DIR'), 'mnist.npz')
(x_train, y_train), (x_test, y_test) = mnist.load_data(path=mnist_path)
os.remove(mnist_path)
x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train]
x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test]
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]
LOG.debug('x_train shape: %s', (x_train.shape,))
LOG.debug('x_test shape: %s', (x_test.shape,))
return x_train, y_train, x_test, y_test
class SendMetrics(keras.callbacks.Callback):
'''
Keras callback to send metrics to NNI framework
'''
def on_epoch_end(self, epoch, logs={}):
'''
Run on end of each epoch
'''
LOG.debug(logs)
# TensorFlow 2.0 API reference claims the key is `val_acc`, but in fact it's `val_accuracy`
if 'val_acc' in logs:
nni.report_intermediate_result(logs['val_acc'])
else:
nni.report_intermediate_result(logs['val_accuracy'])
def train(args, params):
'''
Train model
'''
x_train, y_train, x_test, y_test = load_mnist_data(args)
model = create_mnist_model(params)
model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)])
_, acc = model.evaluate(x_test, y_test, verbose=0)
LOG.debug('Final result is: %g', acc)
nni.report_final_result(acc)
def generate_default_params():
'''
Generate default hyper parameters
'''
return {
'optimizer': 'Adam',
'learning_rate': 0.001
}
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False)
PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False)
PARSER.add_argument("--num_train", type=int, default=60000, help="Number of train samples to be used, maximum 60000", required=False)
PARSER.add_argument("--num_test", type=int, default=10000, help="Number of test samples to be used, maximum 10000", required=False)
ARGS, UNKNOWN = PARSER.parse_known_args()
try:
# get parameters from tuner
RECEIVED_PARAMS = nni.get_next_parameter()
LOG.debug(RECEIVED_PARAMS)
PARAMS = generate_default_params()
PARAMS.update(RECEIVED_PARAMS)
# train
train(ARGS, PARAMS)
except Exception as e:
LOG.exception(e)
raise
| []
| []
| [
"NNI_OUTPUT_DIR"
]
| [] | ["NNI_OUTPUT_DIR"] | python | 1 | 0 | |
src/runtime/gc_test.go | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"io"
"os"
"reflect"
"runtime"
"runtime/debug"
"testing"
"time"
"unsafe"
)
func TestGcSys(t *testing.T) {
if os.Getenv("GOGC") == "off" {
t.Skip("skipping test; GOGC=off in environment")
}
got := runTestProg(t, "testprog", "GCSys")
want := "OK\n"
if got != want {
t.Fatalf("expected %q, but got %q", want, got)
}
}
func TestGcDeepNesting(t *testing.T) {
type T [2][2][2][2][2][2][2][2][2][2]*int
a := new(T)
// Prevent the compiler from applying escape analysis.
// This makes sure new(T) is allocated on heap, not on the stack.
t.Logf("%p", a)
a[0][0][0][0][0][0][0][0][0][0] = new(int)
*a[0][0][0][0][0][0][0][0][0][0] = 13
runtime.GC()
if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
t.Fail()
}
}
func TestGcHashmapIndirection(t *testing.T) {
defer debug.SetGCPercent(debug.SetGCPercent(1))
runtime.GC()
type T struct {
a [256]int
}
m := make(map[T]T)
for i := 0; i < 2000; i++ {
var a T
a.a[0] = i
m[a] = T{}
}
}
func TestGcArraySlice(t *testing.T) {
type X struct {
buf [1]byte
nextbuf []byte
next *X
}
var head *X
for i := 0; i < 10; i++ {
p := &X{}
p.buf[0] = 42
p.next = head
if head != nil {
p.nextbuf = head.buf[:]
}
head = p
runtime.GC()
}
for p := head; p != nil; p = p.next {
if p.buf[0] != 42 {
t.Fatal("corrupted heap")
}
}
}
func TestGcRescan(t *testing.T) {
type X struct {
c chan error
nextx *X
}
type Y struct {
X
nexty *Y
p *int
}
var head *Y
for i := 0; i < 10; i++ {
p := &Y{}
p.c = make(chan error)
if head != nil {
p.nextx = &head.X
}
p.nexty = head
p.p = new(int)
*p.p = 42
head = p
runtime.GC()
}
for p := head; p != nil; p = p.nexty {
if *p.p != 42 {
t.Fatal("corrupted heap")
}
}
}
func TestGcLastTime(t *testing.T) {
ms := new(runtime.MemStats)
t0 := time.Now().UnixNano()
runtime.GC()
t1 := time.Now().UnixNano()
runtime.ReadMemStats(ms)
last := int64(ms.LastGC)
if t0 > last || last > t1 {
t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
}
pause := ms.PauseNs[(ms.NumGC+255)%256]
// Due to timer granularity, pause can actually be 0 on windows
// or on virtualized environments.
if pause == 0 {
t.Logf("last GC pause was 0")
} else if pause > 10e9 {
t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
}
}
var hugeSink interface{}
func TestHugeGCInfo(t *testing.T) {
// The test ensures that the compiler can chew these huge types even on the weakest machines.
// The types are not allocated at runtime.
if hugeSink != nil {
// 400MB on 32-bit, 4TB on 64-bit.
const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
hugeSink = new([n]*byte)
hugeSink = new([n]uintptr)
hugeSink = new(struct {
x float64
y [n]*byte
z []string
})
hugeSink = new(struct {
x float64
y [n]uintptr
z []string
})
}
}
func TestPeriodicGC(t *testing.T) {
// Make sure we're not in the middle of a GC.
runtime.GC()
var ms1, ms2 runtime.MemStats
runtime.ReadMemStats(&ms1)
// Make periodic GC run continuously.
orig := *runtime.ForceGCPeriod
*runtime.ForceGCPeriod = 0
// Let some periodic GCs happen. In a heavily loaded system,
// it's possible these will be delayed, so this is designed to
// succeed quickly if things are working, but to give it some
// slack if things are slow.
var numGCs uint32
const want = 2
for i := 0; i < 20 && numGCs < want; i++ {
time.Sleep(5 * time.Millisecond)
// Test that periodic GC actually happened.
runtime.ReadMemStats(&ms2)
numGCs = ms2.NumGC - ms1.NumGC
}
*runtime.ForceGCPeriod = orig
if numGCs < want {
t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
}
}
func BenchmarkSetTypePtr(b *testing.B) {
benchSetType(b, new(*byte))
}
func BenchmarkSetTypePtr8(b *testing.B) {
benchSetType(b, new([8]*byte))
}
func BenchmarkSetTypePtr16(b *testing.B) {
benchSetType(b, new([16]*byte))
}
func BenchmarkSetTypePtr32(b *testing.B) {
benchSetType(b, new([32]*byte))
}
func BenchmarkSetTypePtr64(b *testing.B) {
benchSetType(b, new([64]*byte))
}
func BenchmarkSetTypePtr126(b *testing.B) {
benchSetType(b, new([126]*byte))
}
func BenchmarkSetTypePtr128(b *testing.B) {
benchSetType(b, new([128]*byte))
}
func BenchmarkSetTypePtrSlice(b *testing.B) {
benchSetType(b, make([]*byte, 1<<10))
}
type Node1 struct {
Value [1]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode1(b *testing.B) {
benchSetType(b, new(Node1))
}
func BenchmarkSetTypeNode1Slice(b *testing.B) {
benchSetType(b, make([]Node1, 32))
}
type Node8 struct {
Value [8]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode8(b *testing.B) {
benchSetType(b, new(Node8))
}
func BenchmarkSetTypeNode8Slice(b *testing.B) {
benchSetType(b, make([]Node8, 32))
}
type Node64 struct {
Value [64]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode64(b *testing.B) {
benchSetType(b, new(Node64))
}
func BenchmarkSetTypeNode64Slice(b *testing.B) {
benchSetType(b, make([]Node64, 32))
}
type Node64Dead struct {
Left, Right *byte
Value [64]uintptr
}
func BenchmarkSetTypeNode64Dead(b *testing.B) {
benchSetType(b, new(Node64Dead))
}
func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
benchSetType(b, make([]Node64Dead, 32))
}
type Node124 struct {
Value [124]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode124(b *testing.B) {
benchSetType(b, new(Node124))
}
func BenchmarkSetTypeNode124Slice(b *testing.B) {
benchSetType(b, make([]Node124, 32))
}
type Node126 struct {
Value [126]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode126(b *testing.B) {
benchSetType(b, new(Node126))
}
func BenchmarkSetTypeNode126Slice(b *testing.B) {
benchSetType(b, make([]Node126, 32))
}
type Node128 struct {
Value [128]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode128(b *testing.B) {
benchSetType(b, new(Node128))
}
func BenchmarkSetTypeNode128Slice(b *testing.B) {
benchSetType(b, make([]Node128, 32))
}
type Node130 struct {
Value [130]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode130(b *testing.B) {
benchSetType(b, new(Node130))
}
func BenchmarkSetTypeNode130Slice(b *testing.B) {
benchSetType(b, make([]Node130, 32))
}
type Node1024 struct {
Value [1024]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode1024(b *testing.B) {
benchSetType(b, new(Node1024))
}
func BenchmarkSetTypeNode1024Slice(b *testing.B) {
benchSetType(b, make([]Node1024, 32))
}
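// benchSetType sets the benchmark's bytes/op from the element size of x and calls
// runtime.BenchSetType to exercise heap type-bitmap setup for that type.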
func benchSetType(b *testing.B, x interface{}) {
v := reflect.ValueOf(x)
t := v.Type()
switch t.Kind() {
case reflect.Ptr:
b.SetBytes(int64(t.Elem().Size()))
case reflect.Slice:
b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
}
b.ResetTimer()
runtime.BenchSetType(b.N, x)
}
func BenchmarkAllocation(b *testing.B) {
type T struct {
x, y *byte
}
ngo := runtime.GOMAXPROCS(0)
work := make(chan bool, b.N+ngo)
result := make(chan *T)
for i := 0; i < b.N; i++ {
work <- true
}
for i := 0; i < ngo; i++ {
work <- false
}
for i := 0; i < ngo; i++ {
go func() {
var x *T
for <-work {
for i := 0; i < 1000; i++ {
x = &T{}
}
}
result <- x
}()
}
for i := 0; i < ngo; i++ {
<-result
}
}
func TestPrintGC(t *testing.T) {
if testing.Short() {
t.Skip("Skipping in short mode")
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
done := make(chan bool)
go func() {
for {
select {
case <-done:
return
default:
runtime.GC()
}
}
}()
for i := 0; i < 1e4; i++ {
func() {
defer print("")
}()
}
close(done)
}
// The implicit y, ok := x.(error) for the case error
// in testTypeSwitch used to not initialize the result y
// before passing &y to assertE2I2GC.
// Catch this by making assertE2I2 call runtime.GC,
// which will force a stack scan and failure if there are
// bad pointers, and then fill the stack with bad pointers
// and run the type switch.
func TestAssertE2I2Liveness(t *testing.T) {
// Note that this flag is defined in export_test.go
// and is not available to ordinary imports of runtime.
*runtime.TestingAssertE2I2GC = true
defer func() {
*runtime.TestingAssertE2I2GC = false
}()
poisonStack()
testTypeSwitch(io.EOF)
poisonStack()
testAssert(io.EOF)
poisonStack()
testAssertVar(io.EOF)
}
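// poisonStack fills a large stack frame with pointer-like garbage so that a later stack scan
// would fail if any of these slots were treated as live pointers.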
func poisonStack() uintptr {
var x [1000]uintptr
for i := range x {
x[i] = 0xff
}
return x[123]
}
func testTypeSwitch(x interface{}) error {
switch y := x.(type) {
case nil:
// ok
case error:
return y
}
return nil
}
func testAssert(x interface{}) error {
if y, ok := x.(error); ok {
return y
}
return nil
}
func testAssertVar(x interface{}) error {
var y, ok = x.(error)
if ok {
return y
}
return nil
}
func TestAssertE2T2Liveness(t *testing.T) {
*runtime.TestingAssertE2T2GC = true
defer func() {
*runtime.TestingAssertE2T2GC = false
}()
poisonStack()
testIfaceEqual(io.EOF)
}
var a bool
//go:noinline
func testIfaceEqual(x interface{}) {
if x == "abc" {
a = true
}
}
func TestPageAccounting(t *testing.T) {
// Grow the heap in small increments. This used to drop the
// pages-in-use count below zero because of a rounding
// mismatch (golang.org/issue/15022).
const blockSize = 64 << 10
blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
for i := range blocks {
blocks[i] = new([blockSize]byte)
}
// Check that the running page count matches reality.
pagesInUse, counted := runtime.CountPagesInUse()
if pagesInUse != counted {
t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
}
}
| [
"\"GOGC\""
]
| []
| [
"GOGC"
]
| [] | ["GOGC"] | go | 1 | 0 | |
main.go | /*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"os"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
"go.uber.org/zap/zapcore"
"golang.org/x/mod/semver"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"github.com/RHsyseng/operator-utils/pkg/utils/openshift"
oauthzv1 "github.com/openshift/api/authorization/v1"
consolev1 "github.com/openshift/api/console/v1"
consolev1alpha1 "github.com/openshift/api/console/v1alpha1"
operatorv1 "github.com/openshift/api/operator/v1"
oauthzclientv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1"
coreosv1 "github.com/operator-framework/api/pkg/operators/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/RHEcosystemAppEng/dbaas-operator/api/v1alpha1"
"github.com/RHEcosystemAppEng/dbaas-operator/controllers"
operatorframework "github.com/operator-framework/api/pkg/operators/v1alpha1"
//+kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(v1alpha1.AddToScheme(scheme))
utilruntime.Must(operatorframework.AddToScheme(scheme))
utilruntime.Must(coreosv1.AddToScheme(scheme))
utilruntime.Must(consolev1alpha1.Install(scheme))
utilruntime.Must(operatorv1.Install(scheme))
utilruntime.Must(oauthzv1.Install(scheme))
utilruntime.Must(rbacv1.AddToScheme(scheme))
utilruntime.Must(consolev1.AddToScheme(scheme))
//+kubebuilder:scaffold:scheme
}
func main() {
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
var logLevel string
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.StringVar(&logLevel, "log-level", "info", "Log level.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
var level zapcore.Level
if err := level.UnmarshalText([]byte(logLevel)); err != nil {
//default to info level
level = zapcore.InfoLevel
}
opts := zap.Options{
Development: true,
Level: level,
}
opts.BindFlags(flag.CommandLine)
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
cfg := ctrl.GetConfigOrDie()
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "e4addb06.redhat.com",
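// Read these object types directly from the API server instead of the manager's cache.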
ClientDisableCacheFor: []client.Object{
&operatorframework.ClusterServiceVersion{},
&corev1.Secret{},
},
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
DBaaSReconciler := &controllers.DBaaSReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}
if DBaaSReconciler.InstallNamespace, err = controllers.GetInstallNamespace(); err != nil {
setupLog.Error(err, "unable to retrieve install namespace. default Tenant object cannot be installed")
}
authzReconciler := &controllers.DBaaSAuthzReconciler{
DBaaSReconciler: DBaaSReconciler,
AuthorizationV1Client: oauthzclientv1.NewForConfigOrDie(cfg),
}
if err = (&controllers.DBaaSTenantAuthzReconciler{
DBaaSAuthzReconciler: authzReconciler,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "DBaaSTenantAuthz")
os.Exit(1)
}
connectionCtrl, err := (&controllers.DBaaSConnectionReconciler{
DBaaSReconciler: DBaaSReconciler,
}).SetupWithManager(mgr)
if err != nil {
setupLog.Error(err, "unable to create controller", "controller", "DBaaSConnection")
os.Exit(1)
}
inventoryCtrl, err := (&controllers.DBaaSInventoryReconciler{
DBaaSReconciler: DBaaSReconciler,
}).SetupWithManager(mgr)
if err != nil {
setupLog.Error(err, "unable to create controller", "controller", "DBaaSInventory")
os.Exit(1)
}
instanceCtrl, err := (&controllers.DBaaSInstanceReconciler{
DBaaSReconciler: DBaaSReconciler,
}).SetupWithManager(mgr)
if err != nil {
setupLog.Error(err, "unable to create controller", "controller", "DBaaSInstance")
os.Exit(1)
}
if err = (&controllers.DBaaSDefaultTenantReconciler{
DBaaSReconciler: DBaaSReconciler,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "DBaaSDefaultTenant")
os.Exit(1)
}
if err = (&controllers.DBaaSProviderReconciler{
DBaaSReconciler: DBaaSReconciler,
ConnectionCtrl: connectionCtrl,
InventoryCtrl: inventoryCtrl,
InstanceCtrl: instanceCtrl,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "DBaaSProvider")
os.Exit(1)
}
//We'll just make sure to set `ENABLE_WEBHOOKS=false` when we run locally.
if os.Getenv("ENABLE_WEBHOOKS") != "false" {
if err = (&v1alpha1.DBaaSConnection{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "DBaaSConnection")
os.Exit(1)
}
if err = (&v1alpha1.DBaaSInventory{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "DBaaSInventory")
os.Exit(1)
}
if err = (&v1alpha1.DBaaSTenant{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "DBaaSTenant")
os.Exit(1)
}
}
if err = (&controllers.DBaaSTenantReconciler{
DBaaSAuthzReconciler: authzReconciler,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "DBaaSTenant")
os.Exit(1)
}
var ocpVersion string
info, err := openshift.GetPlatformInfo(mgr.GetConfig())
if err != nil {
setupLog.Error(err, "unable to get platform info")
}
if info.IsOpenShift() {
mappedVersion := openshift.MapKnownVersion(info)
if mappedVersion.Version != "" {
ocpVersion = semver.MajorMinor("v" + mappedVersion.Version)
setupLog.Info(fmt.Sprintf("OpenShift Version: %s", ocpVersion))
} else {
setupLog.Info("OpenShift version could not be determined.")
}
}
if err = (&controllers.DBaaSPlatformReconciler{
DBaaSReconciler: DBaaSReconciler,
Log: ctrl.Log.WithName("controllers").WithName("DBaaSPlatform"),
OcpVersion: ocpVersion,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "DBaaSPlatform")
os.Exit(1)
}
//+kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
| [
"\"ENABLE_WEBHOOKS\""
]
| []
| [
"ENABLE_WEBHOOKS"
]
| [] | ["ENABLE_WEBHOOKS"] | go | 1 | 0 | |
apps/languages/python3/map/main.py | import os
import multiprocessing as mp
def worker_func(data):
process_id = mp.current_process()
print(f'worker_func is running on process {process_id}, data={data}')
def main():
# How many cores were assigned to the slurm job allocation?
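# SLURM_CPUS_PER_TASK is set by Slurm for the allocation (typically via the
# --cpus-per-task option when the job is submitted).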
num_cores = int(os.getenv('SLURM_CPUS_PER_TASK'))
print(f'Found {num_cores} cores')
# Create a worker pool
pool = mp.Pool(num_cores)
# Generate a list of work data
worker_data = range(10)
# Map worker_data to worker_func
pool.map(worker_func, worker_data)
if __name__ == "__main__":
main()
| []
| []
| [
"SLURM_CPUS_PER_TASK"
]
| [] | ["SLURM_CPUS_PER_TASK"] | python | 1 | 0 | |
trainval_model.py | from __future__ import division
import sys
import os
import argparse
import tensorflow as tf
import skimage
from skimage import io as sio
import time
# import matplotlib.pyplot as plt
from get_model import get_segmentation_model
from pydensecrf import densecrf
from util import data_reader
from util.processing_tools import *
from util import im_processing, eval_tools, MovingAverage
def train(max_iter, snapshot, dataset, setname, mu, lr, bs, tfmodel_folder,
conv5, model_name, stop_iter, pre_emb=False):
iters_per_log = 100
data_folder = './' + dataset + '/' + setname + '_batch/'
data_prefix = dataset + '_' + setname
snapshot_file = os.path.join(tfmodel_folder, dataset + '_iter_%d.tfmodel')
if not os.path.isdir(tfmodel_folder):
os.makedirs(tfmodel_folder)
cls_loss_avg = 0
avg_accuracy_all, avg_accuracy_pos, avg_accuracy_neg = 0, 0, 0
decay = 0.99
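# dataset-specific vocabulary size and pretrained-embedding name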
vocab_size = 8803 if dataset == 'referit' else 12112
emb_name = 'referit' if dataset == 'referit' else 'Gref'
if pre_emb:
print("Use pretrained Embeddings.")
model = get_segmentation_model(model_name, mode='train',
vocab_size=vocab_size, start_lr=lr,
batch_size=bs, conv5=conv5, emb_name=emb_name)
else:
model = get_segmentation_model(model_name, mode='train',
vocab_size=vocab_size, start_lr=lr,
batch_size=bs, conv5=conv5)
weights = './data/weights/deeplab_resnet_init.ckpt'
print("Loading pretrained weights from {}".format(weights))
load_var = {var.op.name: var for var in tf.global_variables()
if var.name.startswith('res') or var.name.startswith('bn') or var.name.startswith('conv1')}
snapshot_loader = tf.train.Saver(load_var)
snapshot_saver = tf.train.Saver(max_to_keep=4)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
snapshot_loader.restore(sess, weights)
im_h, im_w, num_steps = model.H, model.W, model.num_steps
text_batch = np.zeros((bs, num_steps), dtype=np.float32)
image_batch = np.zeros((bs, im_h, im_w, 3), dtype=np.float32)
mask_batch = np.zeros((bs, im_h, im_w, 1), dtype=np.float32)
valid_idx_batch = np.zeros((bs, 1), dtype=np.int32)
reader = data_reader.DataReader(data_folder, data_prefix)
# for timing the training loop
last_time = time.time()
time_avg = MovingAverage()
for n_iter in range(max_iter):
for n_batch in range(bs):
batch = reader.read_batch(is_log=(n_batch == 0 and n_iter % iters_per_log == 0))
text = batch['text_batch']
im = batch['im_batch'].astype(np.float32)
mask = np.expand_dims(batch['mask_batch'].astype(np.float32), axis=2)
im = im[:, :, ::-1]
im -= mu
text_batch[n_batch, ...] = text
image_batch[n_batch, ...] = im
mask_batch[n_batch, ...] = mask
for idx in range(text.shape[0]):
if text[idx] != 0:
valid_idx_batch[n_batch, :] = idx
break
_, cls_loss_val, lr_val, scores_val, label_val = sess.run([model.train_step,
model.cls_loss,
model.learning_rate,
model.pred,
model.target],
feed_dict={
model.words: text_batch,
# np.expand_dims(text, axis=0),
model.im: image_batch,
# np.expand_dims(im, axis=0),
model.target_fine: mask_batch,
# np.expand_dims(mask, axis=0)
model.valid_idx: valid_idx_batch
})
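# update exponentially-weighted moving averages of the loss and accuracies (decay = 0.99)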
cls_loss_avg = decay * cls_loss_avg + (1 - decay) * cls_loss_val
# Accuracy
accuracy_all, accuracy_pos, accuracy_neg = compute_accuracy(scores_val, label_val)
avg_accuracy_all = decay * avg_accuracy_all + (1 - decay) * accuracy_all
avg_accuracy_pos = decay * avg_accuracy_pos + (1 - decay) * accuracy_pos
avg_accuracy_neg = decay * avg_accuracy_neg + (1 - decay) * accuracy_neg
# timing
cur_time = time.time()
elapsed = cur_time - last_time
last_time = cur_time
if n_iter % iters_per_log == 0:
print('iter = %d, loss (cur) = %f, loss (avg) = %f, lr = %f'
% (n_iter, cls_loss_val, cls_loss_avg, lr_val))
print('iter = %d, accuracy (cur) = %f (all), %f (pos), %f (neg)'
% (n_iter, accuracy_all, accuracy_pos, accuracy_neg))
print('iter = %d, accuracy (avg) = %f (all), %f (pos), %f (neg)'
% (n_iter, avg_accuracy_all, avg_accuracy_pos, avg_accuracy_neg))
time_avg.add(elapsed)
print('iter = %d, cur time = %.5f, avg time = %.5f, model_name: %s' % (n_iter, elapsed, time_avg.get_avg(), model_name))
# Save snapshot
if (n_iter + 1) % snapshot == 0 or (n_iter + 1) >= max_iter:
snapshot_saver.save(sess, snapshot_file % (n_iter + 1))
print('snapshot saved to ' + snapshot_file % (n_iter + 1))
if (n_iter + 1) >= stop_iter:
print('stop training at iter ' + str(stop_iter))
break
print('Optimization done.')
def test(iter, dataset, visualize, setname, dcrf, mu, tfmodel_folder, model_name, pre_emb=False):
data_folder = './' + dataset + '/' + setname + '_batch/'
data_prefix = dataset + '_' + setname
if visualize:
save_dir = './' + dataset + '/visualization/' + str(iter) + '/'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
weights = os.path.join(tfmodel_folder, dataset + '_iter_' + str(iter) + '.tfmodel')
print("Loading trained weights from {}".format(weights))
score_thresh = 1e-9
eval_seg_iou_list = [.5, .6, .7, .8, .9]
cum_I, cum_U = 0, 0
mean_IoU, mean_dcrf_IoU = 0, 0
seg_correct = np.zeros(len(eval_seg_iou_list), dtype=np.int32)
if dcrf:
cum_I_dcrf, cum_U_dcrf = 0, 0
seg_correct_dcrf = np.zeros(len(eval_seg_iou_list), dtype=np.int32)
seg_total = 0.
H, W = 320, 320
vocab_size = 8803 if dataset == 'referit' else 12112
emb_name = 'referit' if dataset == 'referit' else 'Gref'
IU_result = list()
if pre_emb:
# use pretrained embedding
print("Use pretrained Embeddings.")
model = get_segmentation_model(model_name, H=H, W=W,
mode='eval', vocab_size=vocab_size, emb_name=emb_name)
else:
model = get_segmentation_model(model_name, H=H, W=W,
mode='eval', vocab_size=vocab_size)
# Load pretrained model
snapshot_restorer = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
snapshot_restorer.restore(sess, weights)
reader = data_reader.DataReader(data_folder, data_prefix, shuffle=False)
NN = reader.num_batch
for n_iter in range(reader.num_batch):
if n_iter % (NN // 50) == 0:
if n_iter / (NN // 50) % 5 == 0:
sys.stdout.write(str(n_iter / (NN // 50) // 5))
else:
sys.stdout.write('.')
sys.stdout.flush()
batch = reader.read_batch(is_log=False)
text = batch['text_batch']
im = batch['im_batch']
mask = batch['mask_batch'].astype(np.float32)
valid_idx = np.zeros([1], dtype=np.int32)
for idx in range(text.shape[0]):
if text[idx] != 0:
valid_idx[0] = idx
break
proc_im = skimage.img_as_ubyte(im_processing.resize_and_pad(im, H, W))
proc_im_ = proc_im.astype(np.float32)
proc_im_ = proc_im_[:, :, ::-1]
proc_im_ -= mu
scores_val, up_val, sigm_val = sess.run([model.pred, model.up, model.sigm],
feed_dict={
model.words: np.expand_dims(text, axis=0),
model.im: np.expand_dims(proc_im_, axis=0),
model.valid_idx: np.expand_dims(valid_idx, axis=0)
})
# scores_val = np.squeeze(scores_val)
# pred_raw = (scores_val >= score_thresh).astype(np.float32)
up_val = np.squeeze(up_val)
pred_raw = (up_val >= score_thresh).astype(np.float32)
predicts = im_processing.resize_and_crop(pred_raw, mask.shape[0], mask.shape[1])
if dcrf:
# Dense CRF post-processing
sigm_val = np.squeeze(sigm_val)
d = densecrf.DenseCRF2D(W, H, 2)
U = np.expand_dims(-np.log(sigm_val), axis=0)
U_ = np.expand_dims(-np.log(1 - sigm_val), axis=0)
unary = np.concatenate((U_, U), axis=0)
unary = unary.reshape((2, -1))
d.setUnaryEnergy(unary)
d.addPairwiseGaussian(sxy=3, compat=3)
d.addPairwiseBilateral(sxy=20, srgb=3, rgbim=proc_im, compat=10)
Q = d.inference(5)
pred_raw_dcrf = np.argmax(Q, axis=0).reshape((H, W)).astype(np.float32)
predicts_dcrf = im_processing.resize_and_crop(pred_raw_dcrf, mask.shape[0], mask.shape[1])
if visualize:
sent = batch['sent_batch'][0]
visualize_seg(im, mask, predicts, sent)
if dcrf:
visualize_seg(im, mask, predicts_dcrf, sent)
I, U = eval_tools.compute_mask_IU(predicts, mask)
IU_result.append({'batch_no': n_iter, 'I': I, 'U': U})
mean_IoU += float(I) / U
cum_I += I
cum_U += U
msg = 'cumulative IoU = %f' % (cum_I / cum_U)
for n_eval_iou in range(len(eval_seg_iou_list)):
eval_seg_iou = eval_seg_iou_list[n_eval_iou]
seg_correct[n_eval_iou] += (I / U >= eval_seg_iou)
if dcrf:
I_dcrf, U_dcrf = eval_tools.compute_mask_IU(predicts_dcrf, mask)
mean_dcrf_IoU += float(I_dcrf) / U_dcrf
cum_I_dcrf += I_dcrf
cum_U_dcrf += U_dcrf
msg += '\tcumulative IoU (dcrf) = %f' % (cum_I_dcrf / cum_U_dcrf)
for n_eval_iou in range(len(eval_seg_iou_list)):
eval_seg_iou = eval_seg_iou_list[n_eval_iou]
seg_correct_dcrf[n_eval_iou] += (I_dcrf / U_dcrf >= eval_seg_iou)
# print(msg)
seg_total += 1
# Print results
print('Segmentation evaluation (without DenseCRF):')
result_str = ''
for n_eval_iou in range(len(eval_seg_iou_list)):
result_str += 'precision@%s = %f\n' % \
(str(eval_seg_iou_list[n_eval_iou]), seg_correct[n_eval_iou] / seg_total)
result_str += 'overall IoU = %f; mean IoU = %f\n' % (cum_I / cum_U, mean_IoU / seg_total)
print(result_str)
if dcrf:
print('Segmentation evaluation (with DenseCRF):')
result_str = ''
for n_eval_iou in range(len(eval_seg_iou_list)):
result_str += 'precision@%s = %f\n' % \
(str(eval_seg_iou_list[n_eval_iou]), seg_correct_dcrf[n_eval_iou] / seg_total)
result_str += 'overall IoU = %f; mean IoU = %f\n' % (cum_I_dcrf / cum_U_dcrf, mean_dcrf_IoU / seg_total)
print(result_str)
def visualize_seg(im, mask, predicts, sent):
# print("visualizing")
vis_dir = "./visualize/lgcr_best_c5map/unc/testA"
sent_dir = os.path.join(vis_dir, sent)
if not os.path.exists(sent_dir):
os.makedirs(sent_dir)
# Ignore sio warnings of low-contrast image.
import warnings
warnings.filterwarnings('ignore')
sio.imsave(os.path.join(sent_dir, "im.png"), im)
im_gt = np.zeros_like(im)
im_gt[:, :, 2] = 170
im_gt[:, :, 0] += mask.astype('uint8') * 170
im_gt = im_gt.astype('int16')
im_gt[:, :, 2] += mask.astype('int16') * (-170)
im_gt = im_gt.astype('uint8')
sio.imsave(os.path.join(sent_dir, "gt.png"), im_gt)
im_seg = im / 2
im_seg[:, :, 0] += predicts.astype('uint8') * 100
im_seg = im_seg.astype('uint8')
sio.imsave(os.path.join(sent_dir, "pred.png"), im_seg)
# plt.imshow(im_seg.astype('uint8'))
# plt.title(sent)
# plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-g', type=str, default='0')
parser.add_argument('-i', type=int, default=800000)
parser.add_argument('-s', type=int, default=100000)
parser.add_argument('-st', type=int, default=700000) # stop training once st iterations are reached
parser.add_argument('-m', type=str) # 'train' 'test'
parser.add_argument('-d', type=str, default='referit') # 'Gref' 'unc' 'unc+' 'referit'
parser.add_argument('-t', type=str) # 'train' 'trainval' 'val' 'test' 'testA' 'testB'
parser.add_argument('-f', type=str) # directory to save models
parser.add_argument('-lr', type=float, default=0.00025) # start learning rate
parser.add_argument('-bs', type=int, default=1) # batch size
parser.add_argument('-v', default=False, action='store_true') # visualization
parser.add_argument('-c', default=False, action='store_true') # whether or not to apply DenseCRF
parser.add_argument('-emb', default=False, action='store_true') # whether or not to use pretrained embeddings
parser.add_argument('-n', type=str, default='') # select model
parser.add_argument('-conv5', default=False, action='store_true') # finetune conv layers
args = parser.parse_args()
# os.environ['CUDA_VISIBLE_DEVICES'] = args.g
mu = np.array((104.00698793, 116.66876762, 122.67891434))
if args.m == 'train':
train(max_iter=args.i,
snapshot=args.s,
dataset=args.d,
setname=args.t,
mu=mu,
lr=args.lr,
bs=args.bs,
tfmodel_folder=args.f,
conv5=args.conv5,
model_name=args.n,
stop_iter=args.st,
pre_emb=args.emb)
elif args.m == 'test':
test(iter=args.i,
dataset=args.d,
visualize=args.v,
setname=args.t,
dcrf=args.c,
mu=mu,
tfmodel_folder=args.f,
model_name=args.n,
pre_emb=args.emb)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
tools/train_net.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
r"""
Basic training script for PyTorch
"""
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (do not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.solver import make_lr_scheduler
from maskrcnn_benchmark.solver import make_optimizer
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.engine.trainer import do_train
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
torch.manual_seed(42)
def train(cfg, local_rank, distributed):
model = build_detection_model(cfg)
device = torch.device(cfg.MODEL.DEVICE)
model.to(device)
optimizer = make_optimizer(cfg, model)
scheduler = make_lr_scheduler(cfg, optimizer)
if distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[local_rank], output_device=local_rank,
# this should be removed if we update BatchNorm stats
broadcast_buffers=False,
)
arguments = {}
arguments["iteration"] = 0
output_dir = cfg.OUTPUT_DIR
save_to_disk = get_rank() == 0
checkpointer = DetectronCheckpointer(
cfg, model, optimizer, scheduler, output_dir, save_to_disk
)
print(cfg.MODEL.WEIGHT)
extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)
arguments.update(extra_checkpoint_data)
data_loader = make_data_loader(
cfg,
is_train=True,
is_distributed=distributed,
start_iter=arguments["iteration"],
)
checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
do_train(
model,
data_loader,
optimizer,
scheduler,
checkpointer,
device,
checkpoint_period,
arguments,
)
return model
def run_test(cfg, model, distributed):
if distributed:
model = model.module
torch.cuda.empty_cache() # TODO check if it helps
iou_types = ("bbox",)
if cfg.MODEL.MASK_ON:
iou_types = iou_types + ("segm",)
if cfg.MODEL.KEYPOINT_ON:
iou_types = iou_types + ("keypoints",)
output_folders = [None] * len(cfg.DATASETS.TEST)
dataset_names = cfg.DATASETS.TEST
if cfg.OUTPUT_DIR:
for idx, dataset_name in enumerate(dataset_names):
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
mkdir(output_folder)
output_folders[idx] = output_folder
data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):
inference(
model,
data_loader_val,
dataset_name=dataset_name,
iou_types=iou_types,
box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
device=cfg.MODEL.DEVICE,
expected_results=cfg.TEST.EXPECTED_RESULTS,
expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
output_folder=output_folder,
)
synchronize()
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
parser.add_argument(
"--config-file",
default="",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument(
"--skip-test",
dest="skip_test",
help="Do not test the final model",
action="store_true",
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
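# WORLD_SIZE is set by torch.distributed.launch when running distributed training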
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = num_gpus > 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(
backend="nccl", init_method="env://"
)
synchronize()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
output_dir = cfg.OUTPUT_DIR
if output_dir:
mkdir(output_dir)
logger = setup_logger("maskrcnn_benchmark", output_dir, get_rank())
logger.info("Using {} GPUs".format(num_gpus))
logger.info(args)
logger.info("Collecting env info (might take some time)")
logger.info("\n" + collect_env_info())
logger.info("Loaded configuration file {}".format(args.config_file))
with open(args.config_file, "r") as cf:
config_str = "\n" + cf.read()
logger.info(config_str)
logger.info("Running with config:\n{}".format(cfg))
model = train(cfg, args.local_rank, args.distributed)
if not args.skip_test:
run_test(cfg, model, args.distributed)
if __name__ == "__main__":
main()
| []
| []
| [
"WORLD_SIZE"
]
| [] | ["WORLD_SIZE"] | python | 1 | 0 | |
cats_suite_test.go | package cats_test
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"testing"
"time"
. "github.com/cloudfoundry/cf-acceptance-tests/cats_suite_helpers"
"github.com/cloudfoundry/cf-acceptance-tests/helpers/assets"
"github.com/mholt/archiver"
_ "github.com/cloudfoundry/cf-acceptance-tests/apps"
_ "github.com/cloudfoundry/cf-acceptance-tests/backend_compatibility"
_ "github.com/cloudfoundry/cf-acceptance-tests/capi_experimental"
_ "github.com/cloudfoundry/cf-acceptance-tests/credhub"
_ "github.com/cloudfoundry/cf-acceptance-tests/detect"
_ "github.com/cloudfoundry/cf-acceptance-tests/docker"
_ "github.com/cloudfoundry/cf-acceptance-tests/internet_dependent"
_ "github.com/cloudfoundry/cf-acceptance-tests/internetless"
_ "github.com/cloudfoundry/cf-acceptance-tests/isolation_segments"
_ "github.com/cloudfoundry/cf-acceptance-tests/logging_isolation_segments"
_ "github.com/cloudfoundry/cf-acceptance-tests/route_services"
_ "github.com/cloudfoundry/cf-acceptance-tests/routing"
_ "github.com/cloudfoundry/cf-acceptance-tests/routing_isolation_segments"
_ "github.com/cloudfoundry/cf-acceptance-tests/security_groups"
_ "github.com/cloudfoundry/cf-acceptance-tests/service_discovery"
_ "github.com/cloudfoundry/cf-acceptance-tests/services"
_ "github.com/cloudfoundry/cf-acceptance-tests/ssh"
_ "github.com/cloudfoundry/cf-acceptance-tests/tasks"
_ "github.com/cloudfoundry/cf-acceptance-tests/tcp_routing"
_ "github.com/cloudfoundry/cf-acceptance-tests/v3"
_ "github.com/cloudfoundry/cf-acceptance-tests/volume_services"
_ "github.com/cloudfoundry/cf-acceptance-tests/windows"
"github.com/cloudfoundry-incubator/cf-test-helpers/cf"
"github.com/cloudfoundry-incubator/cf-test-helpers/helpers"
"github.com/cloudfoundry-incubator/cf-test-helpers/workflowhelpers"
. "github.com/cloudfoundry/cf-acceptance-tests/helpers/cli_version_check"
"github.com/cloudfoundry/cf-acceptance-tests/helpers/config"
"github.com/cloudfoundry/custom-cats-reporters/honeycomb"
"github.com/cloudfoundry/custom-cats-reporters/honeycomb/client"
"github.com/honeycombio/libhoney-go"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
)
const minCliVersion = "6.33.1"
func TestCATS(t *testing.T) {
RegisterFailHandler(Fail)
var validationError error
Config, validationError = config.NewCatsConfig(os.Getenv("CONFIG"))
if validationError != nil {
defer GinkgoRecover()
fmt.Println("Invalid configuration. ")
fmt.Println(validationError)
fmt.Println("Please fix the contents of $CONFIG:\n " + os.Getenv("CONFIG") + "\nbefore proceeding.")
t.Fail()
}
var _ = SynchronizedBeforeSuite(func() []byte {
installedVersion, err := GetInstalledCliVersionString()
Expect(err).ToNot(HaveOccurred(), "Error trying to determine CF CLI version")
fmt.Println("Running CATs with CF CLI version ", installedVersion)
Expect(ParseRawCliVersionString(installedVersion).AtLeast(ParseRawCliVersionString(minCliVersion))).To(BeTrue(), "CLI version "+minCliVersion+" is required")
if Config.GetIncludeSsh() {
ScpPath, err = exec.LookPath("scp")
Expect(err).NotTo(HaveOccurred())
SftpPath, err = exec.LookPath("sftp")
Expect(err).NotTo(HaveOccurred())
}
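// Build the catnip test app for linux/amd64 so it can be pushed during the tests.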
buildCmd := exec.Command("go", "build", "-o", "bin/catnip")
buildCmd.Dir = "assets/catnip"
buildCmd.Env = append(os.Environ(),
"GOOS=linux",
"GOARCH=amd64",
)
buildCmd.Stdout = GinkgoWriter
buildCmd.Stderr = GinkgoWriter
err = buildCmd.Run()
Expect(err).NotTo(HaveOccurred())
doraFiles, err := ioutil.ReadDir(assets.NewAssets().Dora)
Expect(err).NotTo(HaveOccurred())
var doraFileNames []string
for _, doraFile := range doraFiles {
doraFileNames = append(doraFileNames, assets.NewAssets().Dora+"/"+doraFile.Name())
}
err = archiver.Zip.Make(assets.NewAssets().DoraZip, doraFileNames)
Expect(err).NotTo(HaveOccurred())
return []byte{}
}, func([]byte) {
SetDefaultEventuallyTimeout(Config.DefaultTimeoutDuration())
SetDefaultEventuallyPollingInterval(1 * time.Second)
TestSetup = workflowhelpers.NewTestSuiteSetup(Config)
workflowhelpers.AsUser(TestSetup.AdminUserContext(), Config.GetScaledTimeout(1*time.Minute), func() {
buildpacksSession := cf.Cf("buildpacks").Wait()
Expect(buildpacksSession).To(Exit(0))
buildpacks := string(buildpacksSession.Out.Contents())
Expect(buildpacks).To(ContainSubstring(Config.GetBinaryBuildpackName()), "Missing the binary buildpack specified in the integration_config.json. There may be other missing buildpacks as well; please double-check your configuration against the buildpacks listed below.")
Expect(buildpacks).To(ContainSubstring(Config.GetGoBuildpackName()), "Missing the go buildpack specified in the integration_config.json. There may be other missing buildpacks as well; please double-check your configuration against the buildpacks listed below.")
Expect(buildpacks).To(ContainSubstring(Config.GetJavaBuildpackName()), "Missing the java buildpack specified in the integration_config.json. There may be other missing buildpacks as well; please double-check your configuration against the buildpacks listed below.")
Expect(buildpacks).To(ContainSubstring(Config.GetNodejsBuildpackName()), "Missing the NodeJS buildpack specified in the integration_config.json. There may be other missing buildpacks as well; please double-check your configuration against the buildpacks listed below.")
Expect(buildpacks).To(ContainSubstring(Config.GetRubyBuildpackName()), "Missing the ruby buildpack specified in the integration_config.json. There may be other missing buildpacks as well; please double-check your configuration against the buildpacks listed below.")
})
TestSetup.Setup()
})
SynchronizedAfterSuite(func() {
if TestSetup != nil {
TestSetup.Teardown()
}
}, func() {
os.Remove(assets.NewAssets().DoraZip)
})
rs := []Reporter{}
if validationError == nil {
if Config.GetArtifactsDirectory() != "" {
helpers.EnableCFTrace(Config, "CATS")
rs = append(rs, helpers.NewJUnitReporter(Config, "CATS"))
}
}
reporterConfig := Config.GetReporterConfig()
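// Report results to Honeycomb only when both a dataset and a write key are configured.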
if reporterConfig.HoneyCombDataset != "" && reporterConfig.HoneyCombWriteKey != "" {
honeyCombClient := client.New(libhoney.Config{
WriteKey: reporterConfig.HoneyCombWriteKey,
Dataset: reporterConfig.HoneyCombDataset,
})
globalTags := map[string]interface{}{
"run_id": os.Getenv("RUN_ID"),
"env_api": Config.GetApiEndpoint(),
}
honeyCombReporter := honeycomb.New(honeyCombClient)
honeyCombReporter.SetGlobalTags(globalTags)
honeyCombReporter.SetCustomTags(reporterConfig.CustomTags)
rs = append(rs, honeyCombReporter)
}
RunSpecsWithDefaultAndCustomReporters(t, "CATS", rs)
}
| [
"\"CONFIG\"",
"\"CONFIG\"",
"\"RUN_ID\""
]
| []
| [
"RUN_ID",
"CONFIG"
]
| [] | ["RUN_ID", "CONFIG"] | go | 2 | 0 | |
src/test/java/hudson/plugins/git/GitSCMTest.java | package hudson.plugins.git;
import com.cloudbees.plugins.credentials.Credentials;
import com.cloudbees.plugins.credentials.CredentialsProvider;
import com.cloudbees.plugins.credentials.CredentialsScope;
import com.cloudbees.plugins.credentials.CredentialsStore;
import com.cloudbees.plugins.credentials.SystemCredentialsProvider;
import com.cloudbees.plugins.credentials.common.StandardCredentials;
import com.cloudbees.plugins.credentials.domains.Domain;
import com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl;
import com.gargoylesoftware.htmlunit.html.HtmlPage;
import com.google.common.base.Function;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import hudson.EnvVars;
import hudson.FilePath;
import hudson.Functions;
import hudson.Launcher;
import hudson.matrix.Axis;
import hudson.matrix.AxisList;
import hudson.matrix.MatrixBuild;
import hudson.matrix.MatrixProject;
import hudson.model.*;
import hudson.plugins.git.GitSCM.BuildChooserContextImpl;
import hudson.plugins.git.GitSCM.DescriptorImpl;
import hudson.plugins.git.browser.GitRepositoryBrowser;
import hudson.plugins.git.browser.GithubWeb;
import hudson.plugins.git.extensions.GitSCMExtension;
import hudson.plugins.git.extensions.impl.*;
import hudson.plugins.git.util.BuildChooser;
import hudson.plugins.git.util.BuildChooserContext;
import hudson.plugins.git.util.BuildChooserContext.ContextCallable;
import hudson.plugins.git.util.BuildData;
import hudson.plugins.git.util.GitUtils;
import hudson.plugins.parameterizedtrigger.BuildTrigger;
import hudson.plugins.parameterizedtrigger.ResultCondition;
import hudson.remoting.Channel;
import hudson.remoting.VirtualChannel;
import hudson.scm.ChangeLogSet;
import hudson.scm.PollingResult;
import hudson.scm.PollingResult.Change;
import hudson.scm.SCMRevisionState;
import hudson.security.ACL;
import hudson.security.ACLContext;
import hudson.security.Permission;
import hudson.slaves.DumbSlave;
import hudson.slaves.EnvironmentVariablesNodeProperty.Entry;
import hudson.tools.ToolLocationNodeProperty;
import hudson.tools.ToolProperty;
import hudson.triggers.SCMTrigger;
import hudson.util.LogTaskListener;
import hudson.util.ReflectionUtils;
import hudson.util.RingBufferLogHandler;
import hudson.util.StreamTaskListener;
import jenkins.security.MasterToSlaveCallable;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.PersonIdent;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.lib.Repository;
import org.jenkinsci.plugins.tokenmacro.TokenMacro;
import org.jenkinsci.plugins.gitclient.*;
import org.junit.Assume;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.jvnet.hudson.test.MockAuthorizationStrategy;
import org.jvnet.hudson.test.TestExtension;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectStreamException;
import java.io.Serializable;
import java.lang.reflect.InvocationTargetException;
import java.net.URL;
import java.text.MessageFormat;
import java.util.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.eclipse.jgit.transport.RemoteConfig;
import static org.hamcrest.MatcherAssert.*;
import static org.hamcrest.Matchers.*;
import org.jvnet.hudson.test.Issue;
import org.jvnet.hudson.test.JenkinsRule;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import jenkins.model.Jenkins;
import jenkins.plugins.git.CliGitCommand;
import jenkins.plugins.git.GitSampleRepoRule;
/**
* Tests for {@link GitSCM}.
* @author ishaaq
*/
public class GitSCMTest extends AbstractGitTestCase {
@Rule
public GitSampleRepoRule secondRepo = new GitSampleRepoRule();
private CredentialsStore store = null;
@BeforeClass
public static void setGitDefaults() throws Exception {
CliGitCommand gitCmd = new CliGitCommand(null);
gitCmd.setDefaults();
}
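// Reset the system credentials provider to an empty global domain and keep a reference to its store for the tests below.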
@Before
public void enableSystemCredentialsProvider() throws Exception {
SystemCredentialsProvider.getInstance().setDomainCredentialsMap(
Collections.singletonMap(Domain.global(), Collections.<Credentials>emptyList()));
for (CredentialsStore s : CredentialsProvider.lookupStores(Jenkins.get())) {
if (s.getProvider() instanceof SystemCredentialsProvider.ProviderImpl) {
store = s;
break;
}
}
assertThat("The system credentials provider is enabled", store, notNullValue());
}
@After
public void waitForJenkinsIdle() throws Exception {
if (cleanupIsUnreliable()) {
rule.waitUntilNoActivityUpTo(5001);
}
}
private StandardCredentials getInvalidCredential() {
String username = "bad-user";
String password = "bad-password";
CredentialsScope scope = CredentialsScope.GLOBAL;
String id = "username-" + username + "-password-" + password;
return new UsernamePasswordCredentialsImpl(scope, id, "desc: " + id, username, password);
}
@Test
public void manageShouldAccessGlobalConfig() {
final String USER = "user";
final String MANAGER = "manager";
Permission jenkinsManage;
try {
jenkinsManage = getJenkinsManage();
} catch (Exception e) {
Assume.assumeTrue("Jenkins baseline is too old for this test (requires Jenkins.MANAGE)", false);
return;
}
rule.jenkins.setSecurityRealm(rule.createDummySecurityRealm());
rule.jenkins.setAuthorizationStrategy(new MockAuthorizationStrategy()
// Read access
.grant(Jenkins.READ).everywhere().to(USER)
// Read and Manage
.grant(Jenkins.READ).everywhere().to(MANAGER)
.grant(jenkinsManage).everywhere().to(MANAGER)
);
try (ACLContext c = ACL.as(User.getById(USER, true))) {
Collection<Descriptor> descriptors = Functions.getSortedDescriptorsForGlobalConfigUnclassified();
assertThat("Global configuration should not be accessible to READ users", descriptors, is(empty()));
}
try (ACLContext c = ACL.as(User.getById(MANAGER, true))) {
Collection<Descriptor> descriptors = Functions.getSortedDescriptorsForGlobalConfigUnclassified();
Optional<Descriptor> found =
descriptors.stream().filter(descriptor -> descriptor instanceof GitSCM.DescriptorImpl).findFirst();
assertTrue("Global configuration should be accessible to MANAGE users", found.isPresent());
}
}
// TODO: remove when Jenkins core baseline is 2.222+
private Permission getJenkinsManage() throws NoSuchMethodException, IllegalAccessException,
InvocationTargetException {
// Jenkins.MANAGE is available starting from Jenkins 2.222 (https://jenkins.io/changelog/#v2.222). See JEP-223 for more info
return (Permission) ReflectionUtils.getPublicProperty(Jenkins.get(), "MANAGE");
}
@Test
public void trackCredentials() throws Exception {
StandardCredentials credential = getInvalidCredential();
store.addCredentials(Domain.global(), credential);
Fingerprint fingerprint = CredentialsProvider.getFingerprintOf(credential);
assertThat("Fingerprint should not be set before job definition", fingerprint, nullValue());
JenkinsRule.WebClient wc = rule.createWebClient();
HtmlPage page = wc.goTo("credentials/store/system/domain/_/credentials/" + credential.getId());
assertThat("Have usage tracking reported", page.getElementById("usage"), notNullValue());
assertThat("No fingerprint created until first use", page.getElementById("usage-missing"), notNullValue());
assertThat("No fingerprint created until first use", page.getElementById("usage-present"), nullValue());
FreeStyleProject project = setupProject("master", credential);
fingerprint = CredentialsProvider.getFingerprintOf(credential);
assertThat("Fingerprint should not be set before first build", fingerprint, nullValue());
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, Result.SUCCESS, commitFile1);
fingerprint = CredentialsProvider.getFingerprintOf(credential);
assertThat("Fingerprint should be set after first build", fingerprint, notNullValue());
assertThat(fingerprint.getJobs(), hasItem(is(project.getFullName())));
Fingerprint.RangeSet rangeSet = fingerprint.getRangeSet(project);
assertThat(rangeSet, notNullValue());
assertThat(rangeSet.includes(project.getLastBuild().getNumber()), is(true));
page = wc.goTo("credentials/store/system/domain/_/credentials/" + credential.getId());
assertThat(page.getElementById("usage-missing"), nullValue());
assertThat(page.getElementById("usage-present"), notNullValue());
assertThat(page.getAnchorByText(project.getFullDisplayName()), notNullValue());
}
/**
* Basic test - create a GitSCM based project, check it out and build for the first time.
* Next test that polling works correctly, make another commit, check that polling finds it,
* then build it and finally test the build culprits as well as the contents of the workspace.
* @throws Exception on error
*/
@Test
public void testBasic() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, Result.SUCCESS, commitFile1);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
final String commitFile2 = "commitFile2";
commit(commitFile2, janeDoe, "Commit number 2");
assertTrue("scm polling did not detect commit2 change", project.poll(listener).hasChanges());
//... and build it...
final FreeStyleBuild build2 = build(project, Result.SUCCESS, commitFile2);
final Set<User> culprits = build2.getCulprits();
assertEquals("The build should have only one culprit", 1, culprits.size());
assertEquals("", janeDoe.getName(), culprits.iterator().next().getFullName());
assertTrue(build2.getWorkspace().child(commitFile2).exists());
rule.assertBuildStatusSuccess(build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
@Test
@Issue("JENKINS-56176")
public void testBasicRemotePoll() throws Exception {
// FreeStyleProject project = setupProject("master", true, false);
FreeStyleProject project = setupProject("master", false, null, null, null, true, null);
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, Result.SUCCESS, commitFile1);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
final String commitFile2 = "commitFile2";
String sha1String = commit(commitFile2, janeDoe, "Commit number 2");
assertTrue("scm polling did not detect commit2 change", project.poll(listener).hasChanges());
// ... and build it...
final FreeStyleBuild build2 = build(project, Result.SUCCESS, commitFile2);
final Set<User> culprits = build2.getCulprits();
assertEquals("The build should have only one culprit", 1, culprits.size());
assertEquals("", janeDoe.getName(), culprits.iterator().next().getFullName());
assertTrue(build2.getWorkspace().child(commitFile2).exists());
rule.assertBuildStatusSuccess(build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
// JENKINS-56176 token macro expansion broke when BuildData was no longer updated
assertThat(TokenMacro.expandAll(build2, listener, "${GIT_REVISION,length=7}"), is(sha1String.substring(0, 7)));
assertThat(TokenMacro.expandAll(build2, listener, "${GIT_REVISION}"), is(sha1String));
assertThat(TokenMacro.expandAll(build2, listener, "$GIT_REVISION"), is(sha1String));
}
@Test
public void testBranchSpecWithRemotesMaster() throws Exception {
FreeStyleProject projectMasterBranch = setupProject("remotes/origin/master", false, null, null, null, true, null);
// create initial commit and build
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(projectMasterBranch, Result.SUCCESS, commitFile1);
}
/**
* This test and testSpecificRefspecsWithoutCloneOption confirm behaviors of
* refspecs on initial clone. Without the CloneOption to honor refspec, all
* references are cloned, even if they will be later ignored due to the
* refspec. With the CloneOption to honor refspec, the initial clone also
* honors the refspec and only retrieves references per the refspec.
* @throws Exception on error
*/
@Test
@Issue("JENKINS-31393")
public void testSpecificRefspecs() throws Exception {
List<UserRemoteConfig> repos = new ArrayList<>();
repos.add(new UserRemoteConfig(testRepo.gitDir.getAbsolutePath(), "origin", "+refs/heads/foo:refs/remotes/foo", null));
/* Set CloneOption to honor refspec on initial clone */
FreeStyleProject projectWithMaster = setupProject(repos, Collections.singletonList(new BranchSpec("master")), null, false, null);
CloneOption cloneOptionMaster = new CloneOption(false, null, null);
cloneOptionMaster.setHonorRefspec(true);
((GitSCM)projectWithMaster.getScm()).getExtensions().add(cloneOptionMaster);
/* Set CloneOption to honor refspec on initial clone */
FreeStyleProject projectWithFoo = setupProject(repos, Collections.singletonList(new BranchSpec("foo")), null, false, null);
CloneOption cloneOptionFoo = new CloneOption(false, null, null);
cloneOptionFoo.setHonorRefspec(true);
((GitSCM)projectWithFoo.getScm()).getExtensions().add(cloneOptionFoo);
// create initial commit
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit in master");
// create branch and make initial commit
git.branch("foo");
git.checkout().branch("foo");
commit(commitFile1, johnDoe, "Commit in foo");
build(projectWithMaster, Result.FAILURE);
build(projectWithFoo, Result.SUCCESS, commitFile1);
}
/**
* This test and testSpecificRefspecs confirm behaviors of
* refspecs on initial clone. Without the CloneOption to honor refspec, all
* references are cloned, even if they will be later ignored due to the
* refspec. With the CloneOption to honor refspec, the initial clone also
* honors the refspec and only retrieves references per the refspec.
* @throws Exception on error
*/
@Test
@Issue("JENKINS-36507")
public void testSpecificRefspecsWithoutCloneOption() throws Exception {
List<UserRemoteConfig> repos = new ArrayList<>();
repos.add(new UserRemoteConfig(testRepo.gitDir.getAbsolutePath(), "origin", "+refs/heads/foo:refs/remotes/foo", null));
FreeStyleProject projectWithMaster = setupProject(repos, Collections.singletonList(new BranchSpec("master")), null, false, null);
FreeStyleProject projectWithFoo = setupProject(repos, Collections.singletonList(new BranchSpec("foo")), null, false, null);
// create initial commit
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit in master");
// create branch and make initial commit
git.branch("foo");
git.checkout().branch("foo");
commit(commitFile1, johnDoe, "Commit in foo");
build(projectWithMaster, Result.SUCCESS); /* If clone refspec had been honored, this would fail */
build(projectWithFoo, Result.SUCCESS, commitFile1);
}
/**
* An empty remote repo URL failed the job as expected but provided
* a poor diagnostic message. The fix for JENKINS-38608 improves
* the error message to be clear and helpful. This test checks for
* that error message.
* @throws Exception on error
*/
@Test
@Issue("JENKINS-38608")
public void testAddFirstRepositoryWithNullRepoURL() throws Exception{
List<UserRemoteConfig> repos = new ArrayList<>();
repos.add(new UserRemoteConfig(null, null, null, null));
FreeStyleProject project = setupProject(repos, Collections.singletonList(new BranchSpec("master")), null, false, null);
FreeStyleBuild build = build(project, Result.FAILURE);
// Before JENKINS-38608 fix
assertThat("Build log reports 'Null value not allowed'",
build.getLog(175), not(hasItem("Null value not allowed as an environment variable: GIT_URL")));
// After JENKINS-38608 fix
assertThat("Build log did not report empty string in job definition",
build.getLog(175), hasItem("FATAL: Git repository URL 1 is an empty string in job definition. Checkout requires a valid repository URL"));
}
/**
* An empty remote repo URL failed the job as expected but provided
* a poor diagnostic message. The fix for JENKINS-38608 improves
* the error message to be clear and helpful. This test checks for
* that error message when the second URL is empty.
* @throws Exception on error
*/
@Test
@Issue("JENKINS-38608")
public void testAddSecondRepositoryWithNullRepoURL() throws Exception{
String repoURL = "https://example.com/non-empty/repo/url";
List<UserRemoteConfig> repos = new ArrayList<>();
repos.add(new UserRemoteConfig(repoURL, null, null, null));
repos.add(new UserRemoteConfig(null, null, null, null));
FreeStyleProject project = setupProject(repos, Collections.singletonList(new BranchSpec("master")), null, false, null);
FreeStyleBuild build = build(project, Result.FAILURE);
// Before JENKINS-38608 fix
assertThat("Build log reports 'Null value not allowed'",
build.getLog(175), not(hasItem("Null value not allowed as an environment variable: GIT_URL_2")));
// After JENKINS-38608 fix
assertThat("Build log did not report empty string in job definition for URL 2",
build.getLog(175), hasItem("FATAL: Git repository URL 2 is an empty string in job definition. Checkout requires a valid repository URL"));
}
@Test
public void testBranchSpecWithRemotesHierarchical() throws Exception {
FreeStyleProject projectMasterBranch = setupProject("master", false, null, null, null, true, null);
FreeStyleProject projectHierarchicalBranch = setupProject("remotes/origin/rel-1/xy", false, null, null, null, true, null);
// create initial commit
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
// create hierarchical branch, delete master branch, and build
git.branch("rel-1/xy");
git.checkout("rel-1/xy");
git.deleteBranch("master");
build(projectMasterBranch, Result.FAILURE);
build(projectHierarchicalBranch, Result.SUCCESS, commitFile1);
}
@Test
public void testBranchSpecUsingTagWithSlash() throws Exception {
FreeStyleProject projectMasterBranch = setupProject("path/tag", false, null, null, null, true, null);
// create initial commit and build
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1 will be tagged with path/tag");
testRepo.git.tag("path/tag", "tag with a slash in the tag name");
build(projectMasterBranch, Result.SUCCESS, commitFile1);
}
@Test
public void testBasicIncludedRegion() throws Exception {
FreeStyleProject project = setupProject("master", false, null, null, null, ".*3");
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, Result.SUCCESS, commitFile1);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
final String commitFile2 = "commitFile2";
commit(commitFile2, janeDoe, "Commit number 2");
assertFalse("scm polling detected commit2 change, which should not have been included", project.poll(listener).hasChanges());
final String commitFile3 = "commitFile3";
commit(commitFile3, johnDoe, "Commit number 3");
assertTrue("scm polling did not detect commit3 change", project.poll(listener).hasChanges());
//... and build it...
final FreeStyleBuild build2 = build(project, Result.SUCCESS, commitFile2, commitFile3);
final Set<User> culprits = build2.getCulprits();
assertEquals("The build should have two culprit", 2, culprits.size());
PersonIdent[] expected = {johnDoe, janeDoe};
assertCulprits("jane doe and john doe should be the culprits", culprits, expected);
assertTrue(build2.getWorkspace().child(commitFile2).exists());
assertTrue(build2.getWorkspace().child(commitFile3).exists());
rule.assertBuildStatusSuccess(build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
/**
* testMergeCommitInExcludedRegionIsIgnored() confirms behavior of excluded regions with merge commits.
* This test has excluded and included regions, for files ending with .excluded and .included,
* respectively. The git repository is set up so that a non-fast-forward merge commit comes
* to master. The newly merged commit is a file ending with .excluded, so it should be ignored.
*
* @throws Exception on error
*/
@Issue({"JENKINS-20389","JENKINS-23606"})
@Test
public void testMergeCommitInExcludedRegionIsIgnored() throws Exception {
final String branchToMerge = "new-branch-we-merge-to-master";
FreeStyleProject project = setupProject("master", false, null, ".*\\.excluded", null, ".*\\.included");
final String initialCommit = "initialCommit";
commit(initialCommit, johnDoe, "Commit " + initialCommit + " to master");
build(project, Result.SUCCESS, initialCommit);
final String secondCommit = "secondCommit";
commit(secondCommit, johnDoe, "Commit " + secondCommit + " to master");
testRepo.git.checkoutBranch(branchToMerge, "HEAD~");
final String fileToMerge = "fileToMerge.excluded";
commit(fileToMerge, johnDoe, "Commit should be ignored: " + fileToMerge + " to " + branchToMerge);
ObjectId branchSHA = git.revParse("HEAD");
testRepo.git.checkoutBranch("master", "refs/heads/master");
MergeCommand mergeCommand = testRepo.git.merge();
mergeCommand.setRevisionToMerge(branchSHA);
mergeCommand.execute();
// Should return false, because our merge commit falls within the excluded region.
assertFalse("Polling should report no changes, because they are in the excluded region.",
project.poll(listener).hasChanges());
}
/**
* testMergeCommitInExcludedDirectoryIsIgnored() confirms behavior of excluded directories with merge commits.
* This test has excluded and included directories, named /excluded/ and /included/,respectively. The repository
* is set up so that a non-fast-forward merge commit comes to master, and is in the directory /excluded/,
* so it should be ignored.
*
* @throws Exception on error
*/
@Issue({"JENKINS-20389","JENKINS-23606"})
@Test
public void testMergeCommitInExcludedDirectoryIsIgnored() throws Exception {
final String branchToMerge = "new-branch-we-merge-to-master";
FreeStyleProject project = setupProject("master", false, null, "excluded/.*", null, "included/.*");
final String initialCommit = "initialCommit";
commit(initialCommit, johnDoe, "Commit " + initialCommit + " to master");
build(project, Result.SUCCESS, initialCommit);
final String secondCommit = "secondCommit";
commit(secondCommit, johnDoe, "Commit " + secondCommit + " to master");
testRepo.git.checkoutBranch(branchToMerge, "HEAD~");
final String fileToMerge = "excluded/should-be-ignored";
commit(fileToMerge, johnDoe, "Commit should be ignored: " + fileToMerge + " to " + branchToMerge);
ObjectId branchSHA = git.revParse("HEAD");
testRepo.git.checkoutBranch("master", "refs/heads/master");
MergeCommand mergeCommand = testRepo.git.merge();
mergeCommand.setRevisionToMerge(branchSHA);
mergeCommand.execute();
// Should return false, because our merge commit falls within the excluded directory.
assertFalse("Polling should see no changes, because they are in the excluded directory.",
project.poll(listener).hasChanges());
}
/**
* testMergeCommitInIncludedRegionIsProcessed() confirms behavior of included regions with merge commits.
* This test has excluded and included regions, for files ending with .excluded and .included, respectively.
* The git repository is set up so that a non-fast-forward merge commit comes to master. The newly merged
* commit is a file ending with .included, so it should be processed as a new change.
*
* @throws Exception on error
*/
@Issue({"JENKINS-20389","JENKINS-23606"})
@Test
public void testMergeCommitInIncludedRegionIsProcessed() throws Exception {
final String branchToMerge = "new-branch-we-merge-to-master";
FreeStyleProject project = setupProject("master", false, null, ".*\\.excluded", null, ".*\\.included");
final String initialCommit = "initialCommit";
commit(initialCommit, johnDoe, "Commit " + initialCommit + " to master");
build(project, Result.SUCCESS, initialCommit);
final String secondCommit = "secondCommit";
commit(secondCommit, johnDoe, "Commit " + secondCommit + " to master");
testRepo.git.checkoutBranch(branchToMerge, "HEAD~");
final String fileToMerge = "fileToMerge.included";
commit(fileToMerge, johnDoe, "Commit should be noticed and processed as a change: " + fileToMerge + " to " + branchToMerge);
ObjectId branchSHA = git.revParse("HEAD");
testRepo.git.checkoutBranch("master", "refs/heads/master");
MergeCommand mergeCommand = testRepo.git.merge();
mergeCommand.setRevisionToMerge(branchSHA);
mergeCommand.execute();
// Should return true, because our commit falls within the included region.
assertTrue("Polling should report changes, because they fall within the included region.",
project.poll(listener).hasChanges());
}
/**
* testMergeCommitInIncludedDirectoryIsProcessed() confirms behavior of included directories with merge commits.
* This test has excluded and included directories, named /excluded/ and /included/, respectively. The repository
* is set up so that a non-fast-forward merge commit comes to master, and is in the directory /included/,
* so it should be processed as a new change.
*
* @throws Exception on error
*/
@Issue({"JENKINS-20389","JENKINS-23606"})
@Test
public void testMergeCommitInIncludedDirectoryIsProcessed() throws Exception {
final String branchToMerge = "new-branch-we-merge-to-master";
FreeStyleProject project = setupProject("master", false, null, "excluded/.*", null, "included/.*");
final String initialCommit = "initialCommit";
commit(initialCommit, johnDoe, "Commit " + initialCommit + " to master");
build(project, Result.SUCCESS, initialCommit);
final String secondCommit = "secondCommit";
commit(secondCommit, johnDoe, "Commit " + secondCommit + " to master");
testRepo.git.checkoutBranch(branchToMerge, "HEAD~");
final String fileToMerge = "included/should-be-processed";
commit(fileToMerge, johnDoe, "Commit should be noticed and processed as a change: " + fileToMerge + " to " + branchToMerge);
ObjectId branchSHA = git.revParse("HEAD");
testRepo.git.checkoutBranch("master", "refs/heads/master");
MergeCommand mergeCommand = testRepo.git.merge();
mergeCommand.setRevisionToMerge(branchSHA);
mergeCommand.execute();
// Should return true, because our commit falls within the included directory.
assertTrue("Polling should report changes, because they are in the included directory.",
project.poll(listener).hasChanges());
}
/**
* testMergeCommitOutsideIncludedRegionIsIgnored() confirms behavior of included regions with merge commits.
* This test has an included region defined, for files ending with .included. There is no excluded region
* defined. The repository is set up so that a non-fast-forward merge commit comes to master. The newly merged commit
* is a file ending with .should-be-ignored, thus falling outside of the included region, so it should be ignored.
*
* @throws Exception on error
*/
@Issue({"JENKINS-20389","JENKINS-23606"})
@Test
public void testMergeCommitOutsideIncludedRegionIsIgnored() throws Exception {
final String branchToMerge = "new-branch-we-merge-to-master";
FreeStyleProject project = setupProject("master", false, null, null, null, ".*\\.included");
final String initialCommit = "initialCommit";
commit(initialCommit, johnDoe, "Commit " + initialCommit + " to master");
build(project, Result.SUCCESS, initialCommit);
final String secondCommit = "secondCommit";
commit(secondCommit, johnDoe, "Commit " + secondCommit + " to master");
testRepo.git.checkoutBranch(branchToMerge, "HEAD~");
final String fileToMerge = "fileToMerge.should-be-ignored";
commit(fileToMerge, johnDoe, "Commit should be ignored: " + fileToMerge + " to " + branchToMerge);
ObjectId branchSHA = git.revParse("HEAD");
testRepo.git.checkoutBranch("master", "refs/heads/master");
MergeCommand mergeCommand = testRepo.git.merge();
mergeCommand.setRevisionToMerge(branchSHA);
mergeCommand.execute();
// Should return false, because our commit falls outside the included region.
assertFalse("Polling should ignore the change, because it falls outside the included region.",
project.poll(listener).hasChanges());
}
/**
* testMergeCommitOutsideIncludedDirectoryIsIgnored() confirms behavior of included directories with merge commits.
* This test has only an included directory `/included` defined. The git repository is set up so that
* a non-fast-forward, but mergeable, commit comes to master. The newly merged commit is outside of the
* /included/ directory, so polling should report no changes.
*
* @throws Exception on error
*/
@Issue({"JENKINS-20389","JENKINS-23606"})
@Test
public void testMergeCommitOutsideIncludedDirectoryIsIgnored() throws Exception {
final String branchToMerge = "new-branch-we-merge-to-master";
FreeStyleProject project = setupProject("master", false, null, null, null, "included/.*");
final String initialCommit = "initialCommit";
commit(initialCommit, johnDoe, "Commit " + initialCommit + " to master");
build(project, Result.SUCCESS, initialCommit);
final String secondCommit = "secondCommit";
commit(secondCommit, johnDoe, "Commit " + secondCommit + " to master");
testRepo.git.checkoutBranch(branchToMerge, "HEAD~");
final String fileToMerge = "directory-to-ignore/file-should-be-ignored";
commit(fileToMerge, johnDoe, "Commit should be ignored: " + fileToMerge + " to " + branchToMerge);
ObjectId branchSHA = git.revParse("HEAD");
testRepo.git.checkoutBranch("master", "refs/heads/master");
MergeCommand mergeCommand = testRepo.git.merge();
mergeCommand.setRevisionToMerge(branchSHA);
mergeCommand.execute();
// Should return false, because our commit falls outside of the included directory
assertFalse("Polling should ignore the change, because it falls outside the included directory.",
project.poll(listener).hasChanges());
}
/**
* testMergeCommitOutsideExcludedRegionIsProcessed() confirms behavior of excluded regions with merge commits.
* This test has an excluded region defined, for files ending with .excluded. There is no included region defined.
* The repository is set up so a non-fast-forward merge commit comes to master. The newly merged commit is a file
* ending with .should-be-processed, thus falling outside of the excluded region, so it should be processed
* as a new change.
*
* @throws Exception on error
*/
@Issue({"JENKINS-20389","JENKINS-23606"})
@Test
public void testMergeCommitOutsideExcludedRegionIsProcessed() throws Exception {
final String branchToMerge = "new-branch-we-merge-to-master";
FreeStyleProject project = setupProject("master", false, null, ".*\\.excluded", null, null);
final String initialCommit = "initialCommit";
commit(initialCommit, johnDoe, "Commit " + initialCommit + " to master");
build(project, Result.SUCCESS, initialCommit);
final String secondCommit = "secondCommit";
commit(secondCommit, johnDoe, "Commit " + secondCommit + " to master");
testRepo.git.checkoutBranch(branchToMerge, "HEAD~");
final String fileToMerge = "fileToMerge.should-be-processed";
commit(fileToMerge, johnDoe, "Commit should be noticed and processed as a change: " + fileToMerge + " to " + branchToMerge);
ObjectId branchSHA = git.revParse("HEAD");
testRepo.git.checkoutBranch("master", "refs/heads/master");
MergeCommand mergeCommand = testRepo.git.merge();
mergeCommand.setRevisionToMerge(branchSHA);
mergeCommand.execute();
// Should return true, because our commit falls outside of the excluded region
assertTrue("Polling should process the change, because it falls outside the excluded region.",
project.poll(listener).hasChanges());
}
/**
* testMergeCommitOutsideExcludedDirectoryIsProcessed() confirms behavior of excluded directories with merge commits.
* This test has an excluded directory `excluded` defined. There is no `included` directory defined. The repository
* is set up so that a non-fast-forward merge commit comes to master. The newly merged commit resides in a
* directory of its own, thus falling outside of the excluded directory, so it should be processed
* as a new change.
*
* @throws Exception on error
*/
@Issue({"JENKINS-20389","JENKINS-23606"})
@Test
public void testMergeCommitOutsideExcludedDirectoryIsProcessed() throws Exception {
final String branchToMerge = "new-branch-we-merge-to-master";
FreeStyleProject project = setupProject("master", false, null, "excluded/.*", null, null);
final String initialCommit = "initialCommit";
commit(initialCommit, johnDoe, "Commit " + initialCommit + " to master");
build(project, Result.SUCCESS, initialCommit);
final String secondCommit = "secondCommit";
commit(secondCommit, johnDoe, "Commit " + secondCommit + " to master");
testRepo.git.checkoutBranch(branchToMerge, "HEAD~");
// Create this new file outside of our excluded directory
final String fileToMerge = "directory-to-include/file-should-be-processed";
commit(fileToMerge, johnDoe, "Commit should be noticed and processed as a change: " + fileToMerge + " to " + branchToMerge);
ObjectId branchSHA = git.revParse("HEAD");
testRepo.git.checkoutBranch("master", "refs/heads/master");
MergeCommand mergeCommand = testRepo.git.merge();
mergeCommand.setRevisionToMerge(branchSHA);
mergeCommand.execute();
// Should return true, because our commit falls outside of the excluded directory
assertTrue("SCM polling should process the change, because it falls outside the excluded directory.",
project.poll(listener).hasChanges());
}
@Test
public void testIncludedRegionWithDeeperCommits() throws Exception {
FreeStyleProject project = setupProject("master", false, null, null, null, ".*3");
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, Result.SUCCESS, commitFile1);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
final String commitFile2 = "commitFile2";
commit(commitFile2, janeDoe, "Commit number 2");
assertFalse("scm polling detected commit2 change, which should not have been included", project.poll(listener).hasChanges());
final String commitFile3 = "commitFile3";
commit(commitFile3, johnDoe, "Commit number 3");
final String commitFile4 = "commitFile4";
commit(commitFile4, janeDoe, "Commit number 4");
assertTrue("scm polling did not detect commit3 change", project.poll(listener).hasChanges());
//... and build it...
final FreeStyleBuild build2 = build(project, Result.SUCCESS, commitFile2, commitFile3);
final Set<User> culprits = build2.getCulprits();
assertEquals("The build should have two culprit", 2, culprits.size());
PersonIdent[] expected = {johnDoe, janeDoe};
assertCulprits("jane doe and john doe should be the culprits", culprits, expected);
assertTrue(build2.getWorkspace().child(commitFile2).exists());
assertTrue(build2.getWorkspace().child(commitFile3).exists());
rule.assertBuildStatusSuccess(build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
@Test
public void testBasicExcludedRegion() throws Exception {
FreeStyleProject project = setupProject("master", false, null, ".*2", null, null);
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, Result.SUCCESS, commitFile1);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
final String commitFile2 = "commitFile2";
commit(commitFile2, janeDoe, "Commit number 2");
assertFalse("scm polling detected commit2 change, which should have been excluded", project.poll(listener).hasChanges());
final String commitFile3 = "commitFile3";
commit(commitFile3, johnDoe, "Commit number 3");
assertTrue("scm polling did not detect commit3 change", project.poll(listener).hasChanges());
//... and build it...
final FreeStyleBuild build2 = build(project, Result.SUCCESS, commitFile2, commitFile3);
final Set<User> culprits = build2.getCulprits();
assertEquals("The build should have two culprit", 2, culprits.size());
PersonIdent[] expected = {johnDoe, janeDoe};
assertCulprits("jane doe and john doe should be the culprits", culprits, expected);
assertTrue(build2.getWorkspace().child(commitFile2).exists());
assertTrue(build2.getWorkspace().child(commitFile3).exists());
rule.assertBuildStatusSuccess(build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
@Test
public void testCleanBeforeCheckout() throws Exception {
FreeStyleProject p = setupProject("master", false, null, null, "Jane Doe", null);
((GitSCM)p.getScm()).getExtensions().add(new CleanBeforeCheckout());
final String commitFile1 = "commitFile1";
final String commitFile2 = "commitFile2";
commit(commitFile1, johnDoe, janeDoe, "Commit number 1");
commit(commitFile2, johnDoe, janeDoe, "Commit number 2");
final FreeStyleBuild firstBuild = build(p, Result.SUCCESS, commitFile1);
final String branch1 = "Branch1";
final String branch2 = "Branch2";
List<BranchSpec> branches = new ArrayList<>();
branches.add(new BranchSpec("master"));
branches.add(new BranchSpec(branch1));
branches.add(new BranchSpec(branch2));
git.branch(branch1);
git.checkout(branch1);
p.poll(listener).hasChanges();
assertThat(firstBuild.getLog(175), hasItem("Cleaning workspace"));
assertTrue(firstBuild.getLog().indexOf("Cleaning") > firstBuild.getLog().indexOf("Cloning")); //clean should be after clone
assertTrue(firstBuild.getLog().indexOf("Cleaning") < firstBuild.getLog().indexOf("Checking out")); //clean before checkout
assertTrue(firstBuild.getWorkspace().child(commitFile1).exists());
git.checkout(branch1);
final FreeStyleBuild secondBuild = build(p, Result.SUCCESS, commitFile2);
p.poll(listener).hasChanges();
assertThat(secondBuild.getLog(175), hasItem("Cleaning workspace"));
assertTrue(secondBuild.getLog().indexOf("Cleaning") < secondBuild.getLog().indexOf("Fetching upstream changes"));
assertTrue(secondBuild.getWorkspace().child(commitFile2).exists());
}
@Issue("JENKINS-8342")
@Test
public void testExcludedRegionMultiCommit() throws Exception {
// Two projects; each one should build only when its own file changes
FreeStyleProject clientProject = setupProject("master", false, null, ".*serverFile", null, null);
FreeStyleProject serverProject = setupProject("master", false, null, ".*clientFile", null, null);
String initialCommitFile = "initialFile";
commit(initialCommitFile, johnDoe, "initial commit");
build(clientProject, Result.SUCCESS, initialCommitFile);
build(serverProject, Result.SUCCESS, initialCommitFile);
assertFalse("scm polling should not detect any more changes after initial build", clientProject.poll(listener).hasChanges());
assertFalse("scm polling should not detect any more changes after initial build", serverProject.poll(listener).hasChanges());
// A commit touches serverFile, so only the server project should build.
commit("myserverFile", johnDoe, "commit first server file");
assertFalse("scm polling should not detect any changes in client project", clientProject.poll(listener).hasChanges());
assertTrue("scm polling did not detect changes in server project", serverProject.poll(listener).hasChanges());
// Commits touch both clientFile and serverFile, so both projects should build.
commit("myNewserverFile", johnDoe, "commit new server file");
commit("myclientFile", johnDoe, "commit first clientfile");
assertTrue("scm polling did not detect changes in client project", clientProject.poll(listener).hasChanges());
assertTrue("scm polling did not detect changes in server project", serverProject.poll(listener).hasChanges());
}
/*
* With multiple branches specified in the project, commits from an excluded user should not
* trigger a build of the excluded revisions when another branch changes.
*/
/*
@Issue("JENKINS-8342")
@Test
public void testMultipleBranchWithExcludedUser() throws Exception {
final String branch1 = "Branch1";
final String branch2 = "Branch2";
List<BranchSpec> branches = new ArrayList<BranchSpec>();
branches.add(new BranchSpec("master"));
branches.add(new BranchSpec(branch1));
branches.add(new BranchSpec(branch2));
final FreeStyleProject project = setupProject(branches, false, null, null, janeDoe.getName(), null, false, null);
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, Result.SUCCESS, commitFile1);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
// create branches here so we can get back to them later...
git.branch(branch1);
git.branch(branch2);
final String commitFile2 = "commitFile2";
commit(commitFile2, johnDoe, "Commit number 2");
final String commitFile3 = "commitFile3";
commit(commitFile3, johnDoe, "Commit number 3");
assertTrue("scm polling should detect changes in 'master' branch", project.poll(listener).hasChanges());
build(project, Result.SUCCESS, commitFile1, commitFile2);
assertFalse("scm polling should not detect any more changes after last build", project.poll(listener).hasChanges());
// Add excluded commit
final String commitFile4 = "commitFile4";
commit(commitFile4, janeDoe, "Commit number 4");
assertFalse("scm polling detected change in 'master', which should have been excluded", project.poll(listener).hasChanges());
// now jump back...
git.checkout(branch1);
final String branch1File1 = "branch1File1";
commit(branch1File1, janeDoe, "Branch1 commit number 1");
assertFalse("scm polling detected change in 'Branch1', which should have been excluded", project.poll(listener).hasChanges());
// and the other branch...
git.checkout(branch2);
final String branch2File1 = "branch2File1";
commit(branch2File1, janeDoe, "Branch2 commit number 1");
assertFalse("scm polling detected change in 'Branch2', which should have been excluded", project.poll(listener).hasChanges());
final String branch2File2 = "branch2File2";
commit(branch2File2, johnDoe, "Branch2 commit number 2");
assertTrue("scm polling should detect changes in 'Branch2' branch", project.poll(listener).hasChanges());
//... and build it...
build(project, Result.SUCCESS, branch2File1, branch2File2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
// now jump back again...
git.checkout(branch1);
// Commit excluded after non-excluded commit, should trigger build.
final String branch1File2 = "branch1File2";
commit(branch1File2, johnDoe, "Branch1 commit number 2");
final String branch1File3 = "branch1File3";
commit(branch1File3, janeDoe, "Branch1 commit number 3");
assertTrue("scm polling should detect changes in 'Branch1' branch", project.poll(listener).hasChanges());
build(project, Result.SUCCESS, branch1File1, branch1File2, branch1File3);
} */
@Test
public void testBasicExcludedUser() throws Exception {
FreeStyleProject project = setupProject("master", false, null, null, "Jane Doe", null);
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, Result.SUCCESS, commitFile1);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
final String commitFile2 = "commitFile2";
commit(commitFile2, janeDoe, "Commit number 2");
assertFalse("scm polling detected commit2 change, which should have been excluded", project.poll(listener).hasChanges());
final String commitFile3 = "commitFile3";
commit(commitFile3, johnDoe, "Commit number 3");
assertTrue("scm polling did not detect commit3 change", project.poll(listener).hasChanges());
//... and build it...
final FreeStyleBuild build2 = build(project, Result.SUCCESS, commitFile2, commitFile3);
final Set<User> culprits = build2.getCulprits();
assertEquals("The build should have two culprit", 2, culprits.size());
PersonIdent[] expected = {johnDoe, janeDoe};
assertCulprits("jane doe and john doe should be the culprits", culprits, expected);
assertTrue(build2.getWorkspace().child(commitFile2).exists());
assertTrue(build2.getWorkspace().child(commitFile3).exists());
rule.assertBuildStatusSuccess(build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
@Test
public void testBasicInSubdir() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
((GitSCM)project.getScm()).getExtensions().add(new RelativeTargetDirectory("subdir"));
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, "subdir", Result.SUCCESS, commitFile1);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
final String commitFile2 = "commitFile2";
commit(commitFile2, janeDoe, "Commit number 2");
assertTrue("scm polling did not detect commit2 change", project.poll(listener).hasChanges());
//... and build it...
final FreeStyleBuild build2 = build(project, "subdir", Result.SUCCESS,
commitFile2);
final Set<User> culprits = build2.getCulprits();
assertEquals("The build should have only one culprit", 1, culprits.size());
assertEquals("", janeDoe.getName(), culprits.iterator().next().getFullName());
assertEquals("The workspace should have a 'subdir' subdirectory, but does not.", true,
build2.getWorkspace().child("subdir").exists());
assertEquals("The 'subdir' subdirectory should contain commitFile2, but does not.", true,
build2.getWorkspace().child("subdir").child(commitFile2).exists());
rule.assertBuildStatusSuccess(build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
@Test
public void testBasicWithAgent() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
project.setAssignedLabel(rule.createSlave().getSelfLabel());
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, Result.SUCCESS, commitFile1);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
final String commitFile2 = "commitFile2";
commit(commitFile2, janeDoe, "Commit number 2");
assertTrue("scm polling did not detect commit2 change", project.poll(listener).hasChanges());
//... and build it...
final FreeStyleBuild build2 = build(project, Result.SUCCESS, commitFile2);
final Set<User> culprits = build2.getCulprits();
assertEquals("The build should have only one culprit", 1, culprits.size());
assertEquals("", janeDoe.getName(), culprits.iterator().next().getFullName());
assertTrue(build2.getWorkspace().child(commitFile2).exists());
rule.assertBuildStatusSuccess(build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
@Issue("HUDSON-7547")
@Test
public void testBasicWithAgentNoExecutorsOnMaster() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
rule.jenkins.setNumExecutors(0);
project.setAssignedLabel(rule.createSlave().getSelfLabel());
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, Result.SUCCESS, commitFile1);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
final String commitFile2 = "commitFile2";
commit(commitFile2, janeDoe, "Commit number 2");
assertTrue("scm polling did not detect commit2 change", project.poll(listener).hasChanges());
//... and build it...
final FreeStyleBuild build2 = build(project, Result.SUCCESS, commitFile2);
final Set<User> culprits = build2.getCulprits();
assertEquals("The build should have only one culprit", 1, culprits.size());
assertEquals("", janeDoe.getName(), culprits.iterator().next().getFullName());
assertTrue(build2.getWorkspace().child(commitFile2).exists());
rule.assertBuildStatusSuccess(build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
@Test
public void testAuthorOrCommitterFalse() throws Exception {
// Test with authorOrCommitter set to false and make sure we get the committer.
FreeStyleProject project = setupSimpleProject("master");
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, janeDoe, "Commit number 1");
final FreeStyleBuild firstBuild = build(project, Result.SUCCESS, commitFile1);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
final String commitFile2 = "commitFile2";
commit(commitFile2, johnDoe, janeDoe, "Commit number 2");
assertTrue("scm polling did not detect commit2 change", project.poll(listener).hasChanges());
final FreeStyleBuild secondBuild = build(project, Result.SUCCESS, commitFile2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
final Set<User> secondCulprits = secondBuild.getCulprits();
assertEquals("The build should have only one culprit", 1, secondCulprits.size());
assertEquals("Did not get the committer as the change author with authorOrCommitter==false",
janeDoe.getName(), secondCulprits.iterator().next().getFullName());
}
@Test
public void testAuthorOrCommitterTrue() throws Exception {
// Next, test with authorOrCommitter set to true and make sure we get the author.
FreeStyleProject project = setupSimpleProject("master");
((GitSCM)project.getScm()).getExtensions().add(new AuthorInChangelog());
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, janeDoe, "Commit number 1");
final FreeStyleBuild firstBuild = build(project, Result.SUCCESS, commitFile1);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
final String commitFile2 = "commitFile2";
commit(commitFile2, johnDoe, janeDoe, "Commit number 2");
assertTrue("scm polling did not detect commit2 change", project.poll(listener).hasChanges());
final FreeStyleBuild secondBuild = build(project, Result.SUCCESS, commitFile2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
final Set<User> secondCulprits = secondBuild.getCulprits();
assertEquals("The build should have only one culprit", 1, secondCulprits.size());
assertEquals("Did not get the author as the change author with authorOrCommitter==true",
johnDoe.getName(), secondCulprits.iterator().next().getFullName());
}
@Test
public void testNewCommitToUntrackedBranchDoesNotTriggerBuild() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, Result.SUCCESS, commitFile1);
//now create and checkout a new branch:
git.checkout(Constants.HEAD, "untracked");
//.. and commit to it:
final String commitFile2 = "commitFile2";
commit(commitFile2, johnDoe, "Commit number 2");
assertFalse("scm polling should not detect commit2 change because it is not in the branch we are tracking.", project.poll(listener).hasChanges());
}
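// Helper: builds the expected "checkout -f <commit>" log fragment from the given environment variable of the project's build environment.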
private String checkoutString(FreeStyleProject project, String envVar) {
return "checkout -f " + getEnvVars(project).get(envVar);
}
@Test
public void testEnvVarsAvailable() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
FreeStyleBuild build1 = build(project, Result.SUCCESS, commitFile1);
assertEquals("origin/master", getEnvVars(project).get(GitSCM.GIT_BRANCH));
rule.waitForMessage(getEnvVars(project).get(GitSCM.GIT_BRANCH), build1);
rule.waitForMessage(checkoutString(project, GitSCM.GIT_COMMIT), build1);
final String commitFile2 = "commitFile2";
commit(commitFile2, johnDoe, "Commit number 2");
FreeStyleBuild build2 = build(project, Result.SUCCESS, commitFile2);
rule.assertLogNotContains(checkoutString(project, GitSCM.GIT_PREVIOUS_COMMIT), build2);
rule.waitForMessage(checkoutString(project, GitSCM.GIT_PREVIOUS_COMMIT), build1);
rule.assertLogNotContains(checkoutString(project, GitSCM.GIT_PREVIOUS_SUCCESSFUL_COMMIT), build2);
rule.waitForMessage(checkoutString(project, GitSCM.GIT_PREVIOUS_SUCCESSFUL_COMMIT), build1);
}
@Issue("HUDSON-7411")
@Test
public void testNodeEnvVarsAvailable() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
DumbSlave agent = rule.createSlave();
setVariables(agent, new Entry("TESTKEY", "agent value"));
project.setAssignedLabel(agent.getSelfLabel());
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, Result.SUCCESS, commitFile1);
assertEquals("agent value", getEnvVars(project).get("TESTKEY"));
}
@Test
public void testNodeOverrideGit() throws Exception {
GitSCM scm = new GitSCM(null);
DumbSlave agent = rule.createSlave();
GitTool.DescriptorImpl gitToolDescriptor = rule.jenkins.getDescriptorByType(GitTool.DescriptorImpl.class);
GitTool installation = new GitTool("Default", "/usr/bin/git", null);
gitToolDescriptor.setInstallations(installation);
String gitExe = scm.getGitExe(agent, TaskListener.NULL);
assertEquals("/usr/bin/git", gitExe);
ToolLocationNodeProperty nodeGitLocation = new ToolLocationNodeProperty(new ToolLocationNodeProperty.ToolLocation(gitToolDescriptor, "Default", "C:\\Program Files\\Git\\bin\\git.exe"));
agent.setNodeProperties(Collections.singletonList(nodeGitLocation));
gitExe = scm.getGitExe(agent, TaskListener.NULL);
assertEquals("C:\\Program Files\\Git\\bin\\git.exe", gitExe);
}
/*
* A previous version of GitSCM would only build against branches, not tags. This test checks that that
* regression has been fixed.
*/
@Test
public void testGitSCMCanBuildAgainstTags() throws Exception {
final String mytag = "mytag";
FreeStyleProject project = setupSimpleProject(mytag);
build(project, Result.FAILURE); // fail, because there's nothing to be checked out here
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
// Try again. The first build will leave the repository in a bad state because we
// cloned something without even a HEAD - which will mean it will want to re-clone once there is some
// actual data.
build(project, Result.FAILURE); // fail, because there's nothing to be checked out here
//now create and checkout a new branch:
final String tmpBranch = "tmp";
git.branch(tmpBranch);
git.checkout(tmpBranch);
// commit to it
final String commitFile2 = "commitFile2";
commit(commitFile2, johnDoe, "Commit number 2");
assertFalse("scm polling should not detect any more changes since mytag is untouched right now", project.poll(listener).hasChanges());
build(project, Result.FAILURE); // fail, because there's nothing to be checked out here
// tag it, then delete the tmp branch
git.tag(mytag, "mytag initial");
git.checkout("master");
git.deleteBranch(tmpBranch);
// at this point we're back on master, there are no other branches, tag "mytag" exists but is
// not part of "master"
assertTrue("scm polling should detect commit2 change in 'mytag'", project.poll(listener).hasChanges());
build(project, Result.SUCCESS, commitFile2);
assertFalse("scm polling should not detect any more changes after last build", project.poll(listener).hasChanges());
// now, create tmp branch again against mytag:
git.checkout(mytag);
git.branch(tmpBranch);
// another commit:
final String commitFile3 = "commitFile3";
commit(commitFile3, johnDoe, "Commit number 3");
assertFalse("scm polling should not detect any more changes since mytag is untouched right now", project.poll(listener).hasChanges());
// now force mytag to point to the new commit; if everything goes well, GitSCM should pick the change up:
git.tag(mytag, "mytag moved");
git.checkout("master");
git.deleteBranch(tmpBranch);
// at this point we're back on master, there are no other branches, "mytag" has been updated to a new commit:
assertTrue("scm polling should detect commit3 change in 'mytag'", project.poll(listener).hasChanges());
build(project, Result.SUCCESS, commitFile3);
assertFalse("scm polling should not detect any more changes after last build", project.poll(listener).hasChanges());
}
/*
* Not specifying a branch string in the project implies that we should be polling for changes in
* all branches.
*/
@Test
public void testMultipleBranchBuild() throws Exception {
// empty string will result in a project that tracks against changes in all branches:
final FreeStyleProject project = setupSimpleProject("");
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, Result.SUCCESS, commitFile1);
// create a branch here so we can get back to this point later...
final String fork = "fork";
git.branch(fork);
final String commitFile2 = "commitFile2";
commit(commitFile2, johnDoe, "Commit number 2");
final String commitFile3 = "commitFile3";
commit(commitFile3, johnDoe, "Commit number 3");
assertTrue("scm polling should detect changes in 'master' branch", project.poll(listener).hasChanges());
build(project, Result.SUCCESS, commitFile1, commitFile2);
assertFalse("scm polling should not detect any more changes after last build", project.poll(listener).hasChanges());
// now jump back...
git.checkout(fork);
// add some commits to the fork branch...
final String forkFile1 = "forkFile1";
commit(forkFile1, johnDoe, "Fork commit number 1");
final String forkFile2 = "forkFile2";
commit(forkFile2, johnDoe, "Fork commit number 2");
assertTrue("scm polling should detect changes in 'fork' branch", project.poll(listener).hasChanges());
build(project, Result.SUCCESS, forkFile1, forkFile2);
assertFalse("scm polling should not detect any more changes after last build", project.poll(listener).hasChanges());
}
@Test
public void testMultipleBranchesWithTags() throws Exception {
List<BranchSpec> branchSpecs = Arrays.asList(
new BranchSpec("refs/tags/v*"),
new BranchSpec("refs/remotes/origin/non-existent"));
FreeStyleProject project = setupProject(branchSpecs, false, null, null, janeDoe.getName(), null, false, null);
// create initial commit and then run the build against it:
// Here the changelog is by default empty (because the changelog for the first commit is always empty)
commit("commitFileBase", johnDoe, "Initial Commit");
// there are no branches or tags matching the branch specs yet
FreeStyleBuild freeStyleBuild = build(project, Result.FAILURE);
final String v1 = "v1";
git.tag(v1, "version 1");
assertTrue("v1 tag exists", git.tagExists(v1));
freeStyleBuild = build(project, Result.SUCCESS);
assertTrue("change set is empty", freeStyleBuild.getChangeSet().isEmptySet());
commit("file1", johnDoe, "change to file1");
git.tag("none", "latest");
freeStyleBuild = build(project, Result.SUCCESS);
ObjectId tag = git.revParse(Constants.R_TAGS + v1);
GitSCM scm = (GitSCM)project.getScm();
BuildData buildData = scm.getBuildData(freeStyleBuild);
assertEquals("last build matches the v1 tag revision", tag, buildData.lastBuild.getSHA1());
}
@Issue("JENKINS-19037")
@SuppressWarnings("ResultOfObjectAllocationIgnored")
@Test
public void testBlankRepositoryName() throws Exception {
new GitSCM(null);
}
@Issue("JENKINS-10060")
@Test
public void testSubmoduleFixup() throws Exception {
File repo = secondRepo.getRoot();
FilePath moduleWs = new FilePath(repo);
org.jenkinsci.plugins.gitclient.GitClient moduleRepo = Git.with(listener, new EnvVars()).in(repo).getClient();
{// first we create a Git repository with submodule
moduleRepo.init();
moduleWs.child("a").touch(0);
moduleRepo.add("a");
moduleRepo.commit("creating a module");
git.addSubmodule(repo.getAbsolutePath(), "module1");
git.commit("creating a super project");
}
// configure two projects, 'u' -> 'd', chained together via a build trigger
FreeStyleProject u = createFreeStyleProject();
FreeStyleProject d = createFreeStyleProject();
u.setScm(new GitSCM(workDir.getPath()));
u.getPublishersList().add(new BuildTrigger(new hudson.plugins.parameterizedtrigger.BuildTriggerConfig(d.getName(), ResultCondition.SUCCESS,
new GitRevisionBuildParameters())));
d.setScm(new GitSCM(workDir.getPath()));
rule.jenkins.rebuildDependencyGraph();
FreeStyleBuild ub = rule.assertBuildStatusSuccess(u.scheduleBuild2(0));
for (int i=0; (d.getLastBuild()==null || d.getLastBuild().isBuilding()) && i<100; i++) // wait only up to 10 sec to avoid infinite loop
Thread.sleep(100);
FreeStyleBuild db = d.getLastBuild();
assertNotNull("downstream build didn't happen",db);
rule.assertBuildStatusSuccess(db);
}
@Test
public void testBuildChooserContext() throws Exception {
final FreeStyleProject p = createFreeStyleProject();
final FreeStyleBuild b = rule.assertBuildStatusSuccess(p.scheduleBuild2(0));
BuildChooserContextImpl c = new BuildChooserContextImpl(p, b, null);
c.actOnBuild(new ContextCallable<Run<?,?>, Object>() {
public Object invoke(Run param, VirtualChannel channel) throws IOException, InterruptedException {
assertSame(param,b);
return null;
}
});
c.actOnProject(new ContextCallable<Job<?,?>, Object>() {
public Object invoke(Job param, VirtualChannel channel) throws IOException, InterruptedException {
assertSame(param,p);
return null;
}
});
DumbSlave agent = rule.createOnlineSlave();
assertEquals(p.toString(), agent.getChannel().call(new BuildChooserContextTestCallable(c)));
}
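// Runs on an agent via MasterToSlaveCallable; actOnProject is expected to execute its callable back on the controller, where a remoting Channel and the Jenkins singleton are available.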
private static class BuildChooserContextTestCallable extends MasterToSlaveCallable<String,IOException> {
private final BuildChooserContext c;
public BuildChooserContextTestCallable(BuildChooserContext c) {
this.c = c;
}
public String call() throws IOException {
try {
return c.actOnProject(new ContextCallable<Job<?,?>, String>() {
public String invoke(Job<?,?> param, VirtualChannel channel) throws IOException, InterruptedException {
assertTrue(channel instanceof Channel);
assertTrue(Jenkins.getInstanceOrNull()!=null);
return param.toString();
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
}
}
// eg: "jane doe and john doe should be the culprits", culprits, [johnDoe, janeDoe])
static public void assertCulprits(String assertMsg, Set<User> actual, PersonIdent[] expected)
{
Collection<String> fullNames = Collections2.transform(actual, new Function<User,String>() {
public String apply(User u)
{
return u.getFullName();
}
});
for(PersonIdent p : expected)
{
assertTrue(assertMsg, fullNames.contains(p.getName()));
}
}
@Test
public void testEmailCommitter() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
// setup global config
GitSCM scm = (GitSCM) project.getScm();
final DescriptorImpl descriptor = (DescriptorImpl) scm.getDescriptor();
assertFalse("Wrong initial value for create account based on e-mail", scm.isCreateAccountBasedOnEmail());
descriptor.setCreateAccountBasedOnEmail(true);
assertTrue("Create account based on e-mail not set", scm.isCreateAccountBasedOnEmail());
assertFalse("Wrong initial value for use existing user if same e-mail already found", scm.isUseExistingAccountWithSameEmail());
descriptor.setUseExistingAccountWithSameEmail(true);
assertTrue("Use existing user if same e-mail already found is not set", scm.isUseExistingAccountWithSameEmail());
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
final FreeStyleBuild build = build(project, Result.SUCCESS, commitFile1);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
final String commitFile2 = "commitFile2";
final PersonIdent jeffDoe = new PersonIdent("Jeff Doe", "[email protected]");
commit(commitFile2, jeffDoe, "Commit number 2");
assertTrue("scm polling did not detect commit2 change", project.poll(listener).hasChanges());
//... and build it...
final FreeStyleBuild build2 = build(project, Result.SUCCESS, commitFile2);
final Set<User> culprits = build2.getCulprits();
assertEquals("The build should have only one culprit", 1, culprits.size());
User culprit = culprits.iterator().next();
assertEquals("", jeffDoe.getEmailAddress(), culprit.getId());
assertEquals("", jeffDoe.getName(), culprit.getFullName());
rule.assertBuildStatusSuccess(build);
}
@Issue("JENKINS-59868")
@Test
public void testNonExistentWorkingDirectoryPoll() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
// create initial commit and then run the build against it
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
project.setScm(new GitSCM(
((GitSCM)project.getScm()).getUserRemoteConfigs(),
Collections.singletonList(new BranchSpec("master")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
// configure GitSCM with the DisableRemotePoll extension to ensure that polling uses the workspace
Collections.singletonList(new DisableRemotePoll())));
FreeStyleBuild build1 = build(project, Result.SUCCESS, commitFile1);
// Empty the workspace directory
build1.getWorkspace().deleteRecursive();
// Setup a recorder for polling logs
RingBufferLogHandler pollLogHandler = new RingBufferLogHandler(10);
Logger pollLogger = Logger.getLogger(GitSCMTest.class.getName());
pollLogger.addHandler(pollLogHandler);
TaskListener taskListener = new LogTaskListener(pollLogger, Level.INFO);
// Make sure that polling returns BUILD_NOW and properly logs the reason
FilePath filePath = build1.getWorkspace();
assertThat(project.getScm().compareRemoteRevisionWith(project, new Launcher.LocalLauncher(taskListener),
filePath, taskListener, null), is(PollingResult.BUILD_NOW));
assertTrue(pollLogHandler.getView().stream().anyMatch(m ->
m.getMessage().contains("[poll] Working Directory does not exist")));
}
// Disabled - consistently fails, needs more analysis
// @Test
public void testFetchFromMultipleRepositories() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
TestGitRepo secondTestRepo = new TestGitRepo("second", secondRepo.getRoot(), listener);
List<UserRemoteConfig> remotes = new ArrayList<>();
remotes.addAll(testRepo.remoteConfigs());
remotes.addAll(secondTestRepo.remoteConfigs());
project.setScm(new GitSCM(
remotes,
Collections.singletonList(new BranchSpec("master")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
Collections.<GitSCMExtension>emptyList()));
// create initial commit and then run the build against it:
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
build(project, Result.SUCCESS, commitFile1);
/* Diagnostic help - for later use */
SCMRevisionState baseline = project.poll(listener).baseline;
Change change = project.poll(listener).change;
SCMRevisionState remote = project.poll(listener).remote;
String assertionMessage = MessageFormat.format("polling incorrectly detected change after build. Baseline: {0}, Change: {1}, Remote: {2}", baseline, change, remote);
assertFalse(assertionMessage, project.poll(listener).hasChanges());
final String commitFile2 = "commitFile2";
secondTestRepo.commit(commitFile2, janeDoe, "Commit number 2");
assertTrue("scm polling did not detect commit2 change", project.poll(listener).hasChanges());
//... and build it...
final FreeStyleBuild build2 = build(project, Result.SUCCESS, commitFile2);
assertTrue(build2.getWorkspace().child(commitFile2).exists());
rule.assertBuildStatusSuccess(build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
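// Shared helper for the JENKINS-26268 tests below: configures one project with two remotes and the given branch spec, commits once, and expects a successful build.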
private void branchSpecWithMultipleRepositories(String branchName) throws Exception {
FreeStyleProject project = setupSimpleProject("master");
TestGitRepo secondTestRepo = new TestGitRepo("second", secondRepo.getRoot(), listener);
List<UserRemoteConfig> remotes = new ArrayList<>();
remotes.addAll(testRepo.remoteConfigs());
remotes.addAll(secondTestRepo.remoteConfigs());
// create initial commit
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
project.setScm(new GitSCM(
remotes,
Collections.singletonList(new BranchSpec(branchName)),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
Collections.<GitSCMExtension>emptyList()));
final FreeStyleBuild build = build(project, Result.SUCCESS, commitFile1);
rule.assertBuildStatusSuccess(build);
}
@Issue("JENKINS-26268")
public void testBranchSpecAsSHA1WithMultipleRepositories() throws Exception {
branchSpecWithMultipleRepositories(testRepo.git.revParse("HEAD").getName());
}
@Issue("JENKINS-26268")
public void testBranchSpecAsRemotesOriginMasterWithMultipleRepositories() throws Exception {
branchSpecWithMultipleRepositories("remotes/origin/master");
}
@Issue("JENKINS-25639")
@Test
public void testCommitDetectedOnlyOnceInMultipleRepositories() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
TestGitRepo secondTestRepo = new TestGitRepo("secondRepo", secondRepo.getRoot(), listener);
List<UserRemoteConfig> remotes = new ArrayList<>();
remotes.addAll(testRepo.remoteConfigs());
remotes.addAll(secondTestRepo.remoteConfigs());
GitSCM gitSCM = new GitSCM(
remotes,
Collections.singletonList(new BranchSpec("origin/master")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
Collections.<GitSCMExtension>emptyList());
project.setScm(gitSCM);
/* Check that polling would force a build through
* compareRemoteRevisionWith by detecting no last build */
FilePath filePath = new FilePath(new File("."));
assertThat(gitSCM.compareRemoteRevisionWith(project, new Launcher.LocalLauncher(listener), filePath, listener, null), is(PollingResult.BUILD_NOW));
commit("commitFile1", johnDoe, "Commit number 1");
FreeStyleBuild build = build(project, Result.SUCCESS, "commitFile1");
commit("commitFile2", johnDoe, "Commit number 2");
git = Git.with(listener, new EnvVars()).in(build.getWorkspace()).getClient();
for (RemoteConfig remoteConfig : gitSCM.getRepositories()) {
git.fetch_().from(remoteConfig.getURIs().get(0), remoteConfig.getFetchRefSpecs());
}
BuildChooser buildChooser = gitSCM.getBuildChooser();
Collection<Revision> candidateRevisions = buildChooser.getCandidateRevisions(false, "origin/master", git, listener, project.getLastBuild().getAction(BuildData.class), null);
assertEquals(1, candidateRevisions.size());
gitSCM.setBuildChooser(buildChooser); // Should be a no-op
Collection<Revision> candidateRevisions2 = buildChooser.getCandidateRevisions(false, "origin/master", git, listener, project.getLastBuild().getAction(BuildData.class), null);
assertThat(candidateRevisions2, is(candidateRevisions));
}
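// Alternately adds the ChangelogToBranch extension on successive calls; the resulting changelog should be identical whether or not it is present.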
private final Random random = new Random();
private boolean useChangelogToBranch = random.nextBoolean();
private void addChangelogToBranchExtension(GitSCM scm) {
if (useChangelogToBranch) {
/* Changelog should be no different with this enabled or disabled */
ChangelogToBranchOptions changelogOptions = new ChangelogToBranchOptions("origin", "master");
scm.getExtensions().add(new ChangelogToBranch(changelogOptions));
}
useChangelogToBranch = !useChangelogToBranch;
}
@Test
public void testMerge() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
GitSCM scm = new GitSCM(
createRemoteRepositories(),
Collections.singletonList(new BranchSpec("*")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
Collections.<GitSCMExtension>emptyList());
scm.getExtensions().add(new PreBuildMerge(new UserMergeOptions("origin", "integration", "default", MergeCommand.GitPluginFastForwardMode.FF)));
addChangelogToBranchExtension(scm);
project.setScm(scm);
// create initial commit and then run the build against it:
commit("commitFileBase", johnDoe, "Initial Commit");
testRepo.git.branch("integration");
build(project, Result.SUCCESS, "commitFileBase");
testRepo.git.checkout(null, "topic1");
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
final FreeStyleBuild build1 = build(project, Result.SUCCESS, commitFile1);
assertTrue(build1.getWorkspace().child(commitFile1).exists());
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
// do what the GitPublisher would do
testRepo.git.deleteBranch("integration");
testRepo.git.checkout("topic1", "integration");
testRepo.git.checkout("master", "topic2");
final String commitFile2 = "commitFile2";
commit(commitFile2, johnDoe, "Commit number 2");
assertTrue("scm polling did not detect commit2 change", project.poll(listener).hasChanges());
final FreeStyleBuild build2 = build(project, Result.SUCCESS, commitFile2);
assertTrue(build2.getWorkspace().child(commitFile2).exists());
rule.assertBuildStatusSuccess(build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
@Issue("JENKINS-20392")
@Test
public void testMergeChangelog() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
GitSCM scm = new GitSCM(
createRemoteRepositories(),
Collections.singletonList(new BranchSpec("*")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
Collections.<GitSCMExtension>emptyList());
scm.getExtensions().add(new PreBuildMerge(new UserMergeOptions("origin", "integration", "default", MergeCommand.GitPluginFastForwardMode.FF)));
addChangelogToBranchExtension(scm);
project.setScm(scm);
// create initial commit and then run the build against it:
// Here the changelog is by default empty (because the changelog for the first commit is always empty)
commit("commitFileBase", johnDoe, "Initial Commit");
testRepo.git.branch("integration");
build(project, Result.SUCCESS, "commitFileBase");
// Create second commit and run build
// Here the changelog should contain exactly this one new commit
testRepo.git.checkout("master", "topic2");
final String commitFile2 = "commitFile2";
String commitMessage = "Commit number 2";
commit(commitFile2, johnDoe, commitMessage);
final FreeStyleBuild build2 = build(project, Result.SUCCESS, commitFile2);
ChangeLogSet<? extends ChangeLogSet.Entry> changeLog = build2.getChangeSet();
assertEquals("Changelog should contain one item", 1, changeLog.getItems().length);
GitChangeSet singleChange = (GitChangeSet) changeLog.getItems()[0];
assertEquals("Changelog should contain commit number 2", commitMessage, singleChange.getComment().trim());
}
@Test
public void testMergeWithAgent() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
project.setAssignedLabel(rule.createSlave().getSelfLabel());
GitSCM scm = new GitSCM(
createRemoteRepositories(),
Collections.singletonList(new BranchSpec("*")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
Collections.<GitSCMExtension>emptyList());
scm.getExtensions().add(new PreBuildMerge(new UserMergeOptions("origin", "integration", null, null)));
addChangelogToBranchExtension(scm);
project.setScm(scm);
// create initial commit and then run the build against it:
commit("commitFileBase", johnDoe, "Initial Commit");
testRepo.git.branch("integration");
build(project, Result.SUCCESS, "commitFileBase");
testRepo.git.checkout(null, "topic1");
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
final FreeStyleBuild build1 = build(project, Result.SUCCESS, commitFile1);
assertTrue(build1.getWorkspace().child(commitFile1).exists());
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
// do what the GitPublisher would do
testRepo.git.deleteBranch("integration");
testRepo.git.checkout("topic1", "integration");
testRepo.git.checkout("master", "topic2");
final String commitFile2 = "commitFile2";
commit(commitFile2, johnDoe, "Commit number 2");
assertTrue("scm polling did not detect commit2 change", project.poll(listener).hasChanges());
final FreeStyleBuild build2 = build(project, Result.SUCCESS, commitFile2);
assertTrue(build2.getWorkspace().child(commitFile2).exists());
rule.assertBuildStatusSuccess(build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
@Test
public void testMergeFailed() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
GitSCM scm = new GitSCM(
createRemoteRepositories(),
Collections.singletonList(new BranchSpec("*")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
Collections.<GitSCMExtension>emptyList());
project.setScm(scm);
scm.getExtensions().add(new PreBuildMerge(new UserMergeOptions("origin", "integration", "", MergeCommand.GitPluginFastForwardMode.FF)));
addChangelogToBranchExtension(scm);
// create initial commit and then run the build against it:
commit("commitFileBase", johnDoe, "Initial Commit");
testRepo.git.branch("integration");
build(project, Result.SUCCESS, "commitFileBase");
testRepo.git.checkout(null, "topic1");
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
final FreeStyleBuild build1 = build(project, Result.SUCCESS, commitFile1);
assertTrue(build1.getWorkspace().child(commitFile1).exists());
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
// do what the GitPublisher would do
testRepo.git.deleteBranch("integration");
testRepo.git.checkout("topic1", "integration");
testRepo.git.checkout("master", "topic2");
commit(commitFile1, "other content", johnDoe, "Commit number 2");
assertTrue("scm polling did not detect commit2 change", project.poll(listener).hasChanges());
final FreeStyleBuild build2 = build(project, Result.FAILURE);
rule.assertBuildStatus(Result.FAILURE, build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
@Issue("JENKINS-25191")
@Test
public void testMultipleMergeFailed() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
GitSCM scm = new GitSCM(
createRemoteRepositories(),
Collections.singletonList(new BranchSpec("master")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
Collections.<GitSCMExtension>emptyList());
project.setScm(scm);
scm.getExtensions().add(new PreBuildMerge(new UserMergeOptions("origin", "integration1", "", MergeCommand.GitPluginFastForwardMode.FF)));
scm.getExtensions().add(new PreBuildMerge(new UserMergeOptions("origin", "integration2", "", MergeCommand.GitPluginFastForwardMode.FF)));
addChangelogToBranchExtension(scm);
commit("dummyFile", johnDoe, "Initial Commit");
testRepo.git.branch("integration1");
testRepo.git.branch("integration2");
build(project, Result.SUCCESS);
final String commitFile = "commitFile";
testRepo.git.checkoutBranch("integration1","master");
commit(commitFile,"abc", johnDoe, "merge conflict with integration2");
testRepo.git.checkoutBranch("integration2","master");
commit(commitFile,"cde", johnDoe, "merge conflict with integration1");
final FreeStyleBuild build = build(project, Result.FAILURE);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
@Test
public void testMergeFailedWithAgent() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
project.setAssignedLabel(rule.createSlave().getSelfLabel());
GitSCM scm = new GitSCM(
createRemoteRepositories(),
Collections.singletonList(new BranchSpec("*")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
Collections.<GitSCMExtension>emptyList());
scm.getExtensions().add(new PreBuildMerge(new UserMergeOptions("origin", "integration", null, null)));
addChangelogToBranchExtension(scm);
project.setScm(scm);
// create initial commit and then run the build against it:
commit("commitFileBase", johnDoe, "Initial Commit");
testRepo.git.branch("integration");
build(project, Result.SUCCESS, "commitFileBase");
testRepo.git.checkout(null, "topic1");
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
final FreeStyleBuild build1 = build(project, Result.SUCCESS, commitFile1);
assertTrue(build1.getWorkspace().child(commitFile1).exists());
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
// do what the GitPublisher would do
testRepo.git.deleteBranch("integration");
testRepo.git.checkout("topic1", "integration");
testRepo.git.checkout("master", "topic2");
commit(commitFile1, "other content", johnDoe, "Commit number 2");
assertTrue("scm polling did not detect commit2 change", project.poll(listener).hasChanges());
final FreeStyleBuild build2 = build(project, Result.FAILURE);
rule.assertBuildStatus(Result.FAILURE, build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
@Test
public void testMergeWithMatrixBuild() throws Exception {
//Create a matrix project and a couple of axes
MatrixProject project = rule.jenkins.createProject(MatrixProject.class, "xyz");
project.setAxes(new AxisList(new Axis("VAR","a","b")));
GitSCM scm = new GitSCM(
createRemoteRepositories(),
Collections.singletonList(new BranchSpec("*")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
Collections.<GitSCMExtension>emptyList());
scm.getExtensions().add(new PreBuildMerge(new UserMergeOptions("origin", "integration", null, null)));
addChangelogToBranchExtension(scm);
project.setScm(scm);
// create initial commit and then run the build against it:
commit("commitFileBase", johnDoe, "Initial Commit");
testRepo.git.branch("integration");
build(project, Result.SUCCESS, "commitFileBase");
testRepo.git.checkout(null, "topic1");
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
final MatrixBuild build1 = build(project, Result.SUCCESS, commitFile1);
assertTrue(build1.getWorkspace().child(commitFile1).exists());
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
// do what the GitPublisher would do
testRepo.git.deleteBranch("integration");
testRepo.git.checkout("topic1", "integration");
testRepo.git.checkout("master", "topic2");
final String commitFile2 = "commitFile2";
commit(commitFile2, johnDoe, "Commit number 2");
assertTrue("scm polling did not detect commit2 change", project.poll(listener).hasChanges());
final MatrixBuild build2 = build(project, Result.SUCCESS, commitFile2);
assertTrue(build2.getWorkspace().child(commitFile2).exists());
rule.assertBuildStatusSuccess(build2);
assertFalse("scm polling should not detect any more changes after build", project.poll(listener).hasChanges());
}
@Test
public void testEnvironmentVariableExpansion() throws Exception {
FreeStyleProject project = createFreeStyleProject();
project.setScm(new GitSCM("${CAT}"+testRepo.gitDir.getPath()));
// create initial commit and then run the build against it:
commit("a.txt", johnDoe, "Initial Commit");
build(project, Result.SUCCESS, "a.txt");
PollingResult r = project.poll(StreamTaskListener.fromStdout());
assertFalse(r.hasChanges());
commit("b.txt", johnDoe, "Another commit");
r = project.poll(StreamTaskListener.fromStdout());
assertTrue(r.hasChanges());
build(project, Result.SUCCESS, "b.txt");
}
@TestExtension("testEnvironmentVariableExpansion")
public static class SupplySomeEnvVars extends EnvironmentContributor {
@Override
public void buildEnvironmentFor(Run r, EnvVars envs, TaskListener listener) throws IOException, InterruptedException {
envs.put("CAT","");
}
}
private List<UserRemoteConfig> createRepoList(String url) {
List<UserRemoteConfig> repoList = new ArrayList<>();
repoList.add(new UserRemoteConfig(url, null, null, null));
return repoList;
}
/*
* Makes sure that git browser URL is preserved across config round trip.
*/
@Issue("JENKINS-22604")
@Test
public void testConfigRoundtripURLPreserved() throws Exception {
FreeStyleProject p = createFreeStyleProject();
final String url = "https://github.com/jenkinsci/jenkins";
GitRepositoryBrowser browser = new GithubWeb(url);
GitSCM scm = new GitSCM(createRepoList(url),
Collections.singletonList(new BranchSpec("")),
false, Collections.<SubmoduleConfig>emptyList(),
browser, null, null);
p.setScm(scm);
rule.configRoundtrip(p);
rule.assertEqualDataBoundBeans(scm,p.getScm());
assertEquals("Wrong key", "git " + url, scm.getKey());
}
/*
* Makes sure that git extensions are preserved across config round trip.
*/
@Issue("JENKINS-33695")
@Test
public void testConfigRoundtripExtensionsPreserved() throws Exception {
FreeStyleProject p = createFreeStyleProject();
final String url = "git://github.com/jenkinsci/git-plugin.git";
GitRepositoryBrowser browser = new GithubWeb(url);
GitSCM scm = new GitSCM(createRepoList(url),
Collections.singletonList(new BranchSpec("*/master")),
false, Collections.<SubmoduleConfig>emptyList(),
browser, null, null);
p.setScm(scm);
/* Assert that no extensions are loaded initially */
assertEquals(Collections.emptyList(), scm.getExtensions().toList());
/* Add LocalBranch extension */
LocalBranch localBranchExtension = new LocalBranch("**");
scm.getExtensions().add(localBranchExtension);
assertTrue(scm.getExtensions().toList().contains(localBranchExtension));
/* Save the configuration */
rule.configRoundtrip(p);
List<GitSCMExtension> extensions = scm.getExtensions().toList();
assertTrue(extensions.contains(localBranchExtension));
assertEquals("Wrong extension count before reload", 1, extensions.size());
/* Reload configuration from disk */
p.doReload();
GitSCM reloadedGit = (GitSCM) p.getScm();
List<GitSCMExtension> reloadedExtensions = reloadedGit.getExtensions().toList();
assertEquals("Wrong extension count after reload", 1, reloadedExtensions.size());
LocalBranch reloadedLocalBranch = (LocalBranch) reloadedExtensions.get(0);
assertEquals(localBranchExtension.getLocalBranch(), reloadedLocalBranch.getLocalBranch());
}
/*
* Makes sure that the configuration form works.
*/
@Test
public void testConfigRoundtrip() throws Exception {
FreeStyleProject p = createFreeStyleProject();
GitSCM scm = new GitSCM("https://github.com/jenkinsci/jenkins");
p.setScm(scm);
rule.configRoundtrip(p);
rule.assertEqualDataBoundBeans(scm,p.getScm());
}
/*
* Sample configuration that should result in no extensions at all
*/
@Test
public void testDataCompatibility1() throws Exception {
FreeStyleProject p = (FreeStyleProject) rule.jenkins.createProjectFromXML("foo", getClass().getResourceAsStream("GitSCMTest/old1.xml"));
GitSCM oldGit = (GitSCM) p.getScm();
assertEquals(Collections.emptyList(), oldGit.getExtensions().toList());
assertEquals(0, oldGit.getSubmoduleCfg().size());
assertEquals("git git://github.com/jenkinsci/model-ant-project.git", oldGit.getKey());
assertThat(oldGit.getEffectiveBrowser(), instanceOf(GithubWeb.class));
GithubWeb browser = (GithubWeb) oldGit.getEffectiveBrowser();
assertEquals(browser.getRepoUrl(), "https://github.com/jenkinsci/model-ant-project.git/");
}
@Test
public void testPleaseDontContinueAnyway() throws Exception {
// create an empty repository with some commits
testRepo.commit("a","foo",johnDoe, "added");
FreeStyleProject p = createFreeStyleProject();
p.setScm(new GitSCM(testRepo.gitDir.getAbsolutePath()));
rule.assertBuildStatusSuccess(p.scheduleBuild2(0));
// this should fail as it fails to fetch
p.setScm(new GitSCM("http://localhost:4321/no/such/repository.git"));
rule.assertBuildStatus(Result.FAILURE, p.scheduleBuild2(0).get());
}
@Issue("JENKINS-19108")
@Test
public void testCheckoutToSpecificBranch() throws Exception {
FreeStyleProject p = createFreeStyleProject();
GitSCM oldGit = new GitSCM("https://github.com/jenkinsci/model-ant-project.git/");
setupJGit(oldGit);
oldGit.getExtensions().add(new LocalBranch("master"));
p.setScm(oldGit);
FreeStyleBuild b = rule.assertBuildStatusSuccess(p.scheduleBuild2(0));
GitClient gc = Git.with(StreamTaskListener.fromStdout(),null).in(b.getWorkspace()).getClient();
gc.withRepository(new RepositoryCallback<Void>() {
public Void invoke(Repository repo, VirtualChannel channel) throws IOException, InterruptedException {
Ref head = repo.findRef("HEAD");
assertTrue("Detached HEAD",head.isSymbolic());
Ref t = head.getTarget();
assertEquals(t.getName(),"refs/heads/master");
return null;
}
});
}
/**
* Verifies that if the project specifies LocalBranch with a value of "**",
* the checkout is made to a local branch named after the remote branch, sans 'origin'.
* This feature is necessary to support Maven release builds that push an updated
* pom.xml to the remote branch as
* <pre>
* git push origin localbranch:localbranch
* </pre>
* @throws Exception on error
*/
@Test
public void testCheckoutToDefaultLocalBranch_StarStar() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
GitSCM git = (GitSCM)project.getScm();
git.getExtensions().add(new LocalBranch("**"));
FreeStyleBuild build1 = build(project, Result.SUCCESS, commitFile1);
assertEquals("GIT_BRANCH", "origin/master", getEnvVars(project).get(GitSCM.GIT_BRANCH));
assertEquals("GIT_LOCAL_BRANCH", "master", getEnvVars(project).get(GitSCM.GIT_LOCAL_BRANCH));
}
/**
* Verifies that if the project specifies LocalBranch with a null value (empty string),
* the checkout is made to a local branch named after the remote branch, sans 'origin'.
* This feature is necessary to support Maven release builds that push an updated
* pom.xml to the remote branch as
* <pre>
* git push origin localbranch:localbranch
* </pre>
* @throws Exception on error
*/
@Test
public void testCheckoutToDefaultLocalBranch_NULL() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
GitSCM git = (GitSCM)project.getScm();
git.getExtensions().add(new LocalBranch(""));
FreeStyleBuild build1 = build(project, Result.SUCCESS, commitFile1);
assertEquals("GIT_BRANCH", "origin/master", getEnvVars(project).get(GitSCM.GIT_BRANCH));
assertEquals("GIT_LOCAL_BRANCH", "master", getEnvVars(project).get(GitSCM.GIT_LOCAL_BRANCH));
}
/*
* Verifies that GIT_LOCAL_BRANCH is not set if LocalBranch extension
* is not configured.
*/
@Test
public void testCheckoutSansLocalBranchExtension() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
FreeStyleBuild build1 = build(project, Result.SUCCESS, commitFile1);
assertEquals("GIT_BRANCH", "origin/master", getEnvVars(project).get(GitSCM.GIT_BRANCH));
assertEquals("GIT_LOCAL_BRANCH", null, getEnvVars(project).get(GitSCM.GIT_LOCAL_BRANCH));
}
/*
* Verifies that GIT_CHECKOUT_DIR is set to "checkoutDir" if RelativeTargetDirectory extension
* is configured.
*/
@Test
public void testCheckoutRelativeTargetDirectoryExtension() throws Exception {
FreeStyleProject project = setupProject("master", false, "checkoutDir");
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
GitSCM git = (GitSCM)project.getScm();
git.getExtensions().add(new RelativeTargetDirectory("checkoutDir"));
FreeStyleBuild build1 = build(project, "checkoutDir", Result.SUCCESS, commitFile1);
assertEquals("GIT_CHECKOUT_DIR", "checkoutDir", getEnvVars(project).get(GitSCM.GIT_CHECKOUT_DIR));
}
/*
* Verifies that GIT_CHECKOUT_DIR is not set if RelativeTargetDirectory extension
* is not configured.
*/
@Test
public void testCheckoutSansRelativeTargetDirectoryExtension() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
FreeStyleBuild build1 = build(project, Result.SUCCESS, commitFile1);
assertEquals("GIT_CHECKOUT_DIR", null, getEnvVars(project).get(GitSCM.GIT_CHECKOUT_DIR));
}
@Test
public void testCheckoutFailureIsRetryable() throws Exception {
FreeStyleProject project = setupSimpleProject("master");
// run build first to create workspace
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
final FreeStyleBuild build1 = build(project, Result.SUCCESS, commitFile1);
final String commitFile2 = "commitFile2";
commit(commitFile2, janeDoe, "Commit number 2");
// create lock file to simulate lock collision
File lock = new File(build1.getWorkspace().getRemote(), ".git/index.lock");
try {
FileUtils.touch(lock);
final FreeStyleBuild build2 = build(project, Result.FAILURE);
rule.waitForMessage("java.io.IOException: Could not checkout", build2);
} finally {
lock.delete();
}
}
@Test
public void testInitSparseCheckout() throws Exception {
if (!sampleRepo.gitVersionAtLeast(1, 7, 10)) {
/* Older git versions have unexpected behaviors with sparse checkout */
return;
}
FreeStyleProject project = setupProject("master", Lists.newArrayList(new SparseCheckoutPath("toto")));
// run build first to create workspace
final String commitFile1 = "toto/commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
final String commitFile2 = "titi/commitFile2";
commit(commitFile2, johnDoe, "Commit number 2");
final FreeStyleBuild build1 = build(project, Result.SUCCESS);
assertTrue(build1.getWorkspace().child("toto").exists());
assertTrue(build1.getWorkspace().child(commitFile1).exists());
assertFalse(build1.getWorkspace().child("titi").exists());
assertFalse(build1.getWorkspace().child(commitFile2).exists());
}
@Test
public void testInitSparseCheckoutBis() throws Exception {
if (!sampleRepo.gitVersionAtLeast(1, 7, 10)) {
/* Older git versions have unexpected behaviors with sparse checkout */
return;
}
FreeStyleProject project = setupProject("master", Lists.newArrayList(new SparseCheckoutPath("titi")));
// run build first to create workspace
final String commitFile1 = "toto/commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
final String commitFile2 = "titi/commitFile2";
commit(commitFile2, johnDoe, "Commit number 2");
final FreeStyleBuild build1 = build(project, Result.SUCCESS);
assertTrue(build1.getWorkspace().child("titi").exists());
assertTrue(build1.getWorkspace().child(commitFile2).exists());
assertFalse(build1.getWorkspace().child("toto").exists());
assertFalse(build1.getWorkspace().child(commitFile1).exists());
}
@Test
public void testSparseCheckoutAfterNormalCheckout() throws Exception {
if (!sampleRepo.gitVersionAtLeast(1, 7, 10)) {
/* Older git versions have unexpected behaviors with sparse checkout */
return;
}
FreeStyleProject project = setupSimpleProject("master");
// run build first to create workspace
final String commitFile1 = "toto/commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
final String commitFile2 = "titi/commitFile2";
commit(commitFile2, johnDoe, "Commit number 2");
final FreeStyleBuild build1 = build(project, Result.SUCCESS);
assertTrue(build1.getWorkspace().child("titi").exists());
assertTrue(build1.getWorkspace().child(commitFile2).exists());
assertTrue(build1.getWorkspace().child("toto").exists());
assertTrue(build1.getWorkspace().child(commitFile1).exists());
((GitSCM) project.getScm()).getExtensions().add(new SparseCheckoutPaths(Lists.newArrayList(new SparseCheckoutPath("titi"))));
final FreeStyleBuild build2 = build(project, Result.SUCCESS);
assertTrue(build2.getWorkspace().child("titi").exists());
assertTrue(build2.getWorkspace().child(commitFile2).exists());
assertFalse(build2.getWorkspace().child("toto").exists());
assertFalse(build2.getWorkspace().child(commitFile1).exists());
}
@Test
public void testNormalCheckoutAfterSparseCheckout() throws Exception {
if (!sampleRepo.gitVersionAtLeast(1, 7, 10)) {
/* Older git versions have unexpected behaviors with sparse checkout */
return;
}
FreeStyleProject project = setupProject("master", Lists.newArrayList(new SparseCheckoutPath("titi")));
// run build first to create workspace
final String commitFile1 = "toto/commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
final String commitFile2 = "titi/commitFile2";
commit(commitFile2, johnDoe, "Commit number 2");
final FreeStyleBuild build2 = build(project, Result.SUCCESS);
assertTrue(build2.getWorkspace().child("titi").exists());
assertTrue(build2.getWorkspace().child(commitFile2).exists());
assertFalse(build2.getWorkspace().child("toto").exists());
assertFalse(build2.getWorkspace().child(commitFile1).exists());
((GitSCM) project.getScm()).getExtensions().remove(SparseCheckoutPaths.class);
final FreeStyleBuild build1 = build(project, Result.SUCCESS);
assertTrue(build1.getWorkspace().child("titi").exists());
assertTrue(build1.getWorkspace().child(commitFile2).exists());
assertTrue(build1.getWorkspace().child("toto").exists());
assertTrue(build1.getWorkspace().child(commitFile1).exists());
}
@Test
public void testInitSparseCheckoutOverAgent() throws Exception {
if (!sampleRepo.gitVersionAtLeast(1, 7, 10)) {
/* Older git versions have unexpected behaviors with sparse checkout */
return;
}
FreeStyleProject project = setupProject("master", Lists.newArrayList(new SparseCheckoutPath("titi")));
project.setAssignedLabel(rule.createSlave().getSelfLabel());
// run build first to create workspace
final String commitFile1 = "toto/commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
final String commitFile2 = "titi/commitFile2";
commit(commitFile2, johnDoe, "Commit number 2");
final FreeStyleBuild build1 = build(project, Result.SUCCESS);
assertTrue(build1.getWorkspace().child("titi").exists());
assertTrue(build1.getWorkspace().child(commitFile2).exists());
assertFalse(build1.getWorkspace().child("toto").exists());
assertFalse(build1.getWorkspace().child(commitFile1).exists());
}
@Test
@Issue("JENKINS-22009")
public void testPolling_environmentValueInBranchSpec() throws Exception {
// create parameterized project with environment value in branch specification
FreeStyleProject project = createFreeStyleProject();
GitSCM scm = new GitSCM(
createRemoteRepositories(),
Collections.singletonList(new BranchSpec("${MY_BRANCH}")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
Collections.<GitSCMExtension>emptyList());
project.setScm(scm);
project.addProperty(new ParametersDefinitionProperty(new StringParameterDefinition("MY_BRANCH", "master")));
// commit something in order to create an initial base version in git
commit("toto/commitFile1", johnDoe, "Commit number 1");
// build the project
build(project, Result.SUCCESS);
assertFalse("No changes to git since last build, thus no new build is expected", project.poll(listener).hasChanges());
}
@Issue("JENKINS-29066")
public void baseTestPolling_parentHead(List<GitSCMExtension> extensions) throws Exception {
// create parameterized project with environment value in branch specification
FreeStyleProject project = createFreeStyleProject();
GitSCM scm = new GitSCM(
createRemoteRepositories(),
Collections.singletonList(new BranchSpec("**")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
extensions);
project.setScm(scm);
// commit something in order to create an initial base version in git
commit("toto/commitFile1", johnDoe, "Commit number 1");
git.branch("someBranch");
commit("toto/commitFile2", johnDoe, "Commit number 2");
assertTrue("polling should detect changes",project.poll(listener).hasChanges());
// build the project
build(project, Result.SUCCESS);
/* Expects 1 build because the build of someBranch incorporates all
* the changes from the master branch as well as the changes from someBranch.
*/
assertEquals("Wrong number of builds", 1, project.getBuilds().size());
assertFalse("polling should not detect changes",project.poll(listener).hasChanges());
}
@Issue("JENKINS-29066")
@Test
public void testPolling_parentHead() throws Exception {
baseTestPolling_parentHead(Collections.<GitSCMExtension>emptyList());
}
@Issue("JENKINS-29066")
@Test
public void testPolling_parentHead_DisableRemotePoll() throws Exception {
baseTestPolling_parentHead(Collections.<GitSCMExtension>singletonList(new DisableRemotePoll()));
}
@Test
public void testPollingAfterManualBuildWithParametrizedBranchSpec() throws Exception {
// create parameterized project with environment value in branch specification
FreeStyleProject project = createFreeStyleProject();
GitSCM scm = new GitSCM(
createRemoteRepositories(),
Collections.singletonList(new BranchSpec("${MY_BRANCH}")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
Collections.<GitSCMExtension>emptyList());
project.setScm(scm);
project.addProperty(new ParametersDefinitionProperty(new StringParameterDefinition("MY_BRANCH", "trackedbranch")));
// Initial commit to master
commit("file1", johnDoe, "Initial Commit");
// Create the branches
git.branch("trackedbranch");
git.branch("manualbranch");
final StringParameterValue branchParam = new StringParameterValue("MY_BRANCH", "manualbranch");
final Action[] actions = {new ParametersAction(branchParam)};
FreeStyleBuild build = project.scheduleBuild2(0, new Cause.UserIdCause(), actions).get();
rule.assertBuildStatus(Result.SUCCESS, build);
assertFalse("No changes to git since last build", project.poll(listener).hasChanges());
git.checkout("manualbranch");
commit("file2", johnDoe, "Commit to manually build branch");
assertFalse("No changes to tracked branch", project.poll(listener).hasChanges());
git.checkout("trackedbranch");
commit("file3", johnDoe, "Commit to tracked branch");
assertTrue("A change should be detected in tracked branch", project.poll(listener).hasChanges());
}
private final class FakeParametersAction implements EnvironmentContributingAction, Serializable {
// Test class for testPolling_environmentValueAsEnvironmentContributingAction test case
final ParametersAction m_forwardingAction;
public FakeParametersAction(StringParameterValue params) {
this.m_forwardingAction = new ParametersAction(params);
}
@Deprecated
public void buildEnvVars(AbstractBuild<?, ?> ab, EnvVars ev) {
this.m_forwardingAction.buildEnvVars(ab, ev);
}
public String getIconFileName() {
return this.m_forwardingAction.getIconFileName();
}
public String getDisplayName() {
return this.m_forwardingAction.getDisplayName();
}
public String getUrlName() {
return this.m_forwardingAction.getUrlName();
}
public List<ParameterValue> getParameters() {
return this.m_forwardingAction.getParameters();
}
private void writeObject(java.io.ObjectOutputStream out) throws IOException {
}
private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException {
}
private void readObjectNoData() throws ObjectStreamException {
}
}
@Test
public void testPolling_CanDoRemotePollingIfOneBranchButMultipleRepositories() throws Exception {
FreeStyleProject project = createFreeStyleProject();
List<UserRemoteConfig> remoteConfigs = new ArrayList<>();
remoteConfigs.add(new UserRemoteConfig(testRepo.gitDir.getAbsolutePath(), "origin", "", null));
remoteConfigs.add(new UserRemoteConfig(testRepo.gitDir.getAbsolutePath(), "someOtherRepo", "", null));
GitSCM scm = new GitSCM(remoteConfigs,
Collections.singletonList(new BranchSpec("origin/master")), false,
Collections.<SubmoduleConfig> emptyList(), null, null,
Collections.<GitSCMExtension> emptyList());
project.setScm(scm);
commit("commitFile1", johnDoe, "Commit number 1");
FreeStyleBuild first_build = project.scheduleBuild2(0, new Cause.UserIdCause()).get();
rule.assertBuildStatus(Result.SUCCESS, first_build);
first_build.getWorkspace().deleteContents();
PollingResult pollingResult = scm.poll(project, null, first_build.getWorkspace(), listener, null);
assertFalse(pollingResult.hasChanges());
}
@Issue("JENKINS-24467")
@Test
public void testPolling_environmentValueAsEnvironmentContributingAction() throws Exception {
// create parameterized project with environment value in branch specification
FreeStyleProject project = createFreeStyleProject();
GitSCM scm = new GitSCM(
createRemoteRepositories(),
Collections.singletonList(new BranchSpec("${MY_BRANCH}")),
false, Collections.<SubmoduleConfig>emptyList(),
null, null,
Collections.<GitSCMExtension>emptyList());
project.setScm(scm);
// Initial commit and build
commit("toto/commitFile1", johnDoe, "Commit number 1");
String brokenPath = "\\broken/path\\of/doom";
if (!sampleRepo.gitVersionAtLeast(1, 8)) {
/* Git 1.7.10.4 fails the first build unless the git-upload-pack
* program is available in its PATH.
* Later versions of git don't have that problem.
*/
final String systemPath = System.getenv("PATH");
brokenPath = systemPath + File.pathSeparator + brokenPath;
}
final StringParameterValue real_param = new StringParameterValue("MY_BRANCH", "master");
final StringParameterValue fake_param = new StringParameterValue("PATH", brokenPath);
final Action[] actions = {new ParametersAction(real_param), new FakeParametersAction(fake_param)};
// SECURITY-170 - have to use ParametersDefinitionProperty
project.addProperty(new ParametersDefinitionProperty(new StringParameterDefinition("MY_BRANCH", "master")));
FreeStyleBuild first_build = project.scheduleBuild2(0, new Cause.UserIdCause(), actions).get();
rule.assertBuildStatus(Result.SUCCESS, first_build);
Launcher launcher = workspace.createLauncher(listener);
final EnvVars environment = GitUtils.getPollEnvironment(project, workspace, launcher, listener);
assertEquals(environment.get("MY_BRANCH"), "master");
assertNotSame("Environment path should not be broken path", environment.get("PATH"), brokenPath);
}
/**
* Tests that builds have the correctly specified Custom SCM names, associated with each build.
* @throws Exception on error
*/
@Ignore("Intermittent failures on stable-3.10 branch and master branch, not on stable-3.9")
@Test
public void testCustomSCMName() throws Exception {
final String branchName = "master";
final FreeStyleProject project = setupProject(branchName, false);
project.addTrigger(new SCMTrigger(""));
GitSCM git = (GitSCM) project.getScm();
setupJGit(git);
final String commitFile1 = "commitFile1";
final String scmNameString1 = "";
commit(commitFile1, johnDoe, "Commit number 1");
assertTrue("scm polling should not detect any more changes after build",
project.poll(listener).hasChanges());
build(project, Result.SUCCESS, commitFile1);
final ObjectId commit1 = testRepo.git.revListAll().get(0);
// Check unset build SCM Name carries
final int buildNumber1 = notifyAndCheckScmName(
project, commit1, scmNameString1, 1, git);
final String scmNameString2 = "ScmName2";
git.getExtensions().replace(new ScmName(scmNameString2));
commit("commitFile2", johnDoe, "Commit number 2");
assertTrue("scm polling should detect commit 2 (commit1=" + commit1 + ")", project.poll(listener).hasChanges());
final ObjectId commit2 = testRepo.git.revListAll().get(0);
// Check second set SCM Name
final int buildNumber2 = notifyAndCheckScmName(
project, commit2, scmNameString2, 2, git, commit1);
checkNumberedBuildScmName(project, buildNumber1, scmNameString1, git);
final String scmNameString3 = "ScmName3";
git.getExtensions().replace(new ScmName(scmNameString3));
commit("commitFile3", johnDoe, "Commit number 3");
assertTrue("scm polling should detect commit 3, (commit2=" + commit2 + ",commit1=" + commit1 + ")", project.poll(listener).hasChanges());
final ObjectId commit3 = testRepo.git.revListAll().get(0);
// Check third set SCM Name
final int buildNumber3 = notifyAndCheckScmName(
project, commit3, scmNameString3, 3, git, commit2, commit1);
checkNumberedBuildScmName(project, buildNumber1, scmNameString1, git);
checkNumberedBuildScmName(project, buildNumber2, scmNameString2, git);
commit("commitFile4", johnDoe, "Commit number 4");
assertTrue("scm polling should detect commit 4 (commit3=" + commit3 + ",commit2=" + commit2 + ",commit1=" + commit1 + ")", project.poll(listener).hasChanges());
final ObjectId commit4 = testRepo.git.revListAll().get(0);
// Check third set SCM Name still set
final int buildNumber4 = notifyAndCheckScmName(
project, commit4, scmNameString3, 4, git, commit3, commit2, commit1);
checkNumberedBuildScmName(project, buildNumber1, scmNameString1, git);
checkNumberedBuildScmName(project, buildNumber2, scmNameString2, git);
checkNumberedBuildScmName(project, buildNumber3, scmNameString3, git);
}
/**
* Method performs HTTP get on "notifyCommit" URL, passing it commit by SHA1
* and tests for custom SCM name build data consistency.
* @param project project to build
* @param commit commit to build
* @param expectedScmName Expected SCM name for commit.
* @param ordinal number of commit to log into errors, if any
* @param git git SCM
* @throws Exception on error
*/
private int notifyAndCheckScmName(FreeStyleProject project, ObjectId commit,
String expectedScmName, int ordinal, GitSCM git, ObjectId... priorCommits) throws Exception {
String priorCommitIDs = "";
for (ObjectId priorCommit : priorCommits) {
priorCommitIDs = priorCommitIDs + " " + priorCommit;
}
assertTrue("scm polling should detect commit " + ordinal, notifyCommit(project, commit));
final Build build = project.getLastBuild();
final BuildData buildData = git.getBuildData(build);
assertEquals("Expected SHA1 != built SHA1 for commit " + ordinal + " priors:" + priorCommitIDs, commit, buildData
.getLastBuiltRevision().getSha1());
assertEquals("Expected SHA1 != retrieved SHA1 for commit " + ordinal + " priors:" + priorCommitIDs, commit, buildData.getLastBuild(commit).getSHA1());
assertTrue("Commit " + ordinal + " not marked as built", buildData.hasBeenBuilt(commit));
assertEquals("Wrong SCM Name for commit " + ordinal, expectedScmName, buildData.getScmName());
return build.getNumber();
}
private void checkNumberedBuildScmName(FreeStyleProject project, int buildNumber,
String expectedScmName, GitSCM git) throws Exception {
final BuildData buildData = git.getBuildData(project.getBuildByNumber(buildNumber));
assertEquals("Wrong SCM Name", expectedScmName, buildData.getScmName());
}
/*
* Tests that builds have the correctly specified branches, associated with
* the commit id, passed with "notifyCommit" URL.
*/
@Ignore("Intermittent failures on stable-3.10 branch, not on stable-3.9 or master")
@Issue("JENKINS-24133")
// Flaky test distracting from primary focus
// @Test
public void testSha1NotificationBranches() throws Exception {
final String branchName = "master";
final FreeStyleProject project = setupProject(branchName, false);
project.addTrigger(new SCMTrigger(""));
final GitSCM git = (GitSCM) project.getScm();
setupJGit(git);
final String commitFile1 = "commitFile1";
commit(commitFile1, johnDoe, "Commit number 1");
assertTrue("scm polling should detect commit 1",
project.poll(listener).hasChanges());
build(project, Result.SUCCESS, commitFile1);
final ObjectId commit1 = testRepo.git.revListAll().get(0);
notifyAndCheckBranch(project, commit1, branchName, 1, git);
commit("commitFile2", johnDoe, "Commit number 2");
assertTrue("scm polling should detect commit 2", project.poll(listener).hasChanges());
final ObjectId commit2 = testRepo.git.revListAll().get(0);
notifyAndCheckBranch(project, commit2, branchName, 2, git);
notifyAndCheckBranch(project, commit1, branchName, 1, git);
}
/* A null pointer exception was detected because the plugin failed to
* write a branch name to the build data, so there was a SHA1 recorded
* in the build data, but no branch name.
*/
@Test
@Deprecated // Testing deprecated buildEnvVars
public void testNoNullPointerExceptionWithNullBranch() throws Exception {
ObjectId sha1 = ObjectId.fromString("2cec153f34767f7638378735dc2b907ed251a67d");
/* This is the null that causes NPE */
Branch branch = new Branch(null, sha1);
List<Branch> branchList = new ArrayList<>();
branchList.add(branch);
Revision revision = new Revision(sha1, branchList);
/* BuildData mock that will use the Revision with null branch name */
BuildData buildData = Mockito.mock(BuildData.class);
Mockito.when(buildData.getLastBuiltRevision()).thenReturn(revision);
Mockito.when(buildData.hasBeenReferenced(anyString())).thenReturn(true);
/* List of build data that will be returned by the mocked BuildData */
List<BuildData> buildDataList = new ArrayList<>();
buildDataList.add(buildData);
/* AbstractBuild mock which returns the buildDataList that contains a null branch name */
AbstractBuild build = Mockito.mock(AbstractBuild.class);
Mockito.when(build.getActions(BuildData.class)).thenReturn(buildDataList);
final FreeStyleProject project = setupProject("*/*", false);
GitSCM scm = (GitSCM) project.getScm();
scm.buildEnvVars(build, new EnvVars()); // NPE here before fix applied
/* Verify mocks were called as expected */
verify(buildData, times(1)).getLastBuiltRevision();
verify(buildData, times(1)).hasBeenReferenced(anyString());
verify(build, times(1)).getActions(BuildData.class);
}
@Test
@Deprecated // Testing deprecated buildEnvVars
public void testBuildEnvVarsLocalBranchStarStar() throws Exception {
ObjectId sha1 = ObjectId.fromString("2cec153f34767f7638378735dc2b907ed251a67d");
/* Unlike the null-branch NPE test above, this branch has a valid name */
Branch branch = new Branch("origin/master", sha1);
List<Branch> branchList = new ArrayList<>();
branchList.add(branch);
Revision revision = new Revision(sha1, branchList);
/* BuildData mock that will return the Revision with the named branch */
BuildData buildData = Mockito.mock(BuildData.class);
Mockito.when(buildData.getLastBuiltRevision()).thenReturn(revision);
Mockito.when(buildData.hasBeenReferenced(anyString())).thenReturn(true);
/* List of build data that will be returned by the mocked BuildData */
List<BuildData> buildDataList = new ArrayList<>();
buildDataList.add(buildData);
/* AbstractBuild mock which returns the buildDataList built above */
AbstractBuild build = Mockito.mock(AbstractBuild.class);
Mockito.when(build.getActions(BuildData.class)).thenReturn(buildDataList);
final FreeStyleProject project = setupProject("*/*", false);
GitSCM scm = (GitSCM) project.getScm();
scm.getExtensions().add(new LocalBranch("**"));
EnvVars env = new EnvVars();
scm.buildEnvVars(build, env); // NPE here before fix applied
assertEquals("GIT_BRANCH", "origin/master", env.get("GIT_BRANCH"));
assertEquals("GIT_LOCAL_BRANCH", "master", env.get("GIT_LOCAL_BRANCH"));
/* Verify mocks were called as expected */
verify(buildData, times(1)).getLastBuiltRevision();
verify(buildData, times(1)).hasBeenReferenced(anyString());
verify(build, times(1)).getActions(BuildData.class);
}
@Test
@Deprecated // Testing deprecated buildEnvVars
public void testBuildEnvVarsLocalBranchNull() throws Exception {
ObjectId sha1 = ObjectId.fromString("2cec153f34767f7638378735dc2b907ed251a67d");
/* Unlike the null-branch NPE test above, this branch has a valid name */
Branch branch = new Branch("origin/master", sha1);
List<Branch> branchList = new ArrayList<>();
branchList.add(branch);
Revision revision = new Revision(sha1, branchList);
/* BuildData mock that will return the Revision with the named branch */
BuildData buildData = Mockito.mock(BuildData.class);
Mockito.when(buildData.getLastBuiltRevision()).thenReturn(revision);
Mockito.when(buildData.hasBeenReferenced(anyString())).thenReturn(true);
/* List of build data that will be returned by the mocked BuildData */
List<BuildData> buildDataList = new ArrayList<>();
buildDataList.add(buildData);
/* AbstractBuild mock which returns the buildDataList built above */
AbstractBuild build = Mockito.mock(AbstractBuild.class);
Mockito.when(build.getActions(BuildData.class)).thenReturn(buildDataList);
final FreeStyleProject project = setupProject("*/*", false);
GitSCM scm = (GitSCM) project.getScm();
scm.getExtensions().add(new LocalBranch(""));
EnvVars env = new EnvVars();
scm.buildEnvVars(build, env); // NPE here before fix applied
assertEquals("GIT_BRANCH", "origin/master", env.get("GIT_BRANCH"));
assertEquals("GIT_LOCAL_BRANCH", "master", env.get("GIT_LOCAL_BRANCH"));
/* Verify mocks were called as expected */
verify(buildData, times(1)).getLastBuiltRevision();
verify(buildData, times(1)).hasBeenReferenced(anyString());
verify(build, times(1)).getActions(BuildData.class);
}
@Test
@Deprecated // testing deprecated buildEnvVars
public void testBuildEnvVarsLocalBranchNotSet() throws Exception {
ObjectId sha1 = ObjectId.fromString("2cec153f34767f7638378735dc2b907ed251a67d");
/* Unlike the null-branch NPE test above, this branch has a valid name */
Branch branch = new Branch("origin/master", sha1);
List<Branch> branchList = new ArrayList<>();
branchList.add(branch);
Revision revision = new Revision(sha1, branchList);
/* BuildData mock that will return the Revision with the named branch */
BuildData buildData = Mockito.mock(BuildData.class);
Mockito.when(buildData.getLastBuiltRevision()).thenReturn(revision);
Mockito.when(buildData.hasBeenReferenced(anyString())).thenReturn(true);
/* List of build data that will be returned by the mocked BuildData */
List<BuildData> buildDataList = new ArrayList<>();
buildDataList.add(buildData);
/* AbstractBuild mock which returns the buildDataList built above */
AbstractBuild build = Mockito.mock(AbstractBuild.class);
Mockito.when(build.getActions(BuildData.class)).thenReturn(buildDataList);
final FreeStyleProject project = setupProject("*/*", false);
GitSCM scm = (GitSCM) project.getScm();
EnvVars env = new EnvVars();
scm.buildEnvVars(build, env); // NPE here before fix applied
assertEquals("GIT_BRANCH", "origin/master", env.get("GIT_BRANCH"));
assertEquals("GIT_LOCAL_BRANCH", null, env.get("GIT_LOCAL_BRANCH"));
/* Verify mocks were called as expected */
verify(buildData, times(1)).getLastBuiltRevision();
verify(buildData, times(1)).hasBeenReferenced(anyString());
verify(build, times(1)).getActions(BuildData.class);
}
@Issue("JENKINS-38241")
@Test
public void testCommitMessageIsPrintedToLogs() throws Exception {
sampleRepo.init();
sampleRepo.write("file", "v1");
sampleRepo.git("commit", "--all", "--message=test commit");
FreeStyleProject p = setupSimpleProject("master");
Run<?,?> run = rule.buildAndAssertSuccess(p);
TaskListener mockListener = Mockito.mock(TaskListener.class);
Mockito.when(mockListener.getLogger()).thenReturn(Mockito.spy(StreamTaskListener.fromStdout().getLogger()));
p.getScm().checkout(run, new Launcher.LocalLauncher(listener),
new FilePath(run.getRootDir()).child("tmp-" + "master"),
mockListener, null, SCMRevisionState.NONE);
ArgumentCaptor<String> logCaptor = ArgumentCaptor.forClass(String.class);
verify(mockListener.getLogger(), atLeastOnce()).println(logCaptor.capture());
List<String> values = logCaptor.getAllValues();
assertThat(values, hasItem("Commit message: \"test commit\""));
}
/**
* Method performs HTTP get on "notifyCommit" URL, passing it commit by SHA1
* and tests for build data consistency.
* @param project project to build
* @param commit commit to build
* @param expectedBranch branch, that is expected to be built
* @param ordinal number of commit to log into errors, if any
* @param git git SCM
* @throws Exception on error
*/
private void notifyAndCheckBranch(FreeStyleProject project, ObjectId commit,
String expectedBranch, int ordinal, GitSCM git) throws Exception {
assertTrue("scm polling should detect commit " + ordinal, notifyCommit(project, commit));
final BuildData buildData = git.getBuildData(project.getLastBuild());
final Collection<Branch> builtBranches = buildData.lastBuild.getRevision().getBranches();
assertEquals("Commit " + ordinal + " should be built", commit, buildData
.getLastBuiltRevision().getSha1());
final String expectedBranchString = "origin/" + expectedBranch;
assertFalse("Branches should be detected for the build", builtBranches.isEmpty());
assertEquals(expectedBranch + " branch should be detected", expectedBranchString,
builtBranches.iterator().next().getName());
assertEquals(expectedBranchString, getEnvVars(project).get(GitSCM.GIT_BRANCH));
}
/**
* Method performs commit notification for the last committed SHA1 using
* notifyCommit URL.
* @param project project to trigger
* @return whether the new build has been triggered (<code>true</code>) or
* not (<code>false</code>).
* @throws Exception on error
*/
private boolean notifyCommit(FreeStyleProject project, ObjectId commitId) throws Exception {
final int initialBuildNumber = project.getLastBuild().getNumber();
final String commit1 = ObjectId.toString(commitId);
final String notificationPath = rule.getURL().toExternalForm()
+ "git/notifyCommit?url=" + testRepo.gitDir.toString() + "&sha1=" + commit1;
final URL notifyUrl = new URL(notificationPath);
String notifyContent = null;
try (final InputStream is = notifyUrl.openStream()) {
notifyContent = IOUtils.toString(is, "UTF-8");
}
assertThat(notifyContent, containsString("No Git consumers using SCM API plugin for: " + testRepo.gitDir.toString()));
if ((project.getLastBuild().getNumber() == initialBuildNumber)
&& (rule.jenkins.getQueue().isEmpty())) {
return false;
} else {
while (!rule.jenkins.getQueue().isEmpty()) {
Thread.sleep(100);
}
final FreeStyleBuild build = project.getLastBuild();
while (build.isBuilding()) {
Thread.sleep(100);
}
return true;
}
}
private void setupJGit(GitSCM git) {
git.gitTool="jgit";
rule.jenkins.getDescriptorByType(GitTool.DescriptorImpl.class).setInstallations(new JGitTool(Collections.<ToolProperty<?>>emptyList()));
}
/** We clean the environment, just in case the test is being run from a Jenkins job using this same plugin :). */
@TestExtension
public static class CleanEnvironment extends EnvironmentContributor {
@Override
public void buildEnvironmentFor(Run run, EnvVars envs, TaskListener listener) {
envs.remove(GitSCM.GIT_BRANCH);
envs.remove(GitSCM.GIT_LOCAL_BRANCH);
envs.remove(GitSCM.GIT_COMMIT);
envs.remove(GitSCM.GIT_PREVIOUS_COMMIT);
envs.remove(GitSCM.GIT_PREVIOUS_SUCCESSFUL_COMMIT);
}
}
/** Returns true if test cleanup is not reliable */
private boolean cleanupIsUnreliable() {
// Windows cleanup is unreliable on ci.jenkins.io
String jobUrl = System.getenv("JOB_URL");
return isWindows() && jobUrl != null && jobUrl.contains("ci.jenkins.io");
}
/** inline ${@link hudson.Functions#isWindows()} to prevent a transient remote classloader issue */
private boolean isWindows() {
return java.io.File.pathSeparatorChar==';';
}
}
| ["\"PATH\"", "\"JOB_URL\""] | [] | ["PATH", "JOB_URL"] | [] | ["PATH", "JOB_URL"] | java | 2 | 0 |
input/litdata/ferland1980.py | import os
import numpy as np
info = \
{
'reference': 'Ferland 1980',
'data': 'Table 1'
}
def _load():
ARES = os.environ.get('ARES')
E, T10, T20 = np.loadtxt('{}/input/litdata/ferland1980.txt'.format(ARES),
delimiter=',')
return E, T10, T20
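# A minimal usage sketch, assuming the ARES environment variable is set and that
# ferland1980.txt is shaped so the three-way unpack in _load() succeeds:
if __name__ == '__main__':
    E, T10, T20 = _load()
    print(len(E), 'energy points loaded from Ferland 1980, Table 1')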
| [] | [] | ["ARES"] | [] | ["ARES"] | python | 1 | 0 |
examples/pwr_run/checkpointing/final/no_safeguard/job61.py | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
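# A hypothetical invocation sketch; the testcase name, GPU index and scheduler host
# below are placeholders, not values taken from this repository:
#   python job61.py --tc <testcase> --gpu_num 0 --node <scheduler-host>
# Pass --resume to continue from the latest checkpoint in checkpoint_no_safeguard/.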
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 64
args_lr = 0.008
args_model = 'vgg16'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_no_safeguard/' + job_name + '*'
total_epochs = 8
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_no_safeguard/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| [] | [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 |
main.py | '''
Readme development metrics with WakaTime progress
'''
import re
import os
import base64
from pytz import timezone
import pytz
import requests
from github import Github, GithubException, InputGitAuthor
import datetime
from string import Template
from loc import LinesOfCode
import time
import traceback
import humanize
from urllib.parse import quote
import json
import sys
from datetime import date
import math
from dotenv import load_dotenv
load_dotenv()
START_COMMENT = '<!--START_SECTION:waka-->'
END_COMMENT = '<!--END_SECTION:waka-->'
listReg = f"{START_COMMENT}[\\s\\S]+{END_COMMENT}"
waka_key = os.getenv('INPUT_WAKATIME_API_KEY')
ghtoken = os.getenv('INPUT_GH_TOKEN')
showTimeZone = os.getenv('INPUT_SHOW_TIMEZONE')
showProjects = os.getenv('INPUT_SHOW_PROJECTS')
showEditors = os.getenv('INPUT_SHOW_EDITORS')
showOs = os.getenv('INPUT_SHOW_OS')
showCommit = os.getenv('INPUT_SHOW_COMMIT')
showLanguage = os.getenv('INPUT_SHOW_LANGUAGE')
show_loc = os.getenv('INPUT_SHOW_LINES_OF_CODE')
show_days_of_week = os.getenv('INPUT_SHOW_DAYS_OF_WEEK')
showLanguagePerRepo = os.getenv('INPUT_SHOW_LANGUAGE_PER_REPO')
showLocChart = os.getenv('INPUT_SHOW_LOC_CHART')
show_profile_view = os.getenv('INPUT_SHOW_PROFILE_VIEWS')
show_short_info = os.getenv('INPUT_SHOW_SHORT_INFO')
locale = os.getenv('INPUT_LOCALE')
commit_by_me = os.getenv('INPUT_COMMIT_BY_ME')
ignored_repos_name = str(os.getenv('INPUT_IGNORED_REPOS') or '').replace(' ', '').split(',')
show_updated_date = os.getenv('INPUT_SHOW_UPDATED_DATE')
commit_message = os.getenv('INPUT_COMMIT_MESSAGE')
show_total_code_time = os.getenv('INPUT_SHOW_TOTAL_CODE_TIME')
symbol_version = str(os.getenv('INPUT_SYMBOL_VERSION') or '1').strip()  # fall back to bar style 1 when unset
show_waka_stats = 'y'
# The GraphQL query to get the authenticated user's login, email and id.
userInfoQuery = """
{
viewer {
login
email
id
}
}
"""
createContributedRepoQuery = Template("""query {
user(login: "$username") {
repositoriesContributedTo(last: 100, includeUserRepositories: true) {
nodes {
isFork
name
owner {
login
}
}
}
}
}
""")
createCommittedDateQuery = Template("""
query {
repository(owner: "$owner", name: "$name") {
defaultBranchRef {
target {
... on Commit {
history(first: 100, author: { id: "$id" }) {
edges {
node {
committedDate
}
}
}
}
}
}
}
}
""")
get_loc_url = Template("""/repos/$owner/$repo/stats/code_frequency""")
get_profile_view = Template("""/repos/$owner/$repo/traffic/views?per=week""")
get_profile_traffic = Template("""/repos/$owner/$repo/traffic/popular/referrers""")
truthy = ['true', '1', 't', 'y', 'yes']
def run_v3_api(query):
request = requests.get('https://api.github.com' + query, headers=headers)
if request.status_code == 200:
return request.json()
else:
raise Exception(
"Query failed to run by returning code of {}. {},... {}".format(request.status_code, query,
str(request.json())))
repositoryListQuery = Template("""
{
user(login: "$username") {
repositories(orderBy: {field: CREATED_AT, direction: ASC}, last: 100, affiliations: [OWNER, COLLABORATOR, ORGANIZATION_MEMBER], isFork: false) {
totalCount
edges {
node {
object(expression:"master") {
... on Commit {
history (author: { id: "$id" }){
totalCount
}
}
}
primaryLanguage {
color
name
id
}
stargazers {
totalCount
}
collaborators {
totalCount
}
createdAt
name
owner {
id
login
}
nameWithOwner
}
}
}
location
createdAt
name
}
}
""")
def millify(n):
millnames = ['', ' Thousand', ' Million', ' Billion', ' Trillion']
n = float(n)
millidx = max(0, min(len(millnames) - 1,
int(math.floor(0
if n == 0
else math.log10(abs(n)) / 3))))
return '{:.0f}{}'.format(n / 10 ** (3 * millidx), millnames[millidx])
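# Worked example: millify(12345) -> floor(log10(12345)/3) = 1, so the value is scaled
# by 10**3 and formatted with no decimals, giving '12 Thousand'.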
def run_query(query):
request = requests.post('https://api.github.com/graphql', json={'query': query}, headers=headers)
if request.status_code == 200:
return request.json()
else:
raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query))
def make_graph(percent: float):
'''Render a 25-cell progress bar for the given percentage'''
if (symbol_version == '1'): # version 1
done_block = '█'
empty_block = '░'
elif (symbol_version == '2'): #version 2
done_block = '⣿'
empty_block = '⣀'
elif (symbol_version == '3'): # version 3
done_block = '⬛'
empty_block = '⬜'
else:
done_block = '█' #default is version 1
empty_block = '░'
pc_rnd = round(percent)
return f"{done_block * int(pc_rnd / 4)}{empty_block * int(25 - int(pc_rnd / 4))}"
def make_list(data: list):
'''Make List'''
data_list = []
for l in data[:5]:
ln = len(l['name'])
ln_text = len(l['text'])
op = f"{l['name'][:25]}{' ' * (25 - ln)}{l['text']}{' ' * (20 - ln_text)}{make_graph(l['percent'])} {l['percent']}%"
data_list.append(op)
return ' \n'.join(data_list)
def make_commit_list(data: list):
'''Make List'''
data_list = []
for l in data[:7]:
ln = len(l['name'])
ln_text = len(l['text'])
op = f"{l['name']}{' ' * (13 - ln)}{l['text']}{' ' * (15 - ln_text)}{make_graph(l['percent'])} {l['percent']}%"
data_list.append(op)
return ' \n'.join(data_list)
def generate_commit_list(tz):
string = ''
result = run_query(userInfoQuery) # Execute the query
username = result["data"]["viewer"]["login"]
id = result["data"]["viewer"]["id"]
# print("user {}".format(username))
result = run_query(createContributedRepoQuery.substitute(username=username))
nodes = result["data"]["user"]["repositoriesContributedTo"]["nodes"]
repos = [d for d in nodes if d['isFork'] is False]
morning = 0 # 6 - 12
daytime = 0 # 12 - 18
evening = 0 # 18 - 24
night = 0 # 0 - 6
Monday = 0
Tuesday = 0
Wednesday = 0
Thursday = 0
Friday = 0
Saturday = 0
Sunday = 0
for repository in repos:
result = run_query(
createCommittedDateQuery.substitute(owner=repository["owner"]["login"], name=repository["name"], id=id))
try:
committed_dates = result["data"]["repository"]["defaultBranchRef"]["target"]["history"]["edges"]
for committedDate in committed_dates:
date = datetime.datetime.strptime(committedDate["node"]["committedDate"],
"%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=pytz.utc).astimezone(
timezone(tz))
hour = date.hour
weekday = date.strftime('%A')
if 6 <= hour < 12:
morning += 1
if 12 <= hour < 18:
daytime += 1
if 18 <= hour < 24:
evening += 1
if 0 <= hour < 6:
night += 1
if weekday == "Monday":
Monday += 1
if weekday == "Tuesday":
Tuesday += 1
if weekday == "Wednesday":
Wednesday += 1
if weekday == "Thursday":
Thursday += 1
if weekday == "Friday":
Friday += 1
if weekday == "Saturday":
Saturday += 1
if weekday == "Sunday":
Sunday += 1
except Exception as ex:
if str(ex) != "'NoneType' object is not subscriptable":
print("Exception occurred " + str(ex))
sumAll = morning + daytime + evening + night
sum_week = Sunday + Monday + Tuesday + Friday + Saturday + Wednesday + Thursday
title = translate['I am an Early'] if morning + daytime >= evening + night else translate['I am a Night']
one_day = [
{"name": "🌞 " + translate['Morning'], "text": str(morning) + " commits",
"percent": round((morning / sumAll) * 100, 2)},
{"name": "🌆 " + translate['Daytime'], "text": str(daytime) + " commits",
"percent": round((daytime / sumAll) * 100, 2)},
{"name": "🌃 " + translate['Evening'], "text": str(evening) + " commits",
"percent": round((evening / sumAll) * 100, 2)},
{"name": "🌙 " + translate['Night'], "text": str(night) + " commits",
"percent": round((night / sumAll) * 100, 2)},
]
dayOfWeek = [
{"name": translate['Monday'], "text": str(Monday) + " commits", "percent": round((Monday / sum_week) * 100, 2)},
{"name": translate['Tuesday'], "text": str(Tuesday) + " commits",
"percent": round((Tuesday / sum_week) * 100, 2)},
{"name": translate['Wednesday'], "text": str(Wednesday) + " commits",
"percent": round((Wednesday / sum_week) * 100, 2)},
{"name": translate['Thursday'], "text": str(Thursday) + " commits",
"percent": round((Thursday / sum_week) * 100, 2)},
{"name": translate['Friday'], "text": str(Friday) + " commits", "percent": round((Friday / sum_week) * 100, 2)},
{"name": translate['Saturday'], "text": str(Saturday) + " commits",
"percent": round((Saturday / sum_week) * 100, 2)},
{"name": translate['Sunday'], "text": str(Sunday) + " commits", "percent": round((Sunday / sum_week) * 100, 2)},
]
string = string + '**' + title + '** \n\n' + '```text\n' + make_commit_list(one_day) + '\n\n```\n'
if show_days_of_week.lower() in truthy:
max_element = {
'percent': 0
}
for day in dayOfWeek:
if day['percent'] > max_element['percent']:
max_element = day
days_title = translate['I am Most Productive on'] % max_element['name']
string = string + '📅 **' + days_title + '** \n\n' + '```text\n' + make_commit_list(dayOfWeek) + '\n\n```\n'
return string
def get_waka_time_stats():
stats = ''
request = requests.get(
f"https://wakatime.com/api/v1/users/current/stats/last_7_days?api_key={waka_key}")
no_activity = translate["No Activity Tracked This Week"]
if request.status_code == 401:
print("Error With WAKA time API returned " + str(request.status_code) + " Response " + str(request.json()))
else:
empty = True
data = request.json()
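        # data['data'] is the WakaTime last-7-days summary; the blocks below format
        # its timezone, languages, editors, projects and operating_systems fields.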
if showCommit.lower() in truthy:
empty = False
stats = stats + generate_commit_list(tz=data['data']['timezone']) + '\n\n'
stats += '📊 **' + translate['This Week I Spend My Time On'] + '** \n\n'
stats += '```text\n'
if showTimeZone.lower() in truthy:
empty = False
tzone = data['data']['timezone']
stats = stats + '⌚︎ ' + translate['Timezone'] + ': ' + tzone + '\n\n'
if showLanguage.lower() in truthy:
empty = False
if len(data['data']['languages']) == 0:
lang_list = no_activity
else:
lang_list = make_list(data['data']['languages'])
stats = stats + '💬 ' + translate['Languages'] + ': \n' + lang_list + '\n\n'
if showEditors.lower() in truthy:
empty = False
if len(data['data']['editors']) == 0:
edit_list = no_activity
else:
edit_list = make_list(data['data']['editors'])
stats = stats + '🔥 ' + translate['Editors'] + ': \n' + edit_list + '\n\n'
if showProjects.lower() in truthy:
empty = False
if len(data['data']['projects']) == 0:
project_list = no_activity
else:
# Re-order the project list by percentage
data['data']['projects'] = sorted(data['data']['projects'], key=lambda x: x["percent"], reverse=True)
project_list = make_list(data['data']['projects'])
stats = stats + '🐱💻 ' + translate['Projects'] + ': \n' + project_list + '\n\n'
if showOs.lower() in truthy:
empty = False
if len(data['data']['operating_systems']) == 0:
os_list = no_activity
else:
os_list = make_list(data['data']['operating_systems'])
stats = stats + '💻 ' + translate['operating system'] + ': \n' + os_list + '\n\n'
stats += '```\n\n'
if empty:
return ""
return stats
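# Count each repository's primary language and render the distribution,
# led by the most common one.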
def generate_language_per_repo(result):
language_count = {}
total = 0
for repo in result['data']['user']['repositories']['edges']:
if repo['node']['primaryLanguage'] is None:
continue
language = repo['node']['primaryLanguage']['name']
color_code = repo['node']['primaryLanguage']['color']
total += 1
if language not in language_count.keys():
language_count[language] = {}
language_count[language]['count'] = 1
else:
language_count[language]['count'] = language_count[language]['count'] + 1
language_count[language]['color'] = color_code
data = []
sorted_labels = list(language_count.keys())
sorted_labels.sort(key=lambda x: language_count[x]['count'], reverse=True)
most_language_repo = sorted_labels[0]
for label in sorted_labels:
percent = round(language_count[label]['count'] / total * 100, 2)
extension = " repos"
if language_count[label]['count'] == 1:
extension = " repo"
data.append({
"name": label,
"text": str(language_count[label]['count']) + extension,
"percent": percent
})
title = translate['I Mostly Code in'] % most_language_repo
return '**' + title + '** \n\n' + '```text\n' + make_list(data) + '\n\n```\n'
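# Both helpers below run the same repository-list query and hand it to LinesOfCode;
# calculateLoc() returns totals keyed by year, quarter and language.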
def get_yearly_data():
repository_list = run_query(repositoryListQuery.substitute(username=username, id=id))
loc = LinesOfCode(id, username, ghtoken, repository_list, ignored_repos_name)
yearly_data = loc.calculateLoc()
if showLocChart.lower() in truthy:
loc.plotLoc(yearly_data)
return yearly_data
def get_line_of_code():
repositoryList = run_query(repositoryListQuery.substitute(username=username, id=id))
loc = LinesOfCode(id, username, ghtoken, repositoryList, ignored_repos_name)
yearly_data = loc.calculateLoc()
total_loc = sum(
[yearly_data[year][quarter][lang] for year in yearly_data for quarter in yearly_data[year] for lang in
yearly_data[year][quarter]])
return millify(int(total_loc))
def get_short_info(github):
string = '**🐱 ' + translate['My GitHub Data'] + '** \n\n'
user_info = github.get_user()
if user_info.disk_usage is None:
disk_usage = humanize.naturalsize(0)
print("Please add new github personal access token with user permission")
else:
disk_usage = humanize.naturalsize(user_info.disk_usage)
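    # Contribution totals come from the github-contributions.now.sh API; the first
    # entry of 'years' is used as the most recent year.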
request = requests.get('https://github-contributions.now.sh/api/v1/' + user_info.login)
if request.status_code == 200:
data = request.json()
total = data['years'][0]['total']
year = data['years'][0]['year']
string += '> 🏆 ' + translate['Contributions in the year'] % (humanize.intcomma(total), year) + '\n > \n'
string += '> 📦 ' + translate["Used in GitHub's Storage"] % disk_usage + ' \n > \n'
is_hireable = user_info.hireable
public_repo = user_info.public_repos
private_repo = user_info.owned_private_repos
if private_repo is None:
private_repo = 0
if is_hireable:
string += "> 💼 " + translate["Opted to Hire"] + "\n > \n"
else:
string += "> 🚫 " + translate["Not Opted to Hire"] + "\n > \n"
string += '> 📜 '
string += translate['public repositories'] % public_repo + " " + '\n > \n' if public_repo != 1 else translate[
'public repository'] % public_repo + " " + '\n > \n'
string += '> 🔑 '
string += translate['private repositories'] % private_repo + " " + ' \n > \n' if private_repo != 1 else translate[
'private repository'] % private_repo + " " + '\n > \n'
return string
def get_stats(github):
'''Gets API data and returns markdown progress'''
stats = ''
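    # Each show_* flag below toggles one block of the generated stats section.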
repositoryList = run_query(repositoryListQuery.substitute(username=username, id=id))
if show_loc.lower() in truthy or showLocChart.lower() in truthy:
        # Lines of code calculation is expensive, so it is done only once here and reused; this keeps the execution time down
yearly_data = get_yearly_data()
if show_total_code_time.lower() in truthy:
request = requests.get(
f"https://wakatime.com/api/v1/users/current/all_time_since_today?api_key={waka_key}")
if request.status_code == 401:
print("Error With WAKA time API returned " + str(request.status_code) + " Response " + str(request.json()))
elif "text" not in request.json()["data"]:
print("User stats are calculating. Try again later.")
else:
data = request.json()
stats += ') + '-' + quote(str(
data['data']['text'])) + '-blue)\n\n'
if show_profile_view.lower() in truthy:
data = run_v3_api(get_profile_view.substitute(owner=username, repo=username))
stats += ') + '-' + str(
data['count']) + '-blue)\n\n'
if show_loc.lower() in truthy:
stats += ') + '-' + quote(
str(get_line_of_code())) + '%20' + quote(str(translate['Lines of code'])) + '-blue)\n\n'
if show_short_info.lower() in truthy:
stats += get_short_info(github)
if show_waka_stats.lower() in truthy:
stats += get_waka_time_stats()
if showLanguagePerRepo.lower() in truthy:
stats = stats + generate_language_per_repo(repositoryList) + '\n\n'
if showLocChart.lower() in truthy:
stats += '**' + translate['Timeline'] + '**\n\n'
branch_name = github.get_repo(f'{username}/{username}').default_branch
stats = stats + ' \n\n'
if show_updated_date.lower() in truthy:
now = datetime.datetime.utcnow()
d1 = now.strftime("%d/%m/%Y %H:%M:%S")
stats = stats + "\n Last Updated on " + d1 + " UTC"
return stats
# def star_me():
# requests.put("https://api.github.com/user/starred/anmol098/waka-readme-stats", headers=headers)
def decode_readme(data: str):
'''Decode the contents of old readme'''
decoded_bytes = base64.b64decode(data)
return str(decoded_bytes, 'utf-8')
def generate_new_readme(stats: str, readme: str):
'''Generate a new Readme.md'''
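    # listReg (a regex defined elsewhere in this script) matches the block between
    # START_COMMENT and END_COMMENT in the old readme; it is replaced with the stats.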
stats_in_readme = f"{START_COMMENT}\n{stats}\n{END_COMMENT}"
return re.sub(listReg, stats_in_readme, readme)
if __name__ == '__main__':
try:
start_time = datetime.datetime.now().timestamp() * 1000
if ghtoken is None:
raise Exception('Token not available')
g = Github(ghtoken)
headers = {"Authorization": "Bearer " + ghtoken}
user_data = run_query(userInfoQuery) # Execute the query
username = user_data["data"]["viewer"]["login"]
id = user_data["data"]["viewer"]["id"]
emails_user = run_v3_api("/user/emails") # Execute the api
email = emails_user[0]['email']
print("Username " + username)
repo = g.get_repo(f"{username}/{username}")
contents = repo.get_readme()
try:
with open(os.path.join(os.path.dirname(__file__), 'translation.json'), encoding='utf-8') as config_file:
data = json.load(config_file)
translate = data[locale]
except Exception as e:
print("Cannot find the Locale choosing default to english")
translate = data['en']
waka_stats = get_stats(g)
# star_me()
rdmd = decode_readme(contents.content)
new_readme = generate_new_readme(stats=waka_stats, readme=rdmd)
if commit_by_me.lower() in truthy:
committer = InputGitAuthor(username, email)
else:
committer = InputGitAuthor('readme-bot', '41898282+github-actions[bot]@users.noreply.github.com')
if new_readme != rdmd:
try:
repo.update_file(path=contents.path, message=commit_message,
content=new_readme, sha=contents.sha, branch='master',
committer=committer)
            except Exception:
                # if updating on 'master' fails, retry on 'main'
repo.update_file(path=contents.path, message=commit_message,
content=new_readme, sha=contents.sha, branch='main',
committer=committer)
print("Readme updated")
end_time = datetime.datetime.now().timestamp() * 1000
print("Program processed in {} miliseconds.".format(round(end_time - start_time, 0)))
except Exception as e:
traceback.print_exc()
print("Exception Occurred " + str(e))
| []
| []
| [
"INPUT_IGNORED_REPOS",
"INPUT_GH_TOKEN",
"INPUT_SHOW_PROJECTS",
"INPUT_COMMIT_BY_ME",
"INPUT_SHOW_OS",
"INPUT_SHOW_LANGUAGE",
"INPUT_SHOW_DAYS_OF_WEEK",
"INPUT_SYMBOL_VERSION",
"INPUT_LOCALE",
"INPUT_SHOW_LOC_CHART",
"INPUT_COMMIT_MESSAGE",
"INPUT_SHOW_UPDATED_DATE",
"INPUT_SHOW_TIMEZONE",
"INPUT_SHOW_LINES_OF_CODE",
"INPUT_SHOW_LANGUAGE_PER_REPO",
"INPUT_SHOW_EDITORS",
"INPUT_WAKATIME_API_KEY",
"INPUT_SHOW_COMMIT",
"INPUT_SHOW_PROFILE_VIEWS",
"INPUT_SHOW_SHORT_INFO",
"INPUT_SHOW_TOTAL_CODE_TIME"
]
| [] | ["INPUT_IGNORED_REPOS", "INPUT_GH_TOKEN", "INPUT_SHOW_PROJECTS", "INPUT_COMMIT_BY_ME", "INPUT_SHOW_OS", "INPUT_SHOW_LANGUAGE", "INPUT_SHOW_DAYS_OF_WEEK", "INPUT_SYMBOL_VERSION", "INPUT_LOCALE", "INPUT_SHOW_LOC_CHART", "INPUT_COMMIT_MESSAGE", "INPUT_SHOW_UPDATED_DATE", "INPUT_SHOW_TIMEZONE", "INPUT_SHOW_LINES_OF_CODE", "INPUT_SHOW_LANGUAGE_PER_REPO", "INPUT_SHOW_EDITORS", "INPUT_WAKATIME_API_KEY", "INPUT_SHOW_COMMIT", "INPUT_SHOW_PROFILE_VIEWS", "INPUT_SHOW_SHORT_INFO", "INPUT_SHOW_TOTAL_CODE_TIME"] | python | 21 | 0 | |
test_nerf.py | import os, sys
# os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
sys.path.append(r'/home/luca/Desktop/NERFPosit/Inference')
import numpy as np
import imageio
import json
import random
import time
import pprint
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import run_nerf
from load_llff import load_llff_data
from load_deepvoxels import load_dv_data
from load_blender import load_blender_data
basedir = './logs'
expname = 'fern_example'
config = os.path.join(basedir, expname, 'config.txt')
print('Args:')
print(open(config, 'r').read())
parser = run_nerf.config_parser()
args = parser.parse_args('--config {} --ft_path {}'.format(config, os.path.join(basedir, expname, 'model_200000.npy')))
print('loaded args')
images, poses, bds, render_poses, i_test = load_llff_data(args.datadir, args.factor,
recenter=True, bd_factor=.75,
spherify=args.spherify)
H, W, focal = poses[0,:3,-1].astype(np.float32)
H = int(H)
W = int(W)
hwf = [H, W, focal]
images = images.astype(np.float32)
poses = poses.astype(np.float32)
if args.no_ndc:
near = tf.reduce_min(bds) * .9
far = tf.reduce_max(bds) * 1.
else:
near = 0.
far = 1.
# Create nerf model
_, render_kwargs_test, start, grad_vars, models = run_nerf.create_nerf(args)
print(models['model'].input)
model = models['model']
print(model.summary())
#extractor = keras.Model(inputs=model.inputs,
# outputs=model.layers[1].output)
#embed_fn, input_ch = run_nerf.get_embedder(10,1)
#embed_fn1, input_ch = run_nerf.get_embedder(4,1)
#a = embed_fn(tf.constant([[0.5,0.5,0.5]]))
#b = embed_fn1(tf.constant([[0.5,0.5,0.5]]))
#c = tf.concat([a,b],1)
#print(c.shape)
#print(extractor.predict(c))
#exit(0)
#features = extractor()
bds_dict = {
'near' : tf.cast(near, tf.float32),
'far' : tf.cast(far, tf.float32),
}
render_kwargs_test.update(bds_dict)
print('Render kwargs:')
pprint.pprint(render_kwargs_test)
down = 4
render_kwargs_fast = {k : render_kwargs_test[k] for k in render_kwargs_test}
render_kwargs_fast['N_importance'] = 0
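# Render a coarse preview: the image is downsampled by `down` and N_importance = 0
# disables the hierarchical (fine network) sampling pass for speed.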
c2w = np.eye(4)[:3,:4].astype(np.float32) # identity pose matrix
test = run_nerf.render(H//down, W//down, focal/down, c2w=c2w, **render_kwargs_fast)
img = np.clip(test[0],0,1)
plt.imshow(img)
plt.show()
| []
| []
| [
"CUDA_VISIBLE_DEVICES",
"TF_FORCE_GPU_ALLOW_GROWTH"
]
| [] | ["CUDA_VISIBLE_DEVICES", "TF_FORCE_GPU_ALLOW_GROWTH"] | python | 2 | 0 | |
networking/networking.go | package networking
import (
"bytes"
"fmt"
"net/http"
"os"
)
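// SendSoap POSTs a SOAP envelope to the given endpoint; when GOVNIF_DEBUG is set
// to "true" or "1" the outgoing message is also printed to stdout.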
func SendSoap(endpoint, message string) (*http.Response, error) {
if os.Getenv("GOVNIF_DEBUG") == "true" || os.Getenv("GOVNIF_DEBUG") == "1" {
fmt.Fprintln(os.Stdout, message)
}
httpClient := new(http.Client)
resp, err := httpClient.Post(endpoint, "application/soap+xml; charset=utf-8", bytes.NewBufferString(message))
if err != nil {
return resp, err
}
return resp, nil
}
| [
"\"GOVNIF_DEBUG\"",
"\"GOVNIF_DEBUG\""
]
| []
| [
"GOVNIF_DEBUG"
]
| [] | ["GOVNIF_DEBUG"] | go | 1 | 0 | |
pkg/kube/deployments.go | package kube
import (
jsonencoding "encoding/json"
"fmt"
"os"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"github.com/ajssmith/skupper/api/types"
)
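// GetOwnerReference builds an OwnerReference for the given Deployment so that
// objects created with it are garbage-collected together with the deployment.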
func GetOwnerReference(dep *appsv1.Deployment) metav1.OwnerReference {
return metav1.OwnerReference{
APIVersion: "apps/v1",
Kind: "Deployment",
Name: dep.ObjectMeta.Name,
UID: dep.ObjectMeta.UID,
}
}
func DeleteDeployment(name string, namespace string, cli *kubernetes.Clientset) error {
_, err := cli.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
if err == nil {
err = cli.AppsV1().Deployments(namespace).Delete(name, &metav1.DeleteOptions{})
}
return err
}
// todo, pass full client object with namespace and clientset
func GetDeployment(name string, namespace string, cli *kubernetes.Clientset) (*appsv1.Deployment, error) {
existing, err := cli.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
} else {
return existing, err
}
}
func NewProxyStatefulSet(serviceInterface types.ServiceInterface, namespace string, cli *kubernetes.Clientset) (*appsv1.StatefulSet, error) {
	// Do stateful sets use a different name? config.origin vs config.headless.name
proxyName := serviceInterface.Address + "-proxy"
statefulSets := cli.AppsV1().StatefulSets(namespace)
deployments := cli.AppsV1().Deployments(namespace)
transportDep, err := deployments.Get(types.TransportDeploymentName, metav1.GetOptions{})
if err != nil {
return nil, err
}
serviceInterface.Origin = ""
encoded, err := jsonencoding.Marshal(serviceInterface)
if err != nil {
return nil, err
}
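	// The serialized service definition is handed to the proxy container via the
	// SKUPPER_PROXY_CONFIG environment variable defined below.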
ownerRef := GetOwnerReference(transportDep)
var imageName string
if os.Getenv("PROXY_IMAGE") != "" {
imageName = os.Getenv("PROXY_IMAGE")
} else {
imageName = types.DefaultProxyImage
}
// TODO: Fix replicas
proxyStatefulSet := &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "StatefulSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: proxyName,
Namespace: namespace,
OwnerReferences: []metav1.OwnerReference{ownerRef},
Annotations: map[string]string{
types.ServiceQualifier: serviceInterface.Address,
},
Labels: map[string]string{
"internal.skupper.io/type": "proxy",
},
},
Spec: appsv1.StatefulSetSpec{
ServiceName: serviceInterface.Address,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"internal.skupper.io/service": serviceInterface.Address,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"internal.skupper.io/service": serviceInterface.Address,
},
},
Spec: corev1.PodSpec{
ServiceAccountName: types.TransportServiceAccountName,
Containers: []corev1.Container{
{
Image: imageName,
Name: "proxy",
Env: []corev1.EnvVar{
{
Name: "SKUPPER_PROXY_CONFIG",
Value: string(encoded),
},
{
Name: "NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "connect",
MountPath: "/etc/messaging/",
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "connect",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "skupper",
},
},
},
},
},
},
},
}
created, err := statefulSets.Create(proxyStatefulSet)
if err != nil {
return nil, err
} else {
return created, nil
}
}
func NewProxyDeployment(serviceInterface types.ServiceInterface, namespace string, cli *kubernetes.Clientset) (*appsv1.Deployment, error) {
proxyName := serviceInterface.Address + "-proxy"
deployments := cli.AppsV1().Deployments(namespace)
transportDep, err := deployments.Get(types.TransportDeploymentName, metav1.GetOptions{})
if err != nil {
return nil, err
}
serviceInterface.Origin = ""
encoded, err := jsonencoding.Marshal(serviceInterface)
if err != nil {
return nil, err
}
ownerRef := GetOwnerReference(transportDep)
var imageName string
if os.Getenv("PROXY_IMAGE") != "" {
imageName = os.Getenv("PROXY_IMAGE")
} else {
imageName = types.DefaultProxyImage
}
proxyDep := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: proxyName,
Namespace: namespace,
OwnerReferences: []metav1.OwnerReference{ownerRef},
Annotations: map[string]string{
types.ServiceQualifier: serviceInterface.Address,
},
Labels: map[string]string{
"internal.skupper.io/type": "proxy",
},
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"internal.skupper.io/service": serviceInterface.Address,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"internal.skupper.io/service": serviceInterface.Address,
},
},
Spec: corev1.PodSpec{
ServiceAccountName: types.TransportServiceAccountName,
Containers: []corev1.Container{
{
Image: imageName,
Name: "proxy",
Env: []corev1.EnvVar{
{
Name: "SKUPPER_PROXY_CONFIG",
Value: string(encoded),
},
{
Name: "NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "connect",
MountPath: "/etc/messaging/",
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "connect",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "skupper",
},
},
},
},
},
},
},
}
created, err := deployments.Create(proxyDep)
if err != nil {
return nil, err
} else {
return created, nil
}
}
func NewControllerDeployment(van *types.VanRouterSpec, ownerRef metav1.OwnerReference, cli *kubernetes.Clientset) *appsv1.Deployment {
deployments := cli.AppsV1().Deployments(van.Namespace)
existing, err := deployments.Get(types.ControllerDeploymentName, metav1.GetOptions{})
if err == nil {
fmt.Println("VAN site controller already exists")
return existing
} else if errors.IsNotFound(err) {
dep := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: types.ControllerDeploymentName,
Namespace: van.Namespace,
OwnerReferences: []metav1.OwnerReference{ownerRef},
},
Spec: appsv1.DeploymentSpec{
Replicas: &van.Controller.Replicas,
Selector: &metav1.LabelSelector{
MatchLabels: van.Controller.Labels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: van.Controller.Labels,
},
Spec: corev1.PodSpec{
ServiceAccountName: types.ControllerServiceAccountName,
Containers: []corev1.Container{ContainerForController(van.Controller)},
},
},
},
}
dep.Spec.Template.Spec.Volumes = van.Controller.Volumes
dep.Spec.Template.Spec.Containers[0].VolumeMounts = van.Controller.VolumeMounts
created, err := deployments.Create(dep)
if err != nil {
fmt.Println("Failed to create controller deployment: ", err.Error())
return nil
} else {
return created
}
} else {
dep := &appsv1.Deployment{}
fmt.Println("Failed to check controller deployment: ", err.Error())
return dep
}
return nil
}
func NewTransportDeployment(van *types.VanRouterSpec, cli *kubernetes.Clientset) *appsv1.Deployment {
deployments := cli.AppsV1().Deployments(van.Namespace)
existing, err := deployments.Get(types.TransportDeploymentName, metav1.GetOptions{})
if err == nil {
fmt.Println("VAN site transport already exists")
return existing
} else if errors.IsNotFound(err) {
dep := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: types.TransportDeploymentName,
Namespace: van.Namespace,
},
Spec: appsv1.DeploymentSpec{
Replicas: &van.Transport.Replicas,
Selector: &metav1.LabelSelector{
MatchLabels: van.Transport.Labels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: van.Transport.Labels,
Annotations: van.Transport.Annotations,
},
Spec: corev1.PodSpec{
ServiceAccountName: types.TransportServiceAccountName,
Containers: []corev1.Container{ContainerForTransport(van.Transport)},
},
},
},
}
dep.Spec.Template.Spec.Volumes = van.Transport.Volumes
dep.Spec.Template.Spec.Containers[0].VolumeMounts = van.Transport.VolumeMounts
created, err := deployments.Create(dep)
if err != nil {
fmt.Println("Failed to create transport deployment: ", err.Error())
return nil
} else {
return created
}
} else {
dep := &appsv1.Deployment{}
fmt.Println("Failed to check transport deployment: ", err.Error())
return dep
}
return nil
}
| [
"\"PROXY_IMAGE\"",
"\"PROXY_IMAGE\"",
"\"PROXY_IMAGE\"",
"\"PROXY_IMAGE\""
]
| []
| [
"PROXY_IMAGE"
]
| [] | ["PROXY_IMAGE"] | go | 1 | 0 | |
orch/docker/docker.go | package docker
import (
"fmt"
"io/ioutil"
"os"
"strconv"
"github.com/Sirupsen/logrus"
"github.com/pkg/errors"
"github.com/urfave/cli"
"golang.org/x/net/context"
dTypes "github.com/docker/docker/api/types"
dCli "github.com/docker/docker/client"
"github.com/rancher/longhorn-manager/api"
"github.com/rancher/longhorn-manager/kvstore"
"github.com/rancher/longhorn-manager/orch"
"github.com/rancher/longhorn-manager/scheduler"
"github.com/rancher/longhorn-manager/types"
"github.com/rancher/longhorn-manager/util"
)
const (
cfgDirectory = "/var/lib/rancher/longhorn/"
hostUUIDFile = cfgDirectory + ".physical_host_uuid"
)
type dockerOrc struct {
EngineImage string
Network string
IP string
currentHost *types.HostInfo
kv *kvstore.KVStore
cli *dCli.Client
scheduler types.Scheduler
}
type dockerOrcConfig struct {
servers []string
prefix string
image string
network string
}
func New(c *cli.Context) (types.Orchestrator, error) {
servers := c.StringSlice("etcd-servers")
if len(servers) == 0 {
return nil, fmt.Errorf("Unspecified etcd servers")
}
prefix := c.String("etcd-prefix")
image := c.String(orch.EngineImageParam)
network := c.String("docker-network")
return newDocker(&dockerOrcConfig{
servers: servers,
prefix: prefix,
image: image,
network: network,
})
}
func newDocker(cfg *dockerOrcConfig) (types.Orchestrator, error) {
etcdBackend, err := kvstore.NewETCDBackend(cfg.servers)
if err != nil {
return nil, err
}
kvStore, err := kvstore.NewKVStore(cfg.prefix, etcdBackend)
if err != nil {
return nil, err
}
docker := &dockerOrc{
EngineImage: cfg.image,
kv: kvStore,
}
docker.scheduler = scheduler.NewOrcScheduler(docker)
	// Pin the Docker API version to 1.24 so the client remains compatible with Docker 1.12
os.Setenv("DOCKER_API_VERSION", "1.24")
docker.cli, err = dCli.NewEnvClient()
if err != nil {
return nil, errors.Wrap(err, "cannot connect to docker")
}
if _, err := docker.cli.ContainerList(context.Background(), dTypes.ContainerListOptions{}); err != nil {
return nil, errors.Wrap(err, "cannot pass test to get container list")
}
if err = docker.updateNetwork(cfg.network); err != nil {
return nil, errors.Wrapf(err, "fail to detect dedicated container network: %v", cfg.network)
}
logrus.Infof("Detected network is %s, IP is %s", docker.Network, docker.IP)
address := docker.IP + ":" + strconv.Itoa(api.DefaultPort)
logrus.Info("Local address is: ", address)
if err := docker.Register(address); err != nil {
return nil, err
}
logrus.Info("Docker orchestrator is ready")
return docker, nil
}
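// getCurrentHost builds the HostInfo for this node; the host UUID is read from
// /var/lib/rancher/longhorn/.physical_host_uuid, or generated and persisted there
// on first run.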
func getCurrentHost(address string) (*types.HostInfo, error) {
var err error
host := &types.HostInfo{
Address: address,
}
host.Name, err = os.Hostname()
if err != nil {
return nil, err
}
uuid, err := ioutil.ReadFile(hostUUIDFile)
if err == nil {
host.UUID = string(uuid)
return host, nil
}
	// file doesn't exist; generate a new UUID for this host
host.UUID = util.UUID()
if err := os.MkdirAll(cfgDirectory, os.ModeDir|0600); err != nil {
return nil, fmt.Errorf("Fail to create configuration directory: %v", err)
}
if err := ioutil.WriteFile(hostUUIDFile, []byte(host.UUID), 0600); err != nil {
return nil, fmt.Errorf("Fail to write host uuid file: %v", err)
}
return host, nil
}
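// updateNetwork inspects the manager's own container (identified by HOSTNAME) to
// discover the network and IP to use, preferring the user-specified network when
// one is given.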
func (d *dockerOrc) updateNetwork(userSpecifiedNetwork string) error {
containerID := os.Getenv("HOSTNAME")
inspectJSON, err := d.cli.ContainerInspect(context.Background(), containerID)
if err != nil {
return errors.Errorf("cannot find manager container, may not be running inside container")
}
networks := inspectJSON.NetworkSettings.Networks
if len(networks) == 0 {
return errors.Errorf("cannot find manager container's network")
}
if userSpecifiedNetwork != "" {
net := networks[userSpecifiedNetwork]
if net == nil {
return errors.Errorf("user specified network %v doesn't exist", userSpecifiedNetwork)
}
d.Network = userSpecifiedNetwork
d.IP = net.IPAddress
return nil
}
if len(networks) > 1 {
return errors.Errorf("found multiple networks for container %v, "+
"unable to decide which one to use, "+
"use --docker-network option to specify: %+v", containerID, networks)
}
// only one entry here
for k, v := range networks {
d.Network = k
d.IP = v.IPAddress
}
return nil
}
func (d *dockerOrc) Register(address string) error {
currentHost, err := getCurrentHost(address)
if err != nil {
return err
}
if err := d.kv.SetHost(currentHost); err != nil {
return err
}
d.currentHost = currentHost
return nil
}
func (d *dockerOrc) GetHost(id string) (*types.HostInfo, error) {
return d.kv.GetHost(id)
}
func (d *dockerOrc) ListHosts() (map[string]*types.HostInfo, error) {
return d.kv.ListHosts()
}
func (d *dockerOrc) GetCurrentHostID() string {
return d.currentHost.UUID
}
func (d *dockerOrc) GetAddress(hostID string) (string, error) {
if hostID == d.currentHost.UUID {
return d.currentHost.Address, nil
}
host, err := d.GetHost(hostID)
if err != nil {
return "", err
}
return host.Address, nil
}
func (d *dockerOrc) CreateVolume(volume *types.VolumeInfo) (*types.VolumeInfo, error) {
v, err := d.kv.GetVolumeBase(volume.Name)
if err == nil && v != nil {
return nil, errors.Errorf("volume %v already exists %+v", volume.Name, v)
}
if err := d.kv.SetVolumeBase(volume); err != nil {
return nil, errors.Wrap(err, "fail to create new volume metadata")
}
return volume, nil
}
func (d *dockerOrc) DeleteVolume(volumeName string) error {
return d.kv.DeleteVolume(volumeName)
}
func (d *dockerOrc) GetVolume(volumeName string) (*types.VolumeInfo, error) {
return d.kv.GetVolume(volumeName)
}
func (d *dockerOrc) UpdateVolume(volume *types.VolumeInfo) error {
v, err := d.kv.GetVolumeBase(volume.Name)
if err != nil {
return errors.Errorf("cannot update volume %v because it doesn't exists %+v", volume.Name, v)
}
return d.kv.SetVolumeBase(volume)
}
func (d *dockerOrc) ListVolumes() ([]*types.VolumeInfo, error) {
return d.kv.ListVolumes()
}
func (d *dockerOrc) MarkBadReplica(volumeName string, replica *types.ReplicaInfo) error {
v, err := d.kv.GetVolume(volumeName)
if err != nil {
return errors.Wrap(err, "fail to mark bad replica, cannot get volume")
}
for k, r := range v.Replicas {
if r.Name == replica.Name {
r.BadTimestamp = util.Now()
v.Replicas[k] = r
break
}
}
if err := d.UpdateVolume(v); err != nil {
return errors.Wrap(err, "fail to mark bad replica, cannot update volume")
}
return nil
}
func (d *dockerOrc) GetSettings() (*types.SettingsInfo, error) {
settings, err := d.kv.GetSettings()
if err != nil {
return nil, err
}
if settings == nil {
return &types.SettingsInfo{
BackupTarget: "",
EngineImage: d.EngineImage,
}, nil
}
return settings, nil
}
func (d *dockerOrc) SetSettings(settings *types.SettingsInfo) error {
return d.kv.SetSettings(settings)
}
func (d *dockerOrc) Scheduler() types.Scheduler {
return d.scheduler
}
| [
"\"HOSTNAME\""
]
| []
| [
"HOSTNAME"
]
| [] | ["HOSTNAME"] | go | 1 | 0 | |
main.go | package main
import (
"RTIW/RTIW"
"RTIW/RTIW/Materials"
"RTIW/RTIW/Shapes"
"RTIW/RTIW/Utils"
"flag"
"image"
"image/png"
"log"
"math"
"math/rand"
"os"
"runtime/pprof"
"time"
"github.com/engoengine/glm"
)
func ComputeColor(ray *RTIW.Ray, surfaces *RTIW.Surfaces, depth int, r *rand.Rand) glm.Vec3 {
hitRecord := RTIW.HitRecord{}
if surfaces.Hit(ray, 0.001, math.MaxFloat32, &hitRecord) {
scattered := RTIW.Ray{}
attenuation := glm.Vec3{}
if depth < 50 && hitRecord.Material.Scatter(ray, &hitRecord, &attenuation, &scattered, r) {
c := ComputeColor(&scattered, surfaces, depth+1, r)
return glm.Vec3{attenuation[0] * c[0], attenuation[1] * c[1], attenuation[2] * c[2]}
}
return glm.Vec3{}
}
unitDirection := ray.Direction.Normalized()
t := 0.5 * (unitDirection.Y() + 1.0)
interpA := glm.Vec3{1.0, 1.0, 1.0}
interpB := glm.Vec3{0.5, 0.7, 1.0}
computed := interpA.Mul(1.0 - t)
computed.AddScaledVec(t, &interpB)
return computed
}
func RandomScene(r *rand.Rand) *RTIW.Surfaces {
scene := RTIW.Surfaces{}
scene.Add(Shapes.NewSphere(glm.Vec3{0, -1000, 0}, 1000, Materials.NewLambertian(glm.Vec3{0.5, 0.5, 0.5})))
limit := glm.Vec3{4, 0.2, 0}
for a := -11; a < 11; a++ {
for b := -11; b < 11; b++ {
chooseMaterial := r.Float32()
center := glm.Vec3{float32(a) + 0.9*r.Float32(), 0.2, float32(b) + 0.9*r.Float32()}
dist := center.Sub(&limit)
if dist.Len() > 0.9 { //Diffuse
if chooseMaterial < 0.8 {
lamb := glm.Vec3{r.Float32() * r.Float32(), r.Float32() * r.Float32(), r.Float32() * r.Float32()}
scene.Add(Shapes.NewSphere(center, 0.2, Materials.NewLambertian(lamb)))
} else if chooseMaterial < 0.95 { //Metal
metal := glm.Vec3{0.5 * (1 + r.Float32()), 0.5 * (1 + r.Float32()), 0.5 * (1 + r.Float32())}
scene.Add(Shapes.NewSphere(center, 0.2, Materials.NewMetal(metal, 0.5*r.Float32())))
} else { //Glass
scene.Add(Shapes.NewSphere(center, 0.2, Materials.NewDieletric(1.5)))
}
}
}
}
scene.Add(Shapes.NewSphere(glm.Vec3{0, 1, 0}, 1.0, Materials.NewDieletric(1.5)))
scene.Add(Shapes.NewSphere(glm.Vec3{-4, 1, 0}, 1.0, Materials.NewLambertian(glm.Vec3{0.4, 0.2, 0.1})))
scene.Add(Shapes.NewSphere(glm.Vec3{4, 1, 0}, 1.0, Materials.NewMetal(glm.Vec3{0.7, 0.6, 0.5}, 0.0)))
return &scene
}
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
func main() {
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
file, err := os.Create("output.png")
if err != nil {
log.Fatal("error creating ouput file: ", err)
}
defer file.Close()
nx := 1200
ny := 800
ns := 100
//Camera setup
origin := glm.Vec3{13, 2, 3}
lookAt := glm.Vec3{0, 0, 0}
distToFocus := float32(10.0)
aperture := float32(0.1)
camera := RTIW.NewCamera(
origin, //Origin
lookAt, //LookAt
glm.Vec3{0, 1, 0}, //Up
20, //FOV
float32(nx)/float32(ny), //Aspect
aperture, //Aperture
distToFocus, //Distance to focus
)
output := image.NewRGBA(image.Rect(0, 0, nx, ny))
r := rand.New(rand.NewSource(time.Now().Unix()))
surfaces := RandomScene(r)
for j := 0; j < ny; j++ {
for i := 0; i < nx; i++ {
acc := glm.Vec3{}
for s := 0; s < ns; s++ {
u := (float32(i) + r.Float32()) / float32(nx)
v := (float32(j) + r.Float32()) / float32(ny)
ray := camera.GetRay(u, v, r)
color := ComputeColor(&ray, surfaces, 0, r)
acc.AddWith(&color)
}
acc.MulWith(1 / float32(ns))
acc = glm.Vec3{
float32(math.Sqrt(float64(acc.X()))),
float32(math.Sqrt(float64(acc.Y()))),
float32(math.Sqrt(float64(acc.Z()))),
}
c := Utils.ColorRGBAFromVec3(acc)
output.SetRGBA(i, ny-j, c)
}
}
err = png.Encode(file, output)
if err != nil {
log.Fatal("error enconding jpg: ", err)
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
template/faas-flow/handler.go | package main
import (
"bytes"
"fmt"
"handler/function"
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"strings"
faasflow "github.com/s8sg/faas-flow"
sdk "github.com/s8sg/faas-flow/sdk"
executor "github.com/s8sg/faas-flow/sdk/executor"
exporter "github.com/s8sg/faas-flow/sdk/exporter"
)
const (
	// A signature: the SHA256 equivalent of "github.com/s8sg/faas-flow"
defaultHmacKey = "71F1D3011F8E6160813B4997BA29856744375A7F26D427D491E1CCABD4627E7C"
counterUpdateRetryCount = 10
)
var (
re = regexp.MustCompile(`(?m)^[^:.]+\s*`)
)
// implements faasflow.EventHandler
type openFaasEventHandler struct {
currentNodeId string // used to inject current node id in tracer
tracer *traceHandler // handle traces with open-tracing
flowName string
header http.Header
}
// implements faasflow.Logger
type openFaasLogger struct{}
// implements faasflow.Executor + RequestHandler
type openFaasExecutor struct {
gateway string
asyncUrl string // the async URL of the flow
flowName string // the name of the function
reqId string // the request id
callbackUrl string // the callback url
partialState []byte
rawRequest *executor.RawRequest
defaultStateStore sdk.StateStore
openFaasEventHandler
openFaasLogger
}
// Logger
func (logger *openFaasLogger) Configure(flowName string, requestId string) {}
func (logger *openFaasLogger) Init() error {
return nil
}
func (logger *openFaasLogger) Log(str string) {
fmt.Print(str)
}
// EventHandler
func (eh *openFaasEventHandler) Configure(flowName string, requestId string) {
eh.flowName = flowName
}
func (eh *openFaasEventHandler) Init() error {
var err error
// initialize trace server if tracing enabled
eh.tracer, err = initRequestTracer(eh.flowName)
if err != nil {
return fmt.Errorf("failed to init request tracer, error %v", err)
}
return nil
}
func (eh *openFaasEventHandler) ReportRequestStart(requestId string) {
eh.tracer.startReqSpan(requestId)
}
func (eh *openFaasEventHandler) ReportRequestFailure(requestId string, err error) {
// TODO: add log
eh.tracer.stopReqSpan()
}
func (eh *openFaasEventHandler) ReportExecutionForward(currentNodeId string, requestId string) {
eh.currentNodeId = currentNodeId
}
func (eh *openFaasEventHandler) ReportExecutionContinuation(requestId string) {
eh.tracer.continueReqSpan(requestId, eh.header)
}
func (eh *openFaasEventHandler) ReportRequestEnd(requestId string) {
eh.tracer.stopReqSpan()
}
func (eh *openFaasEventHandler) ReportNodeStart(nodeId string, requestId string) {
eh.tracer.startNodeSpan(nodeId, requestId)
}
func (eh *openFaasEventHandler) ReportNodeEnd(nodeId string, requestId string) {
eh.tracer.stopNodeSpan(nodeId)
}
func (eh *openFaasEventHandler) ReportNodeFailure(nodeId string, requestId string, err error) {
// TODO: add log
eh.tracer.stopNodeSpan(nodeId)
}
func (eh *openFaasEventHandler) ReportOperationStart(operationId string, nodeId string, requestId string) {
eh.tracer.startOperationSpan(nodeId, requestId, operationId)
}
func (eh *openFaasEventHandler) ReportOperationEnd(operationId string, nodeId string, requestId string) {
eh.tracer.stopOperationSpan(nodeId, operationId)
}
func (eh *openFaasEventHandler) ReportOperationFailure(operationId string, nodeId string, requestId string, err error) {
// TODO: add log
eh.tracer.stopOperationSpan(nodeId, operationId)
}
func (eh *openFaasEventHandler) Flush() {
eh.tracer.flushTracer()
}
// ExecutionRuntime
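// HandleNextNode encodes the partial pipeline state and POSTs it to this flow's
// async-function URL, so the next node runs in a separate asynchronous invocation.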
func (of *openFaasExecutor) HandleNextNode(partial *executor.PartialState) error {
state, err := partial.Encode()
if err != nil {
return fmt.Errorf("failed to encode partial state, error %v", err)
}
// build url for calling the flow in async
httpreq, _ := http.NewRequest(http.MethodPost, of.asyncUrl, bytes.NewReader(state))
httpreq.Header.Add("Accept", "application/json")
httpreq.Header.Add("Content-Type", "application/json")
httpreq.Header.Add("X-Faas-Flow-Reqid", of.reqId)
httpreq.Header.Set("X-Faas-Flow-State", "partial")
httpreq.Header.Set("X-Faas-Flow-Callback-Url", of.callbackUrl)
// extend req span for async call
if of.MonitoringEnabled() {
of.tracer.extendReqSpan(of.reqId, of.openFaasEventHandler.currentNodeId,
of.asyncUrl, httpreq)
}
client := &http.Client{}
res, resErr := client.Do(httpreq)
if resErr != nil {
return resErr
}
defer res.Body.Close()
resdata, _ := ioutil.ReadAll(res.Body)
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted {
return fmt.Errorf("%d: %s", res.StatusCode, string(resdata))
}
return nil
}
func (of *openFaasExecutor) GetExecutionOption(operation sdk.Operation) map[string]interface{} {
options := make(map[string]interface{})
options["gateway"] = of.gateway
options["request-id"] = of.reqId
return options
}
func (of *openFaasExecutor) HandleExecutionCompletion(data []byte) error {
if of.callbackUrl == "" {
return nil
}
log.Printf("calling callback url (%s) with result", of.callbackUrl)
httpreq, _ := http.NewRequest(http.MethodPost, of.callbackUrl, bytes.NewReader(data))
httpreq.Header.Add("X-Faas-Flow-Reqid", of.reqId)
client := &http.Client{}
res, resErr := client.Do(httpreq)
if resErr != nil {
return resErr
}
defer res.Body.Close()
resdata, _ := ioutil.ReadAll(res.Body)
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted {
return fmt.Errorf("failed to call callback %d: %s", res.StatusCode, string(resdata))
}
return nil
}
// Executor
func (of *openFaasExecutor) Configure(requestId string) {
of.reqId = requestId
}
func (of *openFaasExecutor) GetFlowName() string {
return of.flowName
}
func (of *openFaasExecutor) GetFlowDefinition(pipeline *sdk.Pipeline, context *sdk.Context) error {
workflow := faasflow.GetWorkflow(pipeline)
faasflowContext := (*faasflow.Context)(context)
err := function.Define(workflow, faasflowContext)
return err
}
func (of *openFaasExecutor) ReqValidationEnabled() bool {
status := true
hmacStatus := os.Getenv("validate_request")
if strings.ToUpper(hmacStatus) == "FALSE" {
status = false
}
return status
}
func (of *openFaasExecutor) GetValidationKey() (string, error) {
key, keyErr := readSecret("faasflow-hmac-secret")
if keyErr != nil {
key = defaultHmacKey
}
return key, nil
}
func (of *openFaasExecutor) ReqAuthEnabled() bool {
status := false
verifyStatus := os.Getenv("authenticate_request")
if strings.ToUpper(verifyStatus) == "TRUE" {
status = true
}
return status
}
func (of *openFaasExecutor) GetReqAuthKey() (string, error) {
key, keyErr := readSecret("faasflow-hmac-secret")
return key, keyErr
}
func (of *openFaasExecutor) MonitoringEnabled() bool {
tracing := os.Getenv("enable_tracing")
if strings.ToUpper(tracing) == "TRUE" {
return true
}
return false
}
func (of *openFaasExecutor) GetEventHandler() (sdk.EventHandler, error) {
return &of.openFaasEventHandler, nil
}
func (of *openFaasExecutor) LoggingEnabled() bool {
return true
}
func (of *openFaasExecutor) GetLogger() (sdk.Logger, error) {
return &of.openFaasLogger, nil
}
func (of *openFaasExecutor) GetStateStore() (sdk.StateStore, error) {
stateStore, err := function.DefineStateStore()
if err != nil {
return stateStore, err
}
if stateStore == nil {
stateStore = of.defaultStateStore
log.Print("Warning: using default StateStore, distributed async call will not work properly")
}
return stateStore, nil
}
func (of *openFaasExecutor) GetDataStore() (sdk.DataStore, error) {
stateStore, err := function.DefineDataStore()
return stateStore, err
}
// internal
func (of *openFaasExecutor) init(req *HttpRequest) error {
of.gateway = getGateway()
of.flowName = getWorkflowNameFromHost(req.Host)
if of.flowName == "" {
return fmt.Errorf("failed to parse workflow name from host")
}
of.asyncUrl = buildURL("http://"+of.gateway, "async-function", of.flowName)
if of.MonitoringEnabled() {
var err error
// initialize trace server if tracing enabled
of.openFaasEventHandler.tracer, err = initRequestTracer(of.flowName)
if err != nil {
return fmt.Errorf("failed to init request tracer, error %v", err)
}
}
return nil
}
// Handle handles requests to the flow function
func (of *openFaasExecutor) Handle(req *HttpRequest, response *HttpResponse) error {
err := of.init(req)
if err != nil {
return err
}
notifyChan := make(chan string, 1)
defer close(notifyChan)
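	// Dispatch on the request type: DAG export, stop/pause/resume of an existing
	// request id, or normal execution (new request or partial-state continuation).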
switch {
case isDagExportRequest(req):
flowExporter := exporter.CreateFlowExporter(of)
resp, err := flowExporter.Export()
if err != nil {
return fmt.Errorf("failed to export dag, error %v", err)
}
response.Body = resp
case getStopRequestId(req) != "":
requestId := getStopRequestId(req)
flowExecutor := executor.CreateFlowExecutor(of, notifyChan)
err := flowExecutor.Stop(requestId)
if err != nil {
log.Printf(err.Error())
return fmt.Errorf("failed to stop request " + requestId + ", check if request is active")
}
response.Body = []byte("Successfully stopped request " + requestId)
case getPauseRequestId(req) != "":
requestId := getPauseRequestId(req)
flowExecutor := executor.CreateFlowExecutor(of, notifyChan)
err := flowExecutor.Pause(requestId)
if err != nil {
log.Printf(err.Error())
return fmt.Errorf("failed to pause request " + requestId + ", check if request is active")
}
response.Body = []byte("Successfully paused request " + requestId)
case getResumeRequestId(req) != "":
requestId := getResumeRequestId(req)
flowExecutor := executor.CreateFlowExecutor(of, notifyChan)
err := flowExecutor.Resume(requestId)
if err != nil {
log.Printf(err.Error())
return fmt.Errorf("failed to resume request " + requestId + ", check if request is active")
}
response.Body = []byte("Successfully resumed request " + requestId)
default:
var stateOption executor.ExecutionStateOption
requestId := req.Header.Get("X-Faas-Flow-Reqid")
state := req.Header.Get("X-Faas-Flow-State")
of.callbackUrl = req.Header.Get("X-Faas-Flow-Callback-Url")
if state == "" {
rawRequest := &executor.RawRequest{}
rawRequest.Data = req.Body
rawRequest.Query = req.QueryString
rawRequest.AuthSignature = req.Header.Get("X-Hub-Signature")
// Check if any request Id is passed
if requestId != "" {
rawRequest.RequestId = requestId
}
stateOption = executor.NewRequest(rawRequest)
} else {
if requestId == "" {
return fmt.Errorf("request ID not set in partial request")
}
of.openFaasEventHandler.header = req.Header
partialState, err := executor.DecodePartialReq(req.Body)
if err != nil {
log.Printf(err.Error())
return fmt.Errorf("failed to decode partial state")
}
stateOption = executor.PartialRequest(partialState)
}
// Create a flow executor, OpenFaaSExecutor implements executor
flowExecutor := executor.CreateFlowExecutor(of, notifyChan)
resp, err := flowExecutor.Execute(stateOption)
if err != nil {
log.Printf(err.Error())
return fmt.Errorf("failed to execute request")
}
response.Body = resp
response.Header.Set("X-Faas-Flow-Reqid", of.reqId)
response.Header.Set("X-Faas-Flow-Callback-Url", of.callbackUrl)
}
response.StatusCode = http.StatusOK
return nil
}
| [
"\"validate_request\"",
"\"authenticate_request\"",
"\"enable_tracing\""
]
| []
| [
"authenticate_request",
"validate_request",
"enable_tracing"
]
| [] | ["authenticate_request", "validate_request", "enable_tracing"] | go | 3 | 0 | |
Day9_Building_and_Deploying_DL_using_Tensorflow/05/visualize_training final.py | import os
import tensorflow as tf
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
# Turn off TensorFlow warning messages in program output
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Load training data set from CSV file
training_data_df = pd.read_csv("sales_data_training.csv", dtype=float)
# Pull out columns for X (data to train with) and Y (value to predict)
X_training = training_data_df.drop('total_earnings', axis=1).values
Y_training = training_data_df[['total_earnings']].values
# Load testing data set from CSV file
test_data_df = pd.read_csv("sales_data_test.csv", dtype=float)
# Pull out columns for X (data to train with) and Y (value to predict)
X_testing = test_data_df.drop('total_earnings', axis=1).values
Y_testing = test_data_df[['total_earnings']].values
# All data needs to be scaled to a small range like 0 to 1 for the neural
# network to work well. Create scalers for the inputs and outputs.
X_scaler = MinMaxScaler(feature_range=(0, 1))
Y_scaler = MinMaxScaler(feature_range=(0, 1))
# Scale both the training inputs and outputs
X_scaled_training = X_scaler.fit_transform(X_training)
Y_scaled_training = Y_scaler.fit_transform(Y_training)
# It's very important that the training and test data are scaled with the same scaler.
X_scaled_testing = X_scaler.transform(X_testing)
Y_scaled_testing = Y_scaler.transform(Y_testing)
# Define model parameters
RUN_NAME = "run 2 with 20 nodes"
learning_rate = 0.001
training_epochs = 100
# Define how many inputs and outputs are in our neural network
number_of_inputs = 9
number_of_outputs = 1
# Define how many neurons we want in each layer of our neural network
layer_1_nodes = 20
layer_2_nodes = 100
layer_3_nodes = 50
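# Resulting network shape: 9 inputs -> 20 -> 100 -> 50 -> 1 output (scaled total_earnings).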
# Section One: Define the layers of the neural network itself
# Input Layer
with tf.variable_scope('input'):
X = tf.placeholder(tf.float32, shape=(None, number_of_inputs), name="X")
# Layer 1
with tf.variable_scope('layer_1'):
weights = tf.get_variable("weights1", shape=[number_of_inputs, layer_1_nodes], initializer=tf.contrib.layers.xavier_initializer())
biases = tf.get_variable(name="biases1", shape=[layer_1_nodes], initializer=tf.zeros_initializer())
layer_1_output = tf.nn.relu(tf.matmul(X, weights) + biases)
# Layer 2
with tf.variable_scope('layer_2'):
weights = tf.get_variable("weights2", shape=[layer_1_nodes, layer_2_nodes], initializer=tf.contrib.layers.xavier_initializer())
biases = tf.get_variable(name="biases2", shape=[layer_2_nodes], initializer=tf.zeros_initializer())
layer_2_output = tf.nn.relu(tf.matmul(layer_1_output, weights) + biases)
# Layer 3
with tf.variable_scope('layer_3'):
weights = tf.get_variable("weights3", shape=[layer_2_nodes, layer_3_nodes], initializer=tf.contrib.layers.xavier_initializer())
biases = tf.get_variable(name="biases3", shape=[layer_3_nodes], initializer=tf.zeros_initializer())
layer_3_output = tf.nn.relu(tf.matmul(layer_2_output, weights) + biases)
# Output Layer
with tf.variable_scope('output'):
weights = tf.get_variable("weights4", shape=[layer_3_nodes, number_of_outputs], initializer=tf.contrib.layers.xavier_initializer())
biases = tf.get_variable(name="biases4", shape=[number_of_outputs], initializer=tf.zeros_initializer())
prediction = tf.matmul(layer_3_output, weights) + biases
# Section Two: Define the cost function of the neural network that will be optimized during training
with tf.variable_scope('cost'):
Y = tf.placeholder(tf.float32, shape=(None, 1), name="Y")
cost = tf.reduce_mean(tf.squared_difference(prediction, Y))
# Section Three: Define the optimizer function that will be run to optimize the neural network
with tf.variable_scope('train'):
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Create a summary operation to log the progress of the network
with tf.variable_scope('logging'):
tf.summary.scalar('current_cost', cost)
summary = tf.summary.merge_all()
# Initialize a session so that we can run TensorFlow operations
with tf.Session() as session:
# Run the global variable initializer to initialize all variables and layers of the neural network
session.run(tf.global_variables_initializer())
# Create log file writers to record training progress.
# We'll store training and testing log data separately.
training_writer = tf.summary.FileWriter("./logs/{}/training".format(RUN_NAME), session.graph)
testing_writer = tf.summary.FileWriter("./logs/{}/testing".format(RUN_NAME), session.graph)
# Run the optimizer over and over to train the network.
# One epoch is one full run through the training data set.
for epoch in range(training_epochs):
# Feed in the training data and do one step of neural network training
session.run(optimizer, feed_dict={X: X_scaled_training, Y: Y_scaled_training})
# Every few training steps, log our progress
if epoch % 5 == 0:
# Get the current accuracy scores by running the "cost" operation on the training and test data sets
training_cost, training_summary = session.run([cost, summary], feed_dict={X: X_scaled_training, Y:Y_scaled_training})
testing_cost, testing_summary = session.run([cost, summary], feed_dict={X: X_scaled_testing, Y:Y_scaled_testing})
# Write the current training status to the log files (Which we can view with TensorBoard)
training_writer.add_summary(training_summary, epoch)
testing_writer.add_summary(testing_summary, epoch)
# Print the current training status to the screen
print("Epoch: {} - Training Cost: {} Testing Cost: {}".format(epoch, training_cost, testing_cost))
# Training is now complete!
# Get the final accuracy scores by running the "cost" operation on the training and test data sets
final_training_cost = session.run(cost, feed_dict={X: X_scaled_training, Y: Y_scaled_training})
final_testing_cost = session.run(cost, feed_dict={X: X_scaled_testing, Y: Y_scaled_testing})
print("Final Training cost: {}".format(final_training_cost))
print("Final Testing cost: {}".format(final_testing_cost))
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
router.go | // Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package beego
import (
"fmt"
"net/http"
"os"
"path"
"path/filepath"
"reflect"
// "runtime"
"strconv"
"strings"
"sync"
"time"
cc "context"
beecontext "github.com/tomasprotolog/beego/context"
"github.com/tomasprotolog/beego/logs"
"github.com/tomasprotolog/beego/toolbox"
"github.com/tomasprotolog/beego/utils"
)
// default filter execution points
const (
BeforeStatic = iota
BeforeRouter
BeforeExec
AfterExec
FinishRouter
)
const (
routerTypeBeego = iota
routerTypeRESTFul
routerTypeHandler
)
var (
// HTTPMETHOD list the supported http methods.
HTTPMETHOD = map[string]string{
"GET": "GET",
"POST": "POST",
"PUT": "PUT",
"DELETE": "DELETE",
"PATCH": "PATCH",
"OPTIONS": "OPTIONS",
"HEAD": "HEAD",
"TRACE": "TRACE",
"CONNECT": "CONNECT",
"MKCOL": "MKCOL",
"COPY": "COPY",
"MOVE": "MOVE",
"PROPFIND": "PROPFIND",
"PROPPATCH": "PROPPATCH",
"LOCK": "LOCK",
"UNLOCK": "UNLOCK",
}
// these beego.Controller's methods shouldn't reflect to AutoRouter
exceptMethod = []string{"Init", "Prepare", "Finish", "Render", "RenderString",
"RenderBytes", "Redirect", "Abort", "StopRun", "UrlFor", "ServeJSON", "ServeJSONP",
"ServeXML", "Input", "ParseForm", "GetString", "GetStrings", "GetInt", "GetBool",
"GetFloat", "GetFile", "SaveToFile", "StartSession", "SetSession", "GetSession",
"DelSession", "SessionRegenerateID", "DestroySession", "IsAjax", "GetSecureCookie",
"SetSecureCookie", "XsrfToken", "CheckXsrfCookie", "XsrfFormHtml",
"GetControllerAndAction", "ServeFormatted"}
urlPlaceholder = "{{placeholder}}"
// DefaultAccessLogFilter will skip the accesslog if return true
DefaultAccessLogFilter FilterHandler = &logFilter{}
)
// FilterHandler is an interface for
type FilterHandler interface {
Filter(*beecontext.Context) bool
}
// default log filter static file will not show
type logFilter struct {
}
func (l *logFilter) Filter(ctx *beecontext.Context) bool {
requestPath := path.Clean(ctx.Request.URL.Path)
if requestPath == "/favicon.ico" || requestPath == "/robots.txt" {
return true
}
for prefix := range BConfig.WebConfig.StaticDir {
if strings.HasPrefix(requestPath, prefix) {
return true
}
}
return false
}
// ExceptMethodAppend to append a slice's value into "exceptMethod", for controller's methods shouldn't reflect to AutoRouter
func ExceptMethodAppend(action string) {
exceptMethod = append(exceptMethod, action)
}
type controllerInfo struct {
pattern string
controllerType reflect.Type
methods map[string]string
handler http.Handler
runFunction FilterFunc
routerType int
}
// ControllerRegister containers registered router rules, controller handlers and filters.
type ControllerRegister struct {
routers map[string]*Tree
enablePolicy bool
policies map[string]*Tree
enableFilter bool
filters [FinishRouter + 1][]*FilterRouter
pool sync.Pool
}
// NewControllerRegister returns a new ControllerRegister.
func NewControllerRegister() *ControllerRegister {
cr := &ControllerRegister{
routers: make(map[string]*Tree),
policies: make(map[string]*Tree),
}
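	// Contexts are drawn from a sync.Pool so they can be reused across requests.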
cr.pool.New = func() interface{} {
return beecontext.NewContext()
}
return cr
}
// Add controller handler and pattern rules to ControllerRegister.
// usage:
// default methods is the same name as method
// Add("/user",&UserController{})
// Add("/api/list",&RestController{},"*:ListFood")
// Add("/api/create",&RestController{},"post:CreateFood")
// Add("/api/update",&RestController{},"put:UpdateFood")
// Add("/api/delete",&RestController{},"delete:DeleteFood")
// Add("/api",&RestController{},"get,post:ApiFunc"
// Add("/simple",&SimpleController{},"get:GetFunc;post:PostFunc")
func (p *ControllerRegister) Add(pattern string, c ControllerInterface, mappingMethods ...string) {
reflectVal := reflect.ValueOf(c)
t := reflect.Indirect(reflectVal).Type()
methods := make(map[string]string)
if len(mappingMethods) > 0 {
semi := strings.Split(mappingMethods[0], ";")
for _, v := range semi {
colon := strings.Split(v, ":")
if len(colon) != 2 {
panic("method mapping format is invalid")
}
comma := strings.Split(colon[0], ",")
for _, m := range comma {
if _, ok := HTTPMETHOD[strings.ToUpper(m)]; m == "*" || ok {
if val := reflectVal.MethodByName(colon[1]); val.IsValid() {
methods[strings.ToUpper(m)] = colon[1]
} else {
panic("'" + colon[1] + "' method doesn't exist in the controller " + t.Name())
}
} else {
panic(v + " is an invalid method mapping. Method doesn't exist " + m)
}
}
}
}
route := &controllerInfo{}
route.pattern = pattern
route.methods = methods
route.routerType = routerTypeBeego
route.controllerType = t
if len(methods) == 0 {
for _, m := range HTTPMETHOD {
p.addToRouter(m, pattern, route)
}
} else {
for k := range methods {
if k == "*" {
for _, m := range HTTPMETHOD {
p.addToRouter(m, pattern, route)
}
} else {
p.addToRouter(k, pattern, route)
}
}
}
}
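// addToRouter registers the route on the routing tree for the given HTTP method,
// creating the tree on first use; the pattern is lower-cased unless
// RouterCaseSensitive is enabled.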
func (p *ControllerRegister) addToRouter(method, pattern string, r *controllerInfo) {
if !BConfig.RouterCaseSensitive {
pattern = strings.ToLower(pattern)
}
if t, ok := p.routers[method]; ok {
t.AddRouter(pattern, r)
} else {
t := NewTree()
t.AddRouter(pattern, r)
p.routers[method] = t
}
}
// Include generates the router file router/auto.go from the controllers, but only when the RunMode is dev
// Include(&BankAccount{}, &OrderController{},&RefundController{},&ReceiptController{})
func (p *ControllerRegister) Include(cList ...ControllerInterface) {
if BConfig.RunMode == DEV {
skip := make(map[string]bool, 10)
for _, c := range cList {
reflectVal := reflect.ValueOf(c)
t := reflect.Indirect(reflectVal).Type()
gopath := os.Getenv("GOPATH")
if gopath == "" {
panic("you are in dev mode. So please set gopath")
}
pkgpath := ""
wgopath := filepath.SplitList(gopath)
for _, wg := range wgopath {
wg, _ = filepath.EvalSymlinks(filepath.Join(wg, "src", t.PkgPath()))
if utils.FileExists(wg) {
pkgpath = wg
break
}
}
if pkgpath != "" {
if _, ok := skip[pkgpath]; !ok {
skip[pkgpath] = true
parserPkg(pkgpath, t.PkgPath())
}
}
}
}
for _, c := range cList {
reflectVal := reflect.ValueOf(c)
t := reflect.Indirect(reflectVal).Type()
key := t.PkgPath() + ":" + t.Name()
if comm, ok := GlobalControllerRouter[key]; ok {
for _, a := range comm {
p.Add(a.Router, c, strings.Join(a.AllowHTTPMethods, ",")+":"+a.Method)
}
}
}
}
// Get add get method
// usage:
// Get("/", func(ctx *context.Context){
// ctx.Output.Body("hello world")
// })
func (p *ControllerRegister) Get(pattern string, f FilterFunc) {
p.AddMethod("get", pattern, f)
}
// Post add post method
// usage:
// Post("/api", func(ctx *context.Context){
// ctx.Output.Body("hello world")
// })
func (p *ControllerRegister) Post(pattern string, f FilterFunc) {
p.AddMethod("post", pattern, f)
}
// Put add put method
// usage:
// Put("/api/:id", func(ctx *context.Context){
// ctx.Output.Body("hello world")
// })
func (p *ControllerRegister) Put(pattern string, f FilterFunc) {
p.AddMethod("put", pattern, f)
}
// Delete adds a DELETE method route
// usage:
// Delete("/api/:id", func(ctx *context.Context){
// ctx.Output.Body("hello world")
// })
func (p *ControllerRegister) Delete(pattern string, f FilterFunc) {
p.AddMethod("delete", pattern, f)
}
// Head adds a HEAD method route
// usage:
// Head("/api/:id", func(ctx *context.Context){
// ctx.Output.Body("hello world")
// })
func (p *ControllerRegister) Head(pattern string, f FilterFunc) {
p.AddMethod("head", pattern, f)
}
// Patch adds a PATCH method route
// usage:
// Patch("/api/:id", func(ctx *context.Context){
// ctx.Output.Body("hello world")
// })
func (p *ControllerRegister) Patch(pattern string, f FilterFunc) {
p.AddMethod("patch", pattern, f)
}
// Options adds an OPTIONS method route
// usage:
// Options("/api/:id", func(ctx *context.Context){
// ctx.Output.Body("hello world")
// })
func (p *ControllerRegister) Options(pattern string, f FilterFunc) {
p.AddMethod("options", pattern, f)
}
// Any adds a route for all HTTP methods
// usage:
// Any("/api/:id", func(ctx *context.Context){
// ctx.Output.Body("hello world")
// })
func (p *ControllerRegister) Any(pattern string, f FilterFunc) {
p.AddMethod("*", pattern, f)
}
// AddMethod adds a router for the given HTTP method
// usage:
// AddMethod("get","/api/:id", func(ctx *context.Context){
// ctx.Output.Body("hello world")
// })
func (p *ControllerRegister) AddMethod(method, pattern string, f FilterFunc) {
method = strings.ToUpper(method)
if _, ok := HTTPMETHOD[method]; method != "*" && !ok {
panic("not support http method: " + method)
}
route := &controllerInfo{}
route.pattern = pattern
route.routerType = routerTypeRESTFul
route.runFunction = f
methods := make(map[string]string)
if method == "*" {
for _, val := range HTTPMETHOD {
methods[val] = val
}
} else {
methods[method] = method
}
route.methods = methods
for k := range methods {
if k == "*" {
for _, m := range HTTPMETHOD {
p.addToRouter(m, pattern, route)
}
} else {
p.addToRouter(k, pattern, route)
}
}
}
// Handler adds a user-defined http.Handler for the given pattern
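// usage (an illustrative sketch; the handler and paths below are hypothetical, not from the original source):
//  Handler("/static", http.FileServer(http.Dir("static")))
//  Handler("/proxy", myProxyHandler, true) // passing a bool option appends "?:all(.*)" so sub-paths are matched as well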
func (p *ControllerRegister) Handler(pattern string, h http.Handler, options ...interface{}) {
route := &controllerInfo{}
route.pattern = pattern
route.routerType = routerTypeHandler
route.handler = h
if len(options) > 0 {
if _, ok := options[0].(bool); ok {
pattern = path.Join(pattern, "?:all(.*)")
}
}
for _, m := range HTTPMETHOD {
p.addToRouter(m, pattern, route)
}
}
// AddAuto adds an auto router to ControllerRegister.
// example: beego.AddAuto(&MainController{});
// if MainController has methods List and Page,
// visiting the url /main/list executes the List function
// and /main/page executes the Page function.
func (p *ControllerRegister) AddAuto(c ControllerInterface) {
p.AddAutoPrefix("/", c)
}
// AddAutoPrefix adds an auto router to ControllerRegister with a prefix.
// example: beego.AddAutoPrefix("/admin",&MainController{});
// if MainController has methods List and Page,
// visiting the url /admin/main/list executes the List function
// and /admin/main/page executes the Page function.
func (p *ControllerRegister) AddAutoPrefix(prefix string, c ControllerInterface) {
reflectVal := reflect.ValueOf(c)
rt := reflectVal.Type()
ct := reflect.Indirect(reflectVal).Type()
controllerName := strings.TrimSuffix(ct.Name(), "Controller")
for i := 0; i < rt.NumMethod(); i++ {
if !utils.InSlice(rt.Method(i).Name, exceptMethod) {
route := &controllerInfo{}
route.routerType = routerTypeBeego
route.methods = map[string]string{"*": rt.Method(i).Name}
route.controllerType = ct
pattern := path.Join(prefix, strings.ToLower(controllerName), strings.ToLower(rt.Method(i).Name), "*")
patternInit := path.Join(prefix, controllerName, rt.Method(i).Name, "*")
patternFix := path.Join(prefix, strings.ToLower(controllerName), strings.ToLower(rt.Method(i).Name))
patternFixInit := path.Join(prefix, controllerName, rt.Method(i).Name)
route.pattern = pattern
for _, m := range HTTPMETHOD {
p.addToRouter(m, pattern, route)
p.addToRouter(m, patternInit, route)
p.addToRouter(m, patternFix, route)
p.addToRouter(m, patternFixInit, route)
}
}
}
}
// InsertFilter adds a FilterFunc with a pattern rule and an action constant.
// params is for:
// 1. setting the returnOnOutput value (false allows multiple filters to execute)
// 2. determining whether or not params need to be reset.
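// usage (an illustrative sketch; the filter body is hypothetical, not from the original source):
//  InsertFilter("/api/*", BeforeRouter, func(ctx *context.Context) {
//      ctx.Output.Header("X-Filtered", "true")
//  }, false) // returnOnOutput=false lets later filters run even after output has started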
func (p *ControllerRegister) InsertFilter(pattern string, pos int, filter FilterFunc, params ...bool) error {
mr := &FilterRouter{
tree: NewTree(),
pattern: pattern,
filterFunc: filter,
returnOnOutput: true,
}
if !BConfig.RouterCaseSensitive {
mr.pattern = strings.ToLower(pattern)
}
paramsLen := len(params)
if paramsLen > 0 {
mr.returnOnOutput = params[0]
}
if paramsLen > 1 {
mr.resetParams = params[1]
}
mr.tree.AddRouter(pattern, true)
return p.insertFilterRouter(pos, mr)
}
// insertFilterRouter adds a FilterRouter into the filter chain at the given position
func (p *ControllerRegister) insertFilterRouter(pos int, mr *FilterRouter) (err error) {
if pos < BeforeStatic || pos > FinishRouter {
err = fmt.Errorf("can not find your filter position")
return
}
p.enableFilter = true
p.filters[pos] = append(p.filters[pos], mr)
return nil
}
// URLFor builds the URL for another controller handler from within the current request.
// It can access any controller method.
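// usage (an illustrative sketch; the controller and parameter are hypothetical, not from the original source):
//  URLFor("UserController.Get", ":id", "123")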
func (p *ControllerRegister) URLFor(endpoint string, values ...interface{}) string {
paths := strings.Split(endpoint, ".")
if len(paths) <= 1 {
logs.Warn("urlfor endpoint must like path.controller.method")
return ""
}
if len(values)%2 != 0 {
logs.Warn("urlfor params must key-value pair")
return ""
}
params := make(map[string]string)
if len(values) > 0 {
key := ""
for k, v := range values {
if k%2 == 0 {
key = fmt.Sprint(v)
} else {
params[key] = fmt.Sprint(v)
}
}
}
controllName := strings.Join(paths[:len(paths)-1], "/")
methodName := paths[len(paths)-1]
for m, t := range p.routers {
ok, url := p.geturl(t, "/", controllName, methodName, params, m)
if ok {
return url
}
}
return ""
}
func (p *ControllerRegister) geturl(t *Tree, url, controllName, methodName string, params map[string]string, httpMethod string) (bool, string) {
for _, subtree := range t.fixrouters {
u := path.Join(url, subtree.prefix)
ok, u := p.geturl(subtree, u, controllName, methodName, params, httpMethod)
if ok {
return ok, u
}
}
if t.wildcard != nil {
u := path.Join(url, urlPlaceholder)
ok, u := p.geturl(t.wildcard, u, controllName, methodName, params, httpMethod)
if ok {
return ok, u
}
}
for _, l := range t.leaves {
if c, ok := l.runObject.(*controllerInfo); ok {
if c.routerType == routerTypeBeego &&
strings.HasSuffix(path.Join(c.controllerType.PkgPath(), c.controllerType.Name()), controllName) {
find := false
if _, ok := HTTPMETHOD[strings.ToUpper(methodName)]; ok {
if len(c.methods) == 0 {
find = true
} else if m, ok := c.methods[strings.ToUpper(methodName)]; ok && m == strings.ToUpper(methodName) {
find = true
} else if m, ok = c.methods["*"]; ok && m == methodName {
find = true
}
}
if !find {
for m, md := range c.methods {
if (m == "*" || m == httpMethod) && md == methodName {
find = true
}
}
}
if find {
if l.regexps == nil {
if len(l.wildcards) == 0 {
return true, strings.Replace(url, "/"+urlPlaceholder, "", 1) + toURL(params)
}
if len(l.wildcards) == 1 {
if v, ok := params[l.wildcards[0]]; ok {
delete(params, l.wildcards[0])
return true, strings.Replace(url, urlPlaceholder, v, 1) + toURL(params)
}
return false, ""
}
if len(l.wildcards) == 3 && l.wildcards[0] == "." {
if p, ok := params[":path"]; ok {
if e, isok := params[":ext"]; isok {
delete(params, ":path")
delete(params, ":ext")
return true, strings.Replace(url, urlPlaceholder, p+"."+e, -1) + toURL(params)
}
}
}
canskip := false
for _, v := range l.wildcards {
if v == ":" {
canskip = true
continue
}
if u, ok := params[v]; ok {
delete(params, v)
url = strings.Replace(url, urlPlaceholder, u, 1)
} else {
if canskip {
canskip = false
continue
}
return false, ""
}
}
return true, url + toURL(params)
}
var i int
var startreg bool
regurl := ""
for _, v := range strings.Trim(l.regexps.String(), "^$") {
if v == '(' {
startreg = true
continue
} else if v == ')' {
startreg = false
if v, ok := params[l.wildcards[i]]; ok {
delete(params, l.wildcards[i])
regurl = regurl + v
i++
} else {
break
}
} else if !startreg {
regurl = string(append([]rune(regurl), v))
}
}
if l.regexps.MatchString(regurl) {
ps := strings.Split(regurl, "/")
for _, p := range ps {
url = strings.Replace(url, urlPlaceholder, p, 1)
}
return true, url + toURL(params)
}
}
}
}
}
return false, ""
}
func (p *ControllerRegister) execFilter(context *beecontext.Context, urlPath string, pos int) (started bool) {
var preFilterParams map[string]string
for _, filterR := range p.filters[pos] {
if filterR.returnOnOutput && context.ResponseWriter.Started {
return true
}
if filterR.resetParams {
preFilterParams = context.Input.Params()
}
if ok := filterR.ValidRouter(urlPath, context); ok {
filterR.filterFunc(context)
if filterR.resetParams {
context.Input.ResetParams()
for k, v := range preFilterParams {
context.Input.SetParam(k, v)
}
}
}
if filterR.returnOnOutput && context.ResponseWriter.Started {
return true
}
}
return false
}
// ServeHTTP implements the http.Handler interface.
func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
startTime := time.Now()
var (
runRouter reflect.Type
findRouter bool
runMethod string
routerInfo *controllerInfo
isRunnable bool
)
context := p.pool.Get().(*beecontext.Context)
context.Reset(rw, r)
defer p.pool.Put(context)
if BConfig.RecoverFunc != nil {
defer BConfig.RecoverFunc(context)
}
context.Output.EnableGzip = BConfig.EnableGzip
if BConfig.RunMode == DEV {
context.Output.Header("Server", BConfig.ServerName)
}
var urlPath = r.URL.Path
if !BConfig.RouterCaseSensitive {
urlPath = strings.ToLower(urlPath)
}
// filter wrong http method
if _, ok := HTTPMETHOD[r.Method]; !ok {
http.Error(rw, "Method Not Allowed", 405)
goto Admin
}
// filter for static file
if len(p.filters[BeforeStatic]) > 0 && p.execFilter(context, urlPath, BeforeStatic) {
goto Admin
}
serverStaticRouter(context)
if context.ResponseWriter.Started {
findRouter = true
goto Admin
}
if r.Method != "GET" && r.Method != "HEAD" {
if BConfig.CopyRequestBody && !context.Input.IsUpload() {
context.Input.CopyBody(BConfig.MaxMemory)
}
context.Input.ParseFormOrMulitForm(BConfig.MaxMemory)
}
// session init
if BConfig.WebConfig.Session.SessionOn {
var err error
context.Input.CruSession, err = GlobalSessions.SessionStart(rw, r)
if err != nil {
logs.Error(err)
exception("503", context)
goto Admin
}
defer func() {
if context.Input.CruSession != nil {
context.Input.CruSession.SessionRelease(rw)
}
}()
}
if len(p.filters[BeforeRouter]) > 0 && p.execFilter(context, urlPath, BeforeRouter) {
goto Admin
}
// User can define RunController and RunMethod in filter
if context.Input.RunController != nil && context.Input.RunMethod != "" {
findRouter = true
isRunnable = true
runMethod = context.Input.RunMethod
runRouter = context.Input.RunController
} else {
routerInfo, findRouter = p.FindRouter(context)
}
//if there is no match for the url, throw a not found exception
if !findRouter {
exception("404", context)
goto Admin
}
if splat := context.Input.Param(":splat"); splat != "" {
for k, v := range strings.Split(splat, "/") {
context.Input.SetParam(strconv.Itoa(k), v)
}
}
//execute middleware filters
if len(p.filters[BeforeExec]) > 0 && p.execFilter(context, urlPath, BeforeExec) {
goto Admin
}
//check policies
if p.execPolicy(context, urlPath) {
goto Admin
}
if routerInfo != nil {
//store router pattern into context
context.Input.SetData("RouterPattern", routerInfo.pattern)
if routerInfo.routerType == routerTypeRESTFul {
if _, ok := routerInfo.methods[r.Method]; ok {
isRunnable = true
routerInfo.runFunction(context)
} else {
exception("405", context)
goto Admin
}
} else if routerInfo.routerType == routerTypeHandler {
isRunnable = true
routerInfo.handler.ServeHTTP(rw, r)
} else {
runRouter = routerInfo.controllerType
method := r.Method
if r.Method == "POST" && context.Input.Query("_method") == "PUT" {
method = "PUT"
}
if r.Method == "POST" && context.Input.Query("_method") == "DELETE" {
method = "DELETE"
}
if m, ok := routerInfo.methods[method]; ok {
runMethod = m
} else if m, ok = routerInfo.methods["*"]; ok {
runMethod = m
} else {
runMethod = method
}
}
}
// runRouter and runMethod may also have been set by a filter
if !isRunnable {
//Invoke the request handler
vc := reflect.New(runRouter)
execController, ok := vc.Interface().(ControllerInterface)
if !ok {
panic("controller is not ControllerInterface")
}
//call the controller init function
execController.Init(context, runRouter.Name(), runMethod, vc.Interface())
//call prepare function
execController.Prepare()
//if XSRF is enabled, check whether the request carries the _csrf cookie
if BConfig.WebConfig.EnableXSRF {
execController.XSRFToken()
if r.Method == "POST" || r.Method == "DELETE" || r.Method == "PUT" ||
(r.Method == "POST" && (context.Input.Query("_method") == "DELETE" || context.Input.Query("_method") == "PUT")) {
execController.CheckXSRFCookie()
}
}
execController.URLMapping()
if !context.ResponseWriter.Started {
//exec main logic
switch runMethod {
case "GET":
execController.Get()
case "POST":
execController.Post()
case "DELETE":
execController.Delete()
case "PUT":
execController.Put()
case "HEAD":
execController.Head()
case "PATCH":
execController.Patch()
case "OPTIONS":
execController.Options()
default:
if !execController.HandlerFunc(runMethod) {
var in []reflect.Value
method := vc.MethodByName(runMethod)
method.Call(in)
}
}
//render template
if !context.ResponseWriter.Started && context.Output.Status == 0 {
if BConfig.WebConfig.AutoRender {
if err := execController.Render(); err != nil {
logs.Error(err)
}
}
}
}
// the controller run is complete; call Finish to release resources
execController.Finish()
}
//execute middleware filters
if len(p.filters[AfterExec]) > 0 && p.execFilter(context, urlPath, AfterExec) {
goto Admin
}
if len(p.filters[FinishRouter]) > 0 && p.execFilter(context, urlPath, FinishRouter) {
goto Admin
}
Admin:
//admin module record QPS
if BConfig.Listen.EnableAdmin {
timeDur := time.Since(startTime)
if FilterMonitorFunc(r.Method, r.URL.Path, timeDur) {
if runRouter != nil {
go toolbox.StatisticsMap.AddStatistics(r.Method, r.URL.Path, runRouter.Name(), timeDur)
} else {
go toolbox.StatisticsMap.AddStatistics(r.Method, r.URL.Path, "", timeDur)
}
}
}
if BConfig.RunMode == DEV || BConfig.Log.AccessLogs {
timeDur := time.Since(startTime)
var devInfo string
statusCode := context.ResponseWriter.Status
if statusCode == 0 {
statusCode = 200
}
iswin := false //(runtime.GOOS == "windows")
statusColor := logs.ColorByStatus(iswin, statusCode)
methodColor := logs.ColorByMethod(iswin, r.Method)
resetColor := logs.ColorByMethod(iswin, "")
tmpCtx := cc.Background()
if val, ok := context.Input.Data()["RefID"].(string); ok {
tmpCtx = cc.WithValue(tmpCtx, "RefID", val)
}
if val, ok := context.Input.Data()["RefIP"].(string); ok {
tmpCtx = cc.WithValue(tmpCtx, "RefIP", val)
}
if val, ok := context.Input.Data()["UserName"].(string); ok {
tmpCtx = cc.WithValue(tmpCtx, "UserName", val)
}
if findRouter {
if routerInfo != nil {
devInfo = fmt.Sprintf("|%15s|%s %3d %s|%13s|%8s|%s %-7s %s %-3s r:%s", context.Input.IP(), statusColor, statusCode,
resetColor, timeDur.String(), "match", methodColor, r.Method, resetColor, r.URL.Path,
routerInfo.pattern)
} else {
devInfo = fmt.Sprintf("|%15s|%s %3d %s|%13s|%8s|%s %-7s %s %-3s", context.Input.IP(), statusColor, statusCode, resetColor,
timeDur.String(), "match", methodColor, r.Method, resetColor, r.URL.Path)
}
} else {
devInfo = fmt.Sprintf("|%15s|%s %3d %s|%13s|%8s|%s %-7s %s %-3s", context.Input.IP(), statusColor, statusCode, resetColor,
timeDur.String(), "nomatch", methodColor, r.Method, resetColor, r.URL.Path)
}
//if iswin {
//logs.W32Debug(devInfo)
//} else {
logs.Debugctx(tmpCtx, devInfo)
//}
}
// Call WriteHeader if the status code has been set
if context.Output.Status != 0 {
context.ResponseWriter.WriteHeader(context.Output.Status)
}
}
// FindRouter finds the router info for the request URL
func (p *ControllerRegister) FindRouter(context *beecontext.Context) (routerInfo *controllerInfo, isFind bool) {
var urlPath = context.Input.URL()
if !BConfig.RouterCaseSensitive {
urlPath = strings.ToLower(urlPath)
}
httpMethod := context.Input.Method()
if t, ok := p.routers[httpMethod]; ok {
runObject := t.Match(urlPath, context)
if r, ok := runObject.(*controllerInfo); ok {
return r, true
}
}
return
}
func toURL(params map[string]string) string {
if len(params) == 0 {
return ""
}
u := "?"
for k, v := range params {
u += k + "=" + v + "&"
}
return strings.TrimRight(u, "&")
}
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |
distribution/push_v2.go | package distribution // import "github.com/docker/docker/distribution"
import (
"context"
"fmt"
"io"
"os"
"runtime"
"sort"
"strings"
"sync"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/client"
apitypes "github.com/docker/docker/api/types"
"github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/registry"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
smallLayerMaximumSize = 100 * (1 << 10) // 100KB
middleLayerMaximumSize = 10 * (1 << 20) // 10MB
)
// newPusher creates a new pusher for pushing to a v2 registry.
// The parameters are passed through to the underlying pusher implementation for
// use during the actual push operation.
func newPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, config *ImagePushConfig) *pusher {
return &pusher{
metadataService: metadata.NewV2MetadataService(config.MetadataStore),
ref: ref,
endpoint: endpoint,
repoInfo: repoInfo,
config: config,
}
}
type pusher struct {
metadataService metadata.V2MetadataService
ref reference.Named
endpoint registry.APIEndpoint
repoInfo *registry.RepositoryInfo
config *ImagePushConfig
repo distribution.Repository
// pushState is state built by the Upload functions.
pushState pushState
}
type pushState struct {
sync.Mutex
// remoteLayers is the set of layers known to exist on the remote side.
// This avoids redundant queries when pushing multiple tags that
// involve the same layers. It is also used to fill in digest and size
// information when building the manifest.
remoteLayers map[layer.DiffID]distribution.Descriptor
hasAuthInfo bool
}
// TODO(tiborvass): have push() take a reference to repository + tag, so that the pusher itself is repository-agnostic.
func (p *pusher) push(ctx context.Context) (err error) {
p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)
p.repo, err = newRepository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
p.pushState.hasAuthInfo = p.config.AuthConfig.RegistryToken != "" || (p.config.AuthConfig.Username != "" && p.config.AuthConfig.Password != "")
if err != nil {
logrus.Debugf("Error getting v2 registry: %v", err)
return err
}
if err = p.pushRepository(ctx); err != nil {
if continueOnError(err, p.endpoint.Mirror) {
return fallbackError{
err: err,
transportOK: true,
}
}
}
return err
}
func (p *pusher) pushRepository(ctx context.Context) (err error) {
if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged {
imageID, err := p.config.ReferenceStore.Get(p.ref)
if err != nil {
return fmt.Errorf("tag does not exist: %s", reference.FamiliarString(p.ref))
}
return p.pushTag(ctx, namedTagged, imageID)
}
if !reference.IsNameOnly(p.ref) {
return errors.New("cannot push a digest reference")
}
// Push all tags
pushed := 0
for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) {
if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged {
pushed++
if err := p.pushTag(ctx, namedTagged, association.ID); err != nil {
return err
}
}
}
if pushed == 0 {
return fmt.Errorf("no tags to push for %s", reference.FamiliarName(p.repoInfo.Name))
}
return nil
}
func (p *pusher) pushTag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error {
logrus.Debugf("Pushing repository: %s", reference.FamiliarString(ref))
imgConfig, err := p.config.ImageStore.Get(ctx, id)
if err != nil {
return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err)
}
rootfs, err := rootFSFromConfig(imgConfig)
if err != nil {
return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err)
}
l, err := p.config.LayerStores.Get(rootfs.ChainID())
if err != nil {
return fmt.Errorf("failed to get top layer from image: %v", err)
}
defer l.Release()
hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig)
if err != nil {
return fmt.Errorf("failed to compute hmac key of auth config: %v", err)
}
var descriptors []xfer.UploadDescriptor
descriptorTemplate := pushDescriptor{
metadataService: p.metadataService,
hmacKey: hmacKey,
repoInfo: p.repoInfo.Name,
ref: p.ref,
endpoint: p.endpoint,
repo: p.repo,
pushState: &p.pushState,
}
// Loop bounds condition is to avoid pushing the base layer on Windows.
for range rootfs.DiffIDs {
descriptor := descriptorTemplate
descriptor.layer = l
descriptor.checkedDigests = make(map[digest.Digest]struct{})
descriptors = append(descriptors, &descriptor)
l = l.Parent()
}
if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil {
return err
}
// Try schema2 first
builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig)
manifest, err := manifestFromBuilder(ctx, builder, descriptors)
if err != nil {
return err
}
manSvc, err := p.repo.Manifests(ctx)
if err != nil {
return err
}
putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())}
if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 {
logrus.Warnf("failed to upload schema2 manifest: %v", err)
return err
}
// This is a temporary environment variable used in CI to allow pushing
// manifest v2 schema 1 images to test-registries used for testing *pulling*
// these images.
if os.Getenv("DOCKER_ALLOW_SCHEMA1_PUSH_DONOTUSE") == "" {
if err.Error() == "tag invalid" {
msg := "[DEPRECATED] support for pushing manifest v2 schema1 images has been removed. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/"
logrus.WithError(err).Error(msg)
return errors.Wrap(err, msg)
}
return err
}
logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err)
// Note: this fallback is deprecated, see log messages below
manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag())
if err != nil {
return err
}
builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig)
manifest, err = manifestFromBuilder(ctx, builder, descriptors)
if err != nil {
return err
}
if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
return err
}
// schema2 failed but schema1 succeeded
msg := fmt.Sprintf("[DEPRECATION NOTICE] support for pushing manifest v2 schema1 images will be removed in an upcoming release. Please contact admins of the %s registry NOW to avoid future disruption. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", reference.Domain(ref))
logrus.Warn(msg)
progress.Message(p.config.ProgressOutput, "", msg)
}
var canonicalManifest []byte
switch v := manifest.(type) {
case *schema1.SignedManifest:
canonicalManifest = v.Canonical
case *schema2.DeserializedManifest:
_, canonicalManifest, err = v.Payload()
if err != nil {
return err
}
}
manifestDigest := digest.FromBytes(canonicalManifest)
progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest))
if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
return err
}
// Signal digest to the trust client so it can sign the
// push, if appropriate.
progress.Aux(p.config.ProgressOutput, apitypes.PushResult{Tag: ref.Tag(), Digest: manifestDigest.String(), Size: len(canonicalManifest)})
return nil
}
func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) {
// descriptors is in reverse order; iterate backwards to get references
// appended in the right order.
for i := len(descriptors) - 1; i >= 0; i-- {
if err := builder.AppendReference(descriptors[i].(*pushDescriptor)); err != nil {
return nil, err
}
}
return builder.Build(ctx)
}
type pushDescriptor struct {
layer PushLayer
metadataService metadata.V2MetadataService
hmacKey []byte
repoInfo reference.Named
ref reference.Named
endpoint registry.APIEndpoint
repo distribution.Repository
pushState *pushState
remoteDescriptor distribution.Descriptor
// a set of digests whose presence has been checked in a target repository
checkedDigests map[digest.Digest]struct{}
}
func (pd *pushDescriptor) Key() string {
return "v2push:" + pd.ref.Name() + " " + pd.layer.DiffID().String()
}
func (pd *pushDescriptor) ID() string {
return stringid.TruncateID(pd.layer.DiffID().String())
}
func (pd *pushDescriptor) DiffID() layer.DiffID {
return pd.layer.DiffID()
}
func (pd *pushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
// Skip foreign layers unless this registry allows nondistributable artifacts.
if !pd.endpoint.AllowNondistributableArtifacts {
if fs, ok := pd.layer.(distribution.Describable); ok {
if d := fs.Descriptor(); len(d.URLs) > 0 {
progress.Update(progressOutput, pd.ID(), "Skipped foreign layer")
return d, nil
}
}
}
diffID := pd.DiffID()
pd.pushState.Lock()
if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok {
// it is already known that the push is not needed and
// therefore doing a stat is unnecessary
pd.pushState.Unlock()
progress.Update(progressOutput, pd.ID(), "Layer already exists")
return descriptor, nil
}
pd.pushState.Unlock()
maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer)
// Do we have any metadata associated with this layer's DiffID?
metaData, err := pd.metadataService.GetMetadata(diffID)
if err == nil {
// check for blob existence in the target repository
descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, true, 1, metaData)
if exists || err != nil {
return descriptor, err
}
}
// if digest was empty or not saved, or if blob does not exist on the remote repository,
// then push the blob.
bs := pd.repo.Blobs(ctx)
var layerUpload distribution.BlobWriter
// Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload
candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, metaData)
isUnauthorizedError := false
for _, mc := range candidates {
mountCandidate := mc
logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository)
createOpts := []distribution.BlobCreateOption{}
if len(mountCandidate.SourceRepository) > 0 {
namedRef, err := reference.ParseNormalizedNamed(mountCandidate.SourceRepository)
if err != nil {
logrus.WithError(err).Errorf("failed to parse source repository reference %v", reference.FamiliarString(namedRef))
_ = pd.metadataService.Remove(mountCandidate)
continue
}
// Candidates are always under same domain, create remote reference
// with only path to set mount from with
remoteRef, err := reference.WithName(reference.Path(namedRef))
if err != nil {
logrus.WithError(err).Errorf("failed to make remote reference out of %q", reference.Path(namedRef))
continue
}
canonicalRef, err := reference.WithDigest(reference.TrimNamed(remoteRef), mountCandidate.Digest)
if err != nil {
logrus.WithError(err).Error("failed to make canonical reference")
continue
}
createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
}
// send the layer
lu, err := bs.Create(ctx, createOpts...)
switch err := err.(type) {
case nil:
// noop
case distribution.ErrBlobMounted:
progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())
err.Descriptor.MediaType = schema2.MediaTypeLayer
pd.pushState.Lock()
pd.pushState.remoteLayers[diffID] = err.Descriptor
pd.pushState.Unlock()
// Cache mapping from this layer's DiffID to the blobsum
if err := pd.metadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
Digest: err.Descriptor.Digest,
SourceRepository: pd.repoInfo.Name(),
}); err != nil {
return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
}
return err.Descriptor, nil
case errcode.Errors:
for _, e := range err {
switch e := e.(type) {
case errcode.Error:
if e.Code == errcode.ErrorCodeUnauthorized {
// an unauthorized error indicates that the user does not have the right to push the layer to the registry
logrus.Debugln("failed to push layer to registry because unauthorized error")
isUnauthorizedError = true
}
default:
}
}
default:
logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err)
}
// when the error is an unauthorized error and the user has no auth info, the user has no right to push the layer to the registry
// and is not logged in either; in that case the candidate should be removed from the cache
if len(mountCandidate.SourceRepository) > 0 &&
!(isUnauthorizedError && !pd.pushState.hasAuthInfo) &&
(metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) ||
len(mountCandidate.HMAC) == 0) {
cause := "blob mount failure"
if err != nil {
cause = fmt.Sprintf("an error: %v", err.Error())
}
logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause)
_ = pd.metadataService.Remove(mountCandidate)
}
if lu != nil {
// cancel previous upload
cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload)
layerUpload = lu
}
}
if maxExistenceChecks-len(pd.checkedDigests) > 0 {
// do additional layer existence checks with other known digests if any
descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), metaData)
if exists || err != nil {
return descriptor, err
}
}
logrus.Debugf("Pushing layer: %s", diffID)
if layerUpload == nil {
layerUpload, err = bs.Create(ctx)
if err != nil {
return distribution.Descriptor{}, retryOnError(err)
}
}
defer layerUpload.Close()
// upload the blob
return pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload)
}
func (pd *pushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) {
pd.remoteDescriptor = descriptor
}
func (pd *pushDescriptor) Descriptor() distribution.Descriptor {
return pd.remoteDescriptor
}
func (pd *pushDescriptor) uploadUsingSession(
ctx context.Context,
progressOutput progress.Output,
diffID layer.DiffID,
layerUpload distribution.BlobWriter,
) (distribution.Descriptor, error) {
var reader io.ReadCloser
contentReader, err := pd.layer.Open()
if err != nil {
return distribution.Descriptor{}, retryOnError(err)
}
reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, pd.layer.Size(), pd.ID(), "Pushing")
switch m := pd.layer.MediaType(); m {
case schema2.MediaTypeUncompressedLayer:
compressedReader, compressionDone := compress(reader)
defer func(closer io.Closer) {
closer.Close()
<-compressionDone
}(reader)
reader = compressedReader
case schema2.MediaTypeLayer:
default:
reader.Close()
return distribution.Descriptor{}, xfer.DoNotRetry{Err: fmt.Errorf("unsupported layer media type %s", m)}
}
digester := digest.Canonical.Digester()
tee := io.TeeReader(reader, digester.Hash())
nn, err := layerUpload.ReadFrom(tee)
reader.Close()
if err != nil {
return distribution.Descriptor{}, retryOnError(err)
}
pushDigest := digester.Digest()
if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
return distribution.Descriptor{}, retryOnError(err)
}
logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
progress.Update(progressOutput, pd.ID(), "Pushed")
// Cache mapping from this layer's DiffID to the blobsum
if err := pd.metadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
Digest: pushDigest,
SourceRepository: pd.repoInfo.Name(),
}); err != nil {
return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
}
desc := distribution.Descriptor{
Digest: pushDigest,
MediaType: schema2.MediaTypeLayer,
Size: nn,
}
pd.pushState.Lock()
pd.pushState.remoteLayers[diffID] = desc
pd.pushState.Unlock()
return desc, nil
}
// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the "metadata"
// slice. If it finds one that the registry knows about, it returns the known digest and "true". If
// "checkOtherRepositories" is true, stat will be performed also with digests mapped to any other repository
// (not just the target one).
func (pd *pushDescriptor) layerAlreadyExists(
ctx context.Context,
progressOutput progress.Output,
diffID layer.DiffID,
checkOtherRepositories bool,
maxExistenceCheckAttempts int,
v2Metadata []metadata.V2Metadata,
) (desc distribution.Descriptor, exists bool, err error) {
// filter the metadata
candidates := []metadata.V2Metadata{}
for _, meta := range v2Metadata {
if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.Name() {
continue
}
candidates = append(candidates, meta)
}
// sort the candidates by similarity
sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates)
digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata)
// an array of unique blob digests ordered from the best mount candidates to worst
layerDigests := []digest.Digest{}
for i := 0; i < len(candidates); i++ {
if len(layerDigests) >= maxExistenceCheckAttempts {
break
}
meta := &candidates[i]
if _, exists := digestToMetadata[meta.Digest]; exists {
// keep reference just to the first mapping (the best mount candidate)
continue
}
if _, exists := pd.checkedDigests[meta.Digest]; exists {
// existence of this digest has already been tested
continue
}
digestToMetadata[meta.Digest] = meta
layerDigests = append(layerDigests, meta.Digest)
}
attempts:
for _, dgst := range layerDigests {
meta := digestToMetadata[dgst]
logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name())
desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst)
pd.checkedDigests[meta.Digest] = struct{}{}
switch err {
case nil:
if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.Name() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) {
// cache mapping from this layer's DiffID to the blobsum
if err := pd.metadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
Digest: desc.Digest,
SourceRepository: pd.repoInfo.Name(),
}); err != nil {
return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err}
}
}
desc.MediaType = schema2.MediaTypeLayer
exists = true
break attempts
case distribution.ErrBlobUnknown:
if meta.SourceRepository == pd.repoInfo.Name() {
// remove the mapping to the target repository
pd.metadataService.Remove(*meta)
}
default:
logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name())
}
}
if exists {
progress.Update(progressOutput, pd.ID(), "Layer already exists")
pd.pushState.Lock()
pd.pushState.remoteLayers[diffID] = desc
pd.pushState.Unlock()
}
return desc, exists, nil
}
// getMaxMountAndExistenceCheckAttempts returns a maximum number of cross repository mount attempts from
// source repositories of target registry, maximum number of layer existence checks performed on the target
// repository and whether the check shall be done also with digests mapped to different repositories. The
// decision is based on layer size. The smaller the layer, the fewer attempts shall be made because the cost
// of uploading it does not outweigh the latency of the extra checks.
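// For example, a blob larger than middleLayerMaximumSize (10MB) gets up to 4 mount attempts and 3 existence
// checks against digests mapped to any repository, while a blob of at most smallLayerMaximumSize (100KB) gets 1 of each.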
func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) {
size := layer.Size()
switch {
// big blob
case size > middleLayerMaximumSize:
// 1st attempt to mount the blob few times
// 2nd few existence checks with digests associated to any repository
// then fallback to upload
return 4, 3, true
// middle sized blobs; if we could not get the size, assume we deal with middle sized blob
case size > smallLayerMaximumSize:
// 1st attempt to mount blobs of average size few times
// 2nd try at most 1 existence check if there's an existing mapping to the target repository
// then fallback to upload
return 3, 1, false
// small blobs, do a minimum number of checks
default:
return 1, 1, false
}
}
// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The
// array is sorted from youngest to oldest. The resulting array contains only metadata entries whose
// SourceRepository has the same registry (domain) part as repoInfo; the target repository itself is excluded.
func getRepositoryMountCandidates(
repoInfo reference.Named,
hmacKey []byte,
max int,
v2Metadata []metadata.V2Metadata,
) []metadata.V2Metadata {
candidates := []metadata.V2Metadata{}
for _, meta := range v2Metadata {
sourceRepo, err := reference.ParseNamed(meta.SourceRepository)
if err != nil || reference.Domain(repoInfo) != reference.Domain(sourceRepo) {
continue
}
// target repository is not a viable candidate
if meta.SourceRepository == repoInfo.Name() {
continue
}
candidates = append(candidates, meta)
}
sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates)
if max >= 0 && len(candidates) > max {
// select the youngest metadata
candidates = candidates[:max]
}
return candidates
}
// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. The
// candidate "a" is preferred over "b":
//
// 1. if it was hashed using the same AuthConfig as the one used to authenticate to the target repository and "b" was not
// 2. if it has more repository path components exactly matching the path components of the target repository
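// For example, with target repository "docker.io/library/app", a candidate stored under "docker.io/library/base"
// (two matching path components) sorts ahead of one stored under "docker.io/other/app" (one matching component),
// all else being equal.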
type byLikeness struct {
arr []metadata.V2Metadata
hmacKey []byte
pathComponents []string
}
func (bla byLikeness) Less(i, j int) bool {
aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey)
bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey)
if aMacMatch != bMacMatch {
return aMacMatch
}
aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents)
bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents)
return aMatch > bMatch
}
func (bla byLikeness) Swap(i, j int) {
bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i]
}
func (bla byLikeness) Len() int { return len(bla.arr) }
func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) {
// reverse the metadata array to shift the newest entries to the beginning
for i := 0; i < len(marr)/2; i++ {
marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i]
}
// keep equal entries ordered from the youngest to the oldest
sort.Stable(byLikeness{
arr: marr,
hmacKey: hmacKey,
pathComponents: getPathComponents(repoInfo.Name()),
})
}
// numOfMatchingPathComponents returns a number of path components in "pth" that exactly match "matchComponents".
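// For example, numOfMatchingPathComponents("library/ubuntu", []string{"library", "debian"}) returns 1.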
func numOfMatchingPathComponents(pth string, matchComponents []string) int {
pthComponents := getPathComponents(pth)
i := 0
for ; i < len(pthComponents) && i < len(matchComponents); i++ {
if matchComponents[i] != pthComponents[i] {
return i
}
}
return i
}
func getPathComponents(path string) []string {
return strings.Split(path, "/")
}
func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) {
if layerUpload != nil {
logrus.Debugf("cancelling upload of blob %s", dgst)
err := layerUpload.Cancel(ctx)
if err != nil {
logrus.Warnf("failed to cancel upload: %v", err)
}
}
}
| ["\"DOCKER_ALLOW_SCHEMA1_PUSH_DONOTUSE\""] | [] | ["DOCKER_ALLOW_SCHEMA1_PUSH_DONOTUSE"] | [] | ["DOCKER_ALLOW_SCHEMA1_PUSH_DONOTUSE"] | go | 1 | 0 | |
cmd/autoscaler/main.go | /*
Copyright 2021 Cortex Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"flag"
"fmt"
"net/http"
"os"
"os/signal"
"strconv"
"time"
"github.com/cortexlabs/cortex/pkg/autoscaler"
"github.com/cortexlabs/cortex/pkg/lib/aws"
"github.com/cortexlabs/cortex/pkg/lib/errors"
"github.com/cortexlabs/cortex/pkg/lib/k8s"
"github.com/cortexlabs/cortex/pkg/lib/logging"
"github.com/cortexlabs/cortex/pkg/lib/telemetry"
"github.com/cortexlabs/cortex/pkg/types/clusterconfig"
"github.com/cortexlabs/cortex/pkg/types/userconfig"
"github.com/gorilla/mux"
promapi "github.com/prometheus/client_golang/api"
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
"go.uber.org/zap"
istioclient "istio.io/client-go/pkg/clientset/versioned"
istioinformers "istio.io/client-go/pkg/informers/externalversions"
"k8s.io/apimachinery/pkg/api/meta"
kmeta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
)
func main() {
var (
port int
inCluster bool
prometheusURL string
namespace string
clusterConfigPath string
)
flag.IntVar(&port, "port", 8000, "port where the autoscaler server will be exposed")
flag.BoolVar(&inCluster, "in-cluster", false, "use when autoscaler runs in-cluster")
flag.StringVar(&prometheusURL, "prometheus-url", os.Getenv("CORTEX_PROMETHEUS_URL"),
"prometheus url (can be set through the CORTEX_PROMETHEUS_URL env variable)",
)
flag.StringVar(&namespace, "namespace", os.Getenv("CORTEX_NAMESPACE"),
"kubernetes namespace where the cortex APIs are deployed "+
"(can be set through the CORTEX_NAMESPACE env variable)",
)
flag.StringVar(&clusterConfigPath, "cluster-config", "", "cluster config path")
flag.Parse()
log := logging.GetLogger()
defer func() {
_ = log.Sync()
}()
switch {
case prometheusURL == "":
log.Fatal("--prometheus-url is a required option")
case namespace == "":
log.Fatal("--namespace is a required option")
case clusterConfigPath == "":
log.Fatal("--cluster-config flag is required")
}
clusterConfig, err := clusterconfig.NewForFile(clusterConfigPath)
if err != nil {
exit(log, err)
}
awsClient, err := aws.NewForRegion(clusterConfig.Region)
if err != nil {
exit(log, err)
}
_, userID, err := awsClient.CheckCredentials()
if err != nil {
exit(log, err)
}
err = telemetry.Init(telemetry.Config{
Enabled: clusterConfig.Telemetry,
UserID: userID,
Properties: map[string]string{
"kind": userconfig.RealtimeAPIKind.String(),
"image_type": "autoscaler",
},
Environment: "operator",
LogErrors: true,
BackoffMode: telemetry.BackoffDuplicateMessages,
})
if err != nil {
log.Fatalw("failed to initialize telemetry", zap.Error(err))
}
defer telemetry.Close()
scheme := runtime.NewScheme()
if err := clientgoscheme.AddToScheme(scheme); err != nil {
exit(log, err, "failed to add k8s client-go-scheme to scheme")
}
k8sClient, err := k8s.New(namespace, inCluster, nil, scheme)
if err != nil {
exit(log, err, "failed to initialize kubernetes client")
}
//goland:noinspection GoNilness
istioClient, err := istioclient.NewForConfig(k8sClient.RestConfig)
if err != nil {
exit(log, err, "failed to initialize istio client")
}
promClient, err := promapi.NewClient(
promapi.Config{
Address: prometheusURL,
},
)
if err != nil {
exit(log, err, "failed to initialize prometheus client")
}
promAPIClient := promv1.NewAPI(promClient)
realtimeScaler := autoscaler.NewRealtimeScaler(k8sClient, promAPIClient, log)
asyncScaler := autoscaler.NewAsyncScaler(k8sClient, promAPIClient)
autoScaler := autoscaler.New(log)
autoScaler.AddScaler(realtimeScaler, userconfig.RealtimeAPIKind)
autoScaler.AddScaler(asyncScaler, userconfig.AsyncAPIKind)
defer autoScaler.Stop()
istioInformerFactory := istioinformers.NewSharedInformerFactoryWithOptions(
istioClient, 10*time.Second, // TODO: check how much makes sense
istioinformers.WithNamespace(namespace),
istioinformers.WithTweakListOptions(informerFilter),
)
virtualServiceInformer := istioInformerFactory.Networking().V1beta1().VirtualServices().Informer()
virtualServiceInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
resource, err := meta.Accessor(obj)
if err != nil {
log.Errorw("failed to access resource metadata", zap.Error(err))
return
}
if resource.GetNamespace() != namespace {
// filter out virtual services that are not in the cortex namespace
return
}
api, err := apiResourceFromLabels(resource.GetLabels())
if err != nil {
// filter out non-cortex apis
return
}
if err := autoScaler.AddAPI(api); err != nil {
log.Errorw("failed to add API to autoscaler",
zap.Error(err),
zap.String("apiName", api.Name),
zap.String("apiKind", api.Kind.String()),
)
return
}
},
DeleteFunc: func(obj interface{}) {
resource, err := meta.Accessor(obj)
if err != nil {
log.Errorw("failed to access resource metadata", zap.Error(err))
}
if resource.GetNamespace() != namespace {
// filter out virtual services that are not in the cortex namespace
return
}
api, err := apiResourceFromLabels(resource.GetLabels())
if err != nil {
// filter out non-cortex apis
return
}
autoScaler.RemoveAPI(api)
},
},
)
handler := autoscaler.NewHandler(autoScaler)
router := mux.NewRouter()
router.HandleFunc("/awaken", handler.Awaken).Methods(http.MethodPost)
router.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte("ok"))
}).Methods(http.MethodGet)
server := &http.Server{
Addr: ":" + strconv.Itoa(port),
Handler: router,
}
stopCh := make(chan struct{})
go virtualServiceInformer.Run(stopCh)
defer func() { stopCh <- struct{}{} }()
errCh := make(chan error)
go func() {
log.Infof("Starting autoscaler server on %s", server.Addr)
errCh <- server.ListenAndServe()
}()
sigint := make(chan os.Signal, 1)
signal.Notify(sigint, os.Interrupt)
select {
case err = <-errCh:
exit(log, err, "failed to start autoscaler server")
case <-sigint:
// We received an interrupt signal, shut down.
log.Info("Received TERM signal, handling a graceful shutdown...")
log.Info("Shutting down server")
if err = server.Shutdown(context.Background()); err != nil {
// Error from closing listeners, or context timeout:
log.Warnw("HTTP server Shutdown Error", zap.Error(err))
}
log.Info("Shutdown complete, exiting...")
}
}
func apiResourceFromLabels(labels map[string]string) (userconfig.Resource, error) {
apiName, ok := labels["apiName"]
if !ok {
return userconfig.Resource{}, fmt.Errorf("apiName key does not exist")
}
apiKind, ok := labels["apiKind"]
if !ok {
return userconfig.Resource{}, fmt.Errorf("apiKind key does not exist")
}
return userconfig.Resource{
Name: apiName,
Kind: userconfig.KindFromString(apiKind),
}, nil
}
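// informerFilter restricts the informer to virtual services that carry both the "apiName" and "apiKind"
// labels (equivalent to the label selector "apiName,apiKind").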
func informerFilter(listOptions *kmeta.ListOptions) {
listOptions.LabelSelector = kmeta.FormatLabelSelector(&kmeta.LabelSelector{
MatchExpressions: []kmeta.LabelSelectorRequirement{
{
Key: "apiName",
Operator: kmeta.LabelSelectorOpExists,
},
{
Key: "apiKind",
Operator: kmeta.LabelSelectorOpExists,
},
},
})
}
func exit(log *zap.SugaredLogger, err error, wrapStrs ...string) {
for _, str := range wrapStrs {
err = errors.Wrap(err, str)
}
if err != nil && !errors.IsNoTelemetry(err) {
telemetry.Error(err)
}
if err != nil && !errors.IsNoPrint(err) {
log.Error(err)
}
os.Exit(1)
}
| ["\"CORTEX_PROMETHEUS_URL\"", "\"CORTEX_NAMESPACE\""] | [] | ["CORTEX_PROMETHEUS_URL", "CORTEX_NAMESPACE"] | [] | ["CORTEX_PROMETHEUS_URL", "CORTEX_NAMESPACE"] | go | 2 | 0 | |
src/SparseSC/utils/match_space.py | """ Utils for getting match spaces (Matching features and potentially feature weights)
"""
# To do:
# - Have the factories return partial objects rather than anonymous functions. That way they can be pickled and parallelized in get_c_predictions_honest
# - Implement Post-lasso versions. Could do MT OLS as fully separate then aggregate coefs like with Lasso.
# (Though some coefficients aren't well estimated, we don't want to just take t-stats as we still want to be fit-based.
# Ideally we'd have something like marginal R2, but the initial method is probably fine for most uses. (We could standardize input features).)
import numpy as np
from sklearn.linear_model import MultiTaskLassoCV, MultiTaskLasso, LassoCV
from .misc import capture_all
def keras_reproducible(seed=1234, verbose=0, TF_CPP_MIN_LOG_LEVEL="3"):
"""
https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
"""
import random
import pkg_resources
import os
random.seed(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = "0" # might need to do this outside the script
if verbose == 0:
os.environ[
"TF_CPP_MIN_LOG_LEVEL"
] = TF_CPP_MIN_LOG_LEVEL # 2 will print warnings
try:
import tensorflow
except ImportError:
raise ImportError("Missing required package 'tensorflow'")
# Use the TF 1.x API
if pkg_resources.get_distribution("tensorflow").version.startswith("1."):
tf = tensorflow
else:
tf = tensorflow.compat.v1
if verbose == 0:
# https://github.com/tensorflow/tensorflow/issues/27023
try:
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
except ImportError:
try:
from tensorflow.python.util import module_wrapper as deprecation
except ImportError:
from tensorflow.python.util import deprecation_wrapper as deprecation
deprecation._PER_MODULE_WARNING_LIMIT = 0
# this was deprecated in 1.15 (maybe earlier)
tensorflow.compat.v1.logging.set_verbosity(tensorflow.compat.v1.logging.ERROR)
ConfigProto = tf.ConfigProto
session_conf = tf.ConfigProto(
intra_op_parallelism_threads=1, inter_op_parallelism_threads=1
)
with capture_all(): # doesn't have quiet option
try:
from tensorflow.python.keras import backend as K
except ImportError:
raise ImportError("Missing required module 'keras'")
tf.set_random_seed(seed)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
def Fixed_V_factory(V):
"""
Return a MatchSpace function with user-supplied V over raw X.
:param V: V Matrix on the raw features
:returns: a function with the signature
MatchSpace fn, V vector, best_v_pen, V = function(X,Y)
"""
def _Fixed_V_MatchSpace_wrapper(X, Y, **kwargs):
return _Fixed_V_MatchSpace(X, Y, V=V, **kwargs)
return _Fixed_V_MatchSpace_wrapper
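# Illustrative usage of Fixed_V_factory (a sketch; X, Y and the V weights are hypothetical, not from the original source):
# V = np.ones(X.shape[1])
# match_space_maker = Fixed_V_factory(V)
# transformer, v_vec, best_v_pen, V_full = match_space_maker(X, Y)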
def _Fixed_V_MatchSpace(X, Y, V, **kwargs): # pylint: disable=unused-argument
return IdTransformer(), V, np.nan, V
class IdTransformer:
def transform(self, X):
return X
def _neg_se_rule(lasso_fit=None, mse_path=None, alphas=None, alpha_min=None, factor = 1):
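    """Return an alternative penalty whose cross-validated MSE is within ``factor``
    standard errors of the MSE at the selected (minimum-MSE) penalty."""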
from statsmodels.stats.weightstats import DescrStatsW
if lasso_fit is not None:
mse_path = lasso_fit.mse_path_
alphas = lasso_fit.alphas_
alpha_min = lasso_fit.alpha_
alpha_min_i = np.where(alphas == alpha_min)
dw = DescrStatsW(mse_path[alpha_min_i,:].T)
mse_mean = mse_path.mean(axis=1)
allowed = mse_mean<=(mse_mean[alpha_min_i] + factor*dw.std_mean[0])
new_alpha_i = max(np.where(allowed)[0])
return alphas[new_alpha_i]
def _block_summ_cols(Y, Y_col_block_size):
"""Block averages a Y np.array. E.g., convert 150->5 where each is a 30-day average. so that MTLasso could be faster
"""
if Y_col_block_size is not None:
if (Y.shape[1] % Y_col_block_size) == 0:
# Can just change the dim on the ndarray (which doesn't shuffle data) to add a new dim and then average across it.
return Y.reshape(Y.shape[0], Y.shape[1]//Y_col_block_size, Y_col_block_size).mean(axis=2)
print("Can only average target across columns blocks if blocks fit evenly")
return Y
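# For example, with Y of shape (N, 150) and Y_col_block_size=30, the reshaped result has shape (N, 5),
# each column being the average of a 30-column block.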
def MTLassoCV_MatchSpace_factory(v_pens=None, n_v_cv=5, sample_frac=1, Y_col_block_size=None, se_factor=None, normalize=True):
"""
Return a MatchSpace function that will fit a MultiTaskLassoCV for Y ~ X
    :param v_pens: Penalties to evaluate (default is to automatically determine)
:param n_v_cv: Number of Cross-Validation folds
:param sample_frac: Fraction of the data to sample
:param se_factor: Allows taking a different penalty than the min mse. Similar to the lambda.1se rule,
if not None, it will take the max lambda that has mse < min_mse + se_factor*(MSE standard error).
:returns: MatchSpace fn, V vector, best_v_pen, V
"""
def _MTLassoCV_MatchSpace_wrapper(X, Y, **kwargs):
return _MTLassoCV_MatchSpace(
X, Y, v_pens=v_pens, n_v_cv=n_v_cv, sample_frac=sample_frac, Y_col_block_size=Y_col_block_size, se_factor=se_factor, normalize=normalize, **kwargs
)
return _MTLassoCV_MatchSpace_wrapper
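# Illustrative usage of MTLassoCV_MatchSpace_factory (a sketch; X, Y and the settings are hypothetical, not from the original source):
# match_space_maker = MTLassoCV_MatchSpace_factory(n_v_cv=5, sample_frac=0.5)
# transformer, v_sel, best_v_pen, (V_full, lasso_fit) = match_space_maker(X, Y)
# X_matched = transformer.transform(X)  # keeps only the columns with nonzero V weight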
def _MTLassoCV_MatchSpace(
X, Y, v_pens=None, n_v_cv=5, sample_frac=1, Y_col_block_size=None, se_factor=None, normalize=True, **kwargs
): # pylint: disable=missing-param-doc, unused-argument
# A fake MT would do Lasso on y_mean = Y.mean(axis=1)
if sample_frac < 1:
N = X.shape[0]
sample = np.random.choice(N, int(sample_frac * N), replace=False)
X = X[sample, :]
Y = Y[sample, :]
if Y_col_block_size is not None:
Y = _block_summ_cols(Y, Y_col_block_size)
varselectorfit = MultiTaskLassoCV(normalize=normalize, cv=n_v_cv, alphas=v_pens).fit(
X, Y
)
best_v_pen = varselectorfit.alpha_
if se_factor is not None:
best_v_pen = _neg_se_rule(varselectorfit, factor=se_factor)
varselectorfit = MultiTaskLasso(alpha=best_v_pen, normalize=normalize).fit(X, Y)
V = np.sqrt(
np.sum(np.square(varselectorfit.coef_), axis=0)
) # n_tasks x n_features -> n_feature
m_sel = V != 0
transformer = SelMatchSpace(m_sel)
return transformer, V[m_sel], best_v_pen, (V, varselectorfit)
def MTLasso_MatchSpace_factory(v_pen, sample_frac=1, Y_col_block_size=None, se_factor=None, normalize=True):
"""
Return a MatchSpace function that will fit a MultiTaskLasso for Y ~ X
:param v_pen: Penalty
:param sample_frac: Fraction of the data to sample
:param se_factor: Allows taking a different penalty than the min mse. Similar to the lambda.1se rule,
if not None, it will take the max lambda that has mse < min_mse + se_factor*(MSE standard error).
:returns: MatchSpace fn, V vector, best_v_pen, V
"""
def _MTLasso_MatchSpace_wrapper(X, Y, **kwargs):
return _MTLasso_MatchSpace(
X, Y, v_pen=v_pen, sample_frac=sample_frac, Y_col_block_size=Y_col_block_size, se_factor=se_factor, normalize=normalize, **kwargs
)
return _MTLasso_MatchSpace_wrapper
def _MTLasso_MatchSpace(
X, Y, v_pen, sample_frac=1, Y_col_block_size=None, se_factor=None, normalize=True, **kwargs
): # pylint: disable=missing-param-doc, unused-argument
# A fake MT would do Lasso on y_mean = Y.mean(axis=1)
if sample_frac < 1:
N = X.shape[0]
sample = np.random.choice(N, int(sample_frac * N), replace=False)
X = X[sample, :]
Y = Y[sample, :]
if Y_col_block_size is not None:
Y = _block_summ_cols(Y, Y_col_block_size)
varselectorfit = MultiTaskLasso(normalize=normalize, alpha=v_pen).fit(
X, Y
)
V = np.sqrt(
np.sum(np.square(varselectorfit.coef_), axis=0)
) # n_tasks x n_features -> n_feature
m_sel = V != 0
transformer = SelMatchSpace(m_sel)
return transformer, V[m_sel], v_pen, (V, varselectorfit)
def D_LassoCV_MatchSpace_factory(v_pens=None, n_v_cv=5, sample_frac=1, y_V_share=0.5):
"""
Return a MatchSpace function that will fit a MultiTaskLassoCV for Y ~ X and Lasso of D_full ~ X_full
and then combines the coefficients into weights using y_V_share
    :param v_pens: Penalties to evaluate (default is to automatically determine)
:param n_v_cv: Number of Cross-Validation folds
:param sample_frac: Fraction of the data to sample
    :param y_V_share: The fraction of the V weight that goes to the variable weights from the Y~X problem.
:returns: MatchSpace fn, V vector, best_v_pen, V
"""
def _D_LassoCV_MatchSpace_wrapper(X, Y, **kwargs):
return _D_LassoCV_MatchSpace(
X,
Y,
v_pens=v_pens,
n_v_cv=n_v_cv,
sample_frac=sample_frac,
y_V_share=y_V_share,
**kwargs
)
return _D_LassoCV_MatchSpace_wrapper
def _D_LassoCV_MatchSpace(
X, Y, X_full, D_full, v_pens=None, n_v_cv=5, sample_frac=1, y_V_share=0.5, **kwargs
): # pylint: disable=missing-param-doc, unused-argument
if sample_frac < 1:
N_y = X.shape[0]
sample_y = np.random.choice(N_y, int(sample_frac * N_y), replace=False)
X = X[sample_y, :]
Y = Y[sample_y, :]
N_d = D_full.shape[0]
sample_d = np.random.choice(N_d, int(sample_frac * N_d), replace=False)
X_full = X_full[sample_d, :]
D_full = D_full[sample_d]
y_varselectorfit = MultiTaskLassoCV(normalize=True, cv=n_v_cv, alphas=v_pens).fit(
X, Y
)
y_V = np.sqrt(
np.sum(np.square(y_varselectorfit.coef_), axis=0)
) # n_tasks x n_features -> n_feature
best_y_v_pen = y_varselectorfit.alpha_
d_varselectorfit = LassoCV(normalize=True, cv=n_v_cv, alphas=v_pens).fit(
X_full, D_full
)
d_V = np.abs(d_varselectorfit.coef_)
best_d_v_pen = d_varselectorfit.alpha_
m_sel = (y_V + d_V) != 0
transformer = SelMatchSpace(m_sel)
if y_V.sum() == 0:
V = d_V
elif d_V.sum() == 0:
V = y_V
else:
V = y_V_share * y_V / (y_V.sum()) + (1 - y_V_share) * d_V / (2 * d_V.sum())
return transformer, V[m_sel], (best_y_v_pen, best_d_v_pen), V
class SelMatchSpace:
def __init__(self, m_sel):
self.m_sel = m_sel
def transform(self, X):
return X[:, self.m_sel]
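# Minimal sketch of what SelMatchSpace does (boolean column selection); the
# values below are illustrative only:
#   m_sel = np.array([True, False, True])
#   SelMatchSpace(m_sel).transform(np.eye(3)).shape  # -> (3, 2)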
def MTLSTMMixed_MatchSpace_factory(
T0=None,
K_fixed=0,
M_sizes=None,
dropout_rate=0.2,
epochs=2,
verbose=0,
hidden_length=100,
):
"""
Return a MatchSpace function that will fit an LSTM of [X_fixed, X_time_varying, Y_pre] ~ Y with the hidden-layer size
optimized to reduce errors on goal units
:param T0: length of Y_pre
    :param K_fixed: Number of fixed unit-covariates (the rest are assumed to be time-varying)
    :param M_sizes: List of hidden-layer (match-space) sizes to try. Default is range(1, 2*int(np.log(Y.shape[0])))
    :param dropout_rate: Dropout rate applied after the LSTM layer
    :param epochs: Number of training epochs
    :param verbose: Verbosity level passed to Keras model.fit()
    :param hidden_length: Number of units in the LSTM layer
:returns: MatchSpace fn, V vector, best_M_size, V
"""
def _MTLSTMMixed_MatchSpace_wrapper(X, Y, fit_model_wrapper, **kwargs):
return _MTLSTMMixed_MatchSpace(
X,
Y,
fit_model_wrapper,
T0=T0,
K_fixed=K_fixed,
M_sizes=M_sizes,
dropout_rate=dropout_rate,
epochs=epochs,
verbose=verbose,
hidden_length=hidden_length,
**kwargs
)
return _MTLSTMMixed_MatchSpace_wrapper
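# Illustrative usage of MTLSTMMixed_MatchSpace_factory above (a sketch;
# fit_model_wrapper is supplied by the caller and must return an object
# exposing a .score attribute):
#   maker = MTLSTMMixed_MatchSpace_factory(T0=10, K_fixed=2, epochs=5)
#   transformer, V, best_M_size, V_best = maker(X, Y, fit_model_wrapper)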
def _split_LSTM_x_data(X, T0, K_fixed=0):
N, K = X.shape
Cov_F = X[:, :K_fixed]
Cov_TV0 = X[:, K_fixed : (K - T0)]
assert Cov_TV0.shape[1] % T0 == 0, "Time-varying covariates not the right shape"
K_TV = int(Cov_TV0.shape[1] / T0)
Cov_TV = np.empty((N, T0, K_TV))
for i in range(K_TV):
Cov_TV[:, :, i] = Cov_TV0[:, (i * K_TV) : ((i + 1) * K_TV)]
Out_pre = X[:, (K - T0) :]
return Cov_F, Cov_TV, Out_pre
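# Column layout assumed by _split_LSTM_x_data (descriptive note):
#   X = [ fixed covariates (K_fixed cols) | time-varying covariates | Y_pre (T0 cols) ]
# i.e. the last T0 columns are the pre-period outcomes and everything between
# the fixed block and that outcome block is treated as time-varying.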
def _shape_LSTM_x_data(Cov_F, Cov_TV, Out_pre):
N, K_fixed = Cov_F.shape
T0 = Out_pre.shape[1]
K_TV = Cov_TV.shape[2]
LSTM_K = K_fixed + K_TV + 1
LSTM_x = np.empty((N, T0, LSTM_K))
for t in range(T0):
LSTM_x[:, t, :K_fixed] = Cov_F
LSTM_x[:, t, K_fixed : (K_fixed + K_TV)] = Cov_TV[:, t, :]
LSTM_x[:, t, (K_fixed + K_TV) :] = Out_pre[:, t, np.newaxis]
return LSTM_x
def _shape_LSTM_y_data(Y_pre, Y_post, T0):
_, T1 = Y_post.shape
Y = np.hstack((Y_pre, Y_post))
LSTM_y = []
for t in range(T1):
LSTM_y.append(Y[:, (t + 1) : (T0 + t + 1), np.newaxis])
return LSTM_y
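# Descriptive note: _shape_LSTM_y_data returns a list of T1 arrays, each of
# shape (N, T0, 1); the target for output head t is the outcome window shifted
# forward by t+1 periods, matching the T1 per-period Dense heads built below.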
def _MTLSTMMixed_MatchSpace(
X,
Y,
fit_model_wrapper,
T0=None,
K_fixed=0,
M_sizes=None,
dropout_rate=0.2,
epochs=2,
verbose=0,
hidden_length=100,
**kwargs
):
    # could have just used the LSTM state units directly, but having this be big and then TimeDistributed to narrow it down is more expressive/powerful
with capture_all(): # doesn't have quiet option
import keras
if M_sizes is None:
M_sizes = range(1, 2 * int(np.log(Y.shape[0])))
if T0 is None:
T0 = X.shape[1]
if verbose == 0:
import os
if (
"TF_CPP_MIN_LOG_LEVEL" in os.environ
and os.environ["TF_CPP_MIN_LOG_LEVEL"] != "2"
and os.environ["TF_CPP_MIN_LOG_LEVEL"] != "3"
):
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
# Otherwise prints random info about CPU instruction sets
Cov_F, Cov_TV, Out_pre = _split_LSTM_x_data(X, T0, K_fixed=K_fixed)
LSTM_x = _shape_LSTM_x_data(Cov_F, Cov_TV, Out_pre)
LSTM_y = _shape_LSTM_y_data(Out_pre, Y, T0)
LSTM_K = LSTM_x.shape[2]
T1 = Y.shape[1]
# fits_single = {}
int_layer_single = {}
Vs_single = {}
scores = np.zeros((len(M_sizes)))
for i, M_size in enumerate(M_sizes):
inp = keras.Input(
batch_shape=(1, T0, LSTM_K), name="input"
        ) # batch_shape=(1,..) ensures training on one case at a time
with capture_all(): # doesn't have quiet option
x1 = keras.layers.LSTM(units=hidden_length, return_sequences=True)(inp) ##
x2 = keras.layers.Dropout(rate=dropout_rate)(x1) ##
core = keras.layers.TimeDistributed(
keras.layers.Dense(units=M_size, activation="elu"), name="embedding"
)(x2)
output_vec = []
for t in range(T1):
new_output = keras.layers.Dense(
units=1, activation="linear", name="yp%s" % (t)
)(core)
output_vec.append(new_output)
model = keras.models.Model(inputs=inp, outputs=output_vec)
model.compile(loss="mse", optimizer="Adam", metrics=["mean_squared_error"])
model.fit(x=LSTM_x, y=LSTM_y, batch_size=1, epochs=epochs, verbose=verbose)
outputs_fit = model.get_layer(
name="embedding"
).output # .get_output_at(node_index = 1)
intermediate_layer_model = keras.models.Model(
inputs=model.input, outputs=outputs_fit
)
final_weights = np.empty((T1, M_size))
n_layers_w = len(model.get_weights())
for t in range(T1):
l_i = n_layers_w - 2 - 2 * t
final_weights[t, :] = model.get_weights()[l_i][:, 0]
V_i = np.mean(np.abs(final_weights), axis=0)
transformer_i = LSTMTransformer(T0, K_fixed, intermediate_layer_model)
sc_fit_i = fit_model_wrapper(transformer_i, V_i)
# fits_single[i] = sc_fit_i
int_layer_single[i] = intermediate_layer_model
Vs_single[i] = V_i
scores[i] = sc_fit_i.score # = sc_fit_i.score_R2
i_best = np.argmin(scores)
best_M_size = M_sizes[i_best]
V_best = Vs_single[i_best]
intermediate_layer_model = int_layer_single[i_best]
transformer = LSTMTransformer(T0, K_fixed, intermediate_layer_model)
return transformer, V_best, best_M_size, V_best
class LSTMTransformer:
def __init__(self, T0, K_fixed, intermediate_layer_model):
self.T0 = T0
self.K_fixed = K_fixed
self.intermediate_layer_model = intermediate_layer_model
def transform(self, X):
Cov_F, Cov_TV, Out_Pre = _split_LSTM_x_data(X, self.T0, K_fixed=self.K_fixed)
LSTM_x = _shape_LSTM_x_data(Cov_F, Cov_TV, Out_Pre)
M = self.intermediate_layer_model.predict(LSTM_x, batch_size=1)[
:, self.T0 - 1, :
]
return M
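# Rough usage sketch for LSTMTransformer (names illustrative):
#   transformer = LSTMTransformer(T0, K_fixed, intermediate_layer_model)
#   M = transformer.transform(X)  # (N, M_size) embedding at the last pre period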
def MTLassoMixed_MatchSpace_factory(v_pens=None, n_v_cv=5):
"""
Return a MatchSpace function that will fit a MultiTaskLassoCV for Y ~ X with the penalization optimized to reduce errors on goal units
    :param v_pens: Penalties to evaluate (default is to determine them automatically)
:param n_v_cv: Number of Cross-Validation folds
:returns: MatchSpace fn, V vector, best_v_pen, V
"""
def _MTLassoMixed_MatchSpace_wrapper(X, Y, fit_model_wrapper, **kwargs):
return _MTLassoMixed_MatchSpace(
X, Y, fit_model_wrapper, v_pens=v_pens, n_v_cv=n_v_cv, **kwargs
)
return _MTLassoMixed_MatchSpace_wrapper
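# Illustrative usage of MTLassoMixed_MatchSpace_factory above (a sketch; as with
# the LSTM variant, fit_model_wrapper is supplied by the caller and must return
# an object exposing a .score attribute):
#   maker = MTLassoMixed_MatchSpace_factory(n_v_cv=5)
#   transformer, V, best_v_pen, V_full = maker(X, Y, fit_model_wrapper)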
def _MTLassoMixed_MatchSpace(
X, Y, fit_model_wrapper, v_pens=None, n_v_cv=5, **kwargs
): # pylint: disable=missing-param-doc, unused-argument
    # Note that MultiTaskLasso(CV).path with the same alpha doesn't produce the same results as MultiTaskLasso(CV)
mtlasso_cv_fit = MultiTaskLassoCV(normalize=True, cv=n_v_cv, alphas=v_pens).fit(
X, Y
)
# V_cv = np.sqrt(np.sum(np.square(mtlasso_cv_fit.coef_), axis=0)) #n_tasks x n_features -> n_feature
# v_pen_cv = mtlasso_cv_fit.alpha_
# m_sel_cv = (V_cv!=0)
# sc_fit_cv = fit_model_wrapper(SelMatchSpace(m_sel_cv), V_cv[m_sel_cv])
v_pens = mtlasso_cv_fit.alphas_
# fits_single = {}
Vs_single = {}
scores = np.zeros((len(v_pens)))
# R2s = np.zeros((len(v_pens)))
for i, v_pen in enumerate(v_pens):
mtlasso_i_fit = MultiTaskLasso(alpha=v_pen, normalize=True).fit(X, Y)
V_i = np.sqrt(np.sum(np.square(mtlasso_i_fit.coef_), axis=0))
m_sel_i = V_i != 0
sc_fit_i = fit_model_wrapper(SelMatchSpace(m_sel_i), V_i[m_sel_i])
# fits_single[i] = sc_fit_i
Vs_single[i] = V_i
scores[i] = sc_fit_i.score
# R2s[i] = sc_fit_i.score_R2
i_best = np.argmin(scores)
# v_pen_best = v_pens[i_best]
# i_cv = np.where(v_pens==v_pen_cv)[0][0]
# print("CV alpha: " + str(v_pen_cv) + " (" + str(R2s[i_cv]) + ")." + " Best alpha: " + str(v_pen_best) + " (" + str(R2s[i_best]) + ") .")
best_v_pen = v_pens[i_best]
V_best = Vs_single[i_best]
m_sel_best = V_best != 0
return SelMatchSpace(m_sel_best), V_best[m_sel_best], best_v_pen, V_best
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL\"\n ",
"PYTHONHASHSEED",
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL\"\n ", "PYTHONHASHSEED", "TF_CPP_MIN_LOG_LEVEL"] | python | 3 | 0 | |
tezos/python-tests/daemons/node.py | from typing import Dict, List, Tuple, Optional
import sys
import subprocess
import os
import tempfile
import shutil
from . import utils
import time
import signal
from tools import paths
# Timeout before killing a node which doesn't react to SIGTERM
TERM_TIMEOUT = 10
def _run_and_print(cmd):
cmd_str = utils.format_command(cmd)
print(cmd_str)
completed_process = subprocess.run(cmd, capture_output=True, text=True,
check=False)
stdout = completed_process.stdout
stderr = completed_process.stderr
if stdout:
print(stdout)
if stderr:
print(stderr, file=sys.stderr)
completed_process.check_returncode()
class Node:
"""Wrapper for the tezos-node command.
This class manages the persistent state of a tezos-node
(the node directory) and provides an API which wraps the node commands.
Most commands are intended to be used synchronously, for instance:
- tezos-node identity generate
- tezos-node upgrade storage
tezos-node run is intended to be used asynchronously and forks a
subprocess.
Typical use.
node = Node(node_bin,
p2p_port=p2p_node,
rpc_port=rpc_node,
peers=peers_rpc,
log_file=log_file,
params=params,
log_levels=log_levels)
node.snapshot_import(snapshot) # optional, use a snapshot
node.init_id() # generate node id
node.init_config() # generate config file based on parameters
node.run() # run tezos-node process
node.terminate() # terminate process
node.run() # re-run using same process
node.terminate() # or node.kill()
node.cleanup() # cleanup temp files
"""
def __init__(self,
node: str,
expected_pow: float = 0.0,
node_dir: str = None,
use_tls: Tuple[str, str] = None,
params: List[str] = None,
log_file: str = None,
p2p_port: int = 9732,
rpc_port: int = 8732,
websocket_port: int = 4932,
peers: List[int] = None,
log_levels: Dict[str, str] = None,
singleprocess: bool = False,
env: Dict[str, str] = None):
"""Creates a new Popen instance for a tezos-node, and manages context.
args:
use_tls (tuple): None if no tls, else couple of strings
(certificate, key)
Creates a temporary node directory unless provided by caller.
Generate node identity.
"""
assert os.path.isfile(node), f'{node} not a file'
assert node_dir is None or os.path.isdir(node_dir), (f'{node_dir} not '
f'a dir')
if params is None:
params = ["--network=sandbox"]
self.log_file = log_file
self._temp_dir = node_dir is None
if node_dir is None:
node_dir = tempfile.mkdtemp(prefix='tezos-node.')
self.node_dir = node_dir
self.p2p_port = p2p_port
self.rpc_port = rpc_port
self.websocket_port = websocket_port
self.expected_pow = expected_pow
self.node = node
self._params = params
self._run_called_before = False
singleprocess_opt = ['--singleprocess'] if singleprocess else []
node_run = [node, 'run', '--data-dir', node_dir,
'--no-bootstrap-peers'] + singleprocess_opt + params
tezedge_node_run = [node, '--tezos-data-dir', node_dir, '--disable-bootstrap-lookup',
'--websocket-address', f'0.0.0.0:{websocket_port}',
'--identity-expected-pow', '0', '--p2p-port', str(p2p_port),
'--rpc-port', str(rpc_port), '--identity-file', node_dir + '/identity.json'] + params
if peers is not None:
tezedge_node_run.append('--peers')
tezedge_peers = []
for peer in peers:
node_run.append('--peer')
node_run.append(f'127.0.0.1:{peer}')
tezedge_peers.append(f'127.0.0.1:{peer}')
tezedge_node_run.append(','.join(tezedge_peers))
self.use_tls = use_tls
new_env = None
if env is not None:
new_env = os.environ.copy()
new_env.update(env)
if log_levels is not None:
new_env = os.environ.copy() if new_env is None else new_env
lwt_log = ";".join(f'{key} -> {values}' for key, values in
log_levels.items())
new_env['TEZOS_LOG'] = lwt_log
# TODO: FIXME
new_env = os.environ.copy()
new_env['LD_LIBRARY_PATH'] = paths.TEZOS_HOME
self._new_env = new_env
# TODO: differentiate tezedge
# self._node_run = node_run
self._node_run = tezedge_node_run
self._process = None # type: Optional[subprocess.Popen]
def run(self):
node_run_str = utils.format_command(self._node_run)
print(node_run_str)
print(self._new_env)
        # overwrite old log on first invocation only
overwrite_log = not self._run_called_before
stdout, stderr = utils.prepare_log(self._node_run,
self.log_file,
overwrite_log)
self._process = subprocess.Popen(self._node_run, stdout=stdout,
stderr=stderr, env=self._new_env)
self._run_called_before = True
# print("Sleeping")
# time.sleep(5)
def init_config(self):
        # TODO: switch between tezos and tezedge nodes
node_config = [self.node,
'config',
'init',
'--data-dir', self.node_dir,
'--net-addr', f'127.0.0.1:{self.p2p_port}',
'--rpc-addr', f'127.0.0.1:{self.rpc_port}',
'--expected-pow', str(self.expected_pow)] + self._params
if self.use_tls:
# We can't create tezos.crt/tezos.key here
# as node_dir has to be empty when we run node_config
node_config += ['--rpc-tls',
(f'{self.node_dir}/tezos.crt,'
f'{self.node_dir}/tezos.key')]
_run_and_print(node_config)
def init_id(self):
node_identity = [self.node,
'identity',
'generate',
str(self.expected_pow),
'--data-dir', self.node_dir]
_run_and_print(node_identity)
if self.use_tls:
with open(f'{self.node_dir}/tezos.crt', 'w+') as file:
file.write(self.use_tls[0])
with open(f'{self.node_dir}/tezos.key', 'w+') as file:
file.write(self.use_tls[1])
def upgrade_storage(self):
node_upgrade = [self.node, 'upgrade', 'storage', '--data-dir',
self.node_dir]
_run_and_print(node_upgrade)
def snapshot_export(self, file, params=None):
if params is None:
params = []
params = ['--data-dir', self.node_dir] + params
snapshot_cmd = ([self.node, 'snapshot',
'export'] + list(params) + [file])
_run_and_print(snapshot_cmd)
def snapshot_import(self, file, params=None):
if params is None:
params = []
params = ['--data-dir', self.node_dir] + params
snapshot_cmd = ([self.node, 'snapshot',
'import'] + list(params) + [file])
_run_and_print(snapshot_cmd)
def reconstruct(self, params=None):
if params is None:
params = []
params = ['--data-dir', self.node_dir] + params
reconstruct_cmd = ([self.node, 'reconstruct'] + list(params))
_run_and_print(reconstruct_cmd)
def cleanup(self):
"""Remove node directory (only if generated by constructor)"""
if self._temp_dir:
shutil.rmtree(self.node_dir)
def terminate(self) -> None:
"""Send SIGTERM to node, do nothing if node hasn't been run yet"""
if self._process is not None:
# self._process.terminate()
self._process.send_signal(signal.SIGINT)
time.sleep(3)
def kill(self) -> None:
"""Send SIGKILL to node, do nothing if node hasn't been run yet"""
if self._process is not None:
self._process.kill()
def terminate_or_kill(self) -> None:
"""Try to terminate node gently (SIGTERM) and kill it (SIGKILL)
if the node is still running after TERM_TIMEOUT. Do nothing
if node hasn't been run yet.
"""
if self._process is None:
return
# self._process.terminate()
self._process.send_signal(signal.SIGINT)
try:
self._process.wait(timeout=TERM_TIMEOUT)
except subprocess.TimeoutExpired:
self._process.kill()
def poll(self):
assert self._process
return self._process.poll()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/addons/addons_gcpauth.go | /*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package addons
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"strconv"
"strings"
"time"
gcr_config "github.com/GoogleCloudPlatform/docker-credential-gcr/config"
"github.com/pkg/errors"
"golang.org/x/oauth2/google"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/detect"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/reason"
"k8s.io/minikube/pkg/minikube/service"
"k8s.io/minikube/pkg/minikube/style"
)
const (
credentialsPath = "/var/lib/minikube/google_application_credentials.json"
projectPath = "/var/lib/minikube/google_cloud_project"
secretName = "gcp-auth"
namespaceName = "gcp-auth"
)
// enableOrDisableGCPAuth enables or disables the gcp-auth addon depending on the val parameter
func enableOrDisableGCPAuth(cfg *config.ClusterConfig, name string, val string) error {
enable, err := strconv.ParseBool(val)
if err != nil {
return errors.Wrapf(err, "parsing bool: %s", name)
}
if enable {
return enableAddonGCPAuth(cfg)
}
return disableAddonGCPAuth(cfg)
}
func enableAddonGCPAuth(cfg *config.ClusterConfig) error {
// Grab command runner from running cluster
cc := mustload.Running(cfg.Name)
r := cc.CP.Runner
// Grab credentials from where GCP would normally look
ctx := context.Background()
creds, err := google.FindDefaultCredentials(ctx)
if err != nil {
if detect.IsCloudShell() {
if c := os.Getenv("CLOUDSDK_CONFIG"); c != "" {
f, err := ioutil.ReadFile(path.Join(c, "application_default_credentials.json"))
if err == nil {
creds, _ = google.CredentialsFromJSON(ctx, f)
}
}
} else {
exit.Message(reason.InternalCredsNotFound, "Could not find any GCP credentials. Either run `gcloud auth application-default login` or set the GOOGLE_APPLICATION_CREDENTIALS environment variable to the path of your credentials file.")
}
}
// Create a registry secret in every namespace we can find
// Always create the pull secret, no matter where we are
err = createPullSecret(cfg, creds)
if err != nil {
return errors.Wrap(err, "pull secret")
}
// If the env var is explicitly set, even in GCE, then defer to the user and continue
if !Force && detect.IsOnGCE() && os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" {
out.WarningT("It seems that you are running in GCE, which means authentication should work without the GCP Auth addon. If you would still like to authenticate using a credentials file, use the --force flag.")
return nil
}
if creds.JSON == nil {
		out.WarningT("You have authenticated with a service account that does not have an associated JSON. The GCP Auth addon requires credentials with a JSON file in order to continue. The image pull secret has been imported.")
return nil
}
// Actually copy the creds over
f := assets.NewMemoryAssetTarget(creds.JSON, credentialsPath, "0444")
err = r.Copy(f)
if err != nil {
return err
}
// First check if the project env var is explicitly set
projectEnv := os.Getenv("GOOGLE_CLOUD_PROJECT")
if projectEnv != "" {
f := assets.NewMemoryAssetTarget([]byte(projectEnv), projectPath, "0444")
return r.Copy(f)
}
// We're currently assuming gcloud is installed and in the user's path
proj, err := exec.Command("gcloud", "config", "get-value", "project").Output()
if err == nil && len(proj) > 0 {
f := assets.NewMemoryAssetTarget(bytes.TrimSpace(proj), projectPath, "0444")
return r.Copy(f)
}
out.WarningT("Could not determine a Google Cloud project, which might be ok.")
out.Styled(style.Tip, `To set your Google Cloud project, run:
gcloud config set project <project name>
or set the GOOGLE_CLOUD_PROJECT environment variable.`)
// Copy an empty file in to avoid errors about missing files
emptyFile := assets.NewMemoryAssetTarget([]byte{}, projectPath, "0444")
return r.Copy(emptyFile)
}
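// createPullSecret builds a legacy .dockercfg payload with an OAuth2 access
// token entry for each default GCR/AR registry, roughly of the form (illustrative):
//   {"https://gcr.io":{"username":"oauth2accesstoken","password":"<token>","email":"none"}}
// It stores that payload as a kubernetes.io/dockercfg secret in every namespace
// that is not skipped, then patches the secret into each service account's imagePullSecrets.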
func createPullSecret(cc *config.ClusterConfig, creds *google.Credentials) error {
if creds == nil {
return errors.New("no credentials, skipping creating pull secret")
}
token, err := creds.TokenSource.Token()
// Only try to add secret if Token was found
if err == nil {
client, err := service.K8s.GetCoreClient(cc.Name)
if err != nil {
return err
}
namespaces, err := client.Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return err
}
dockercfg := ""
registries := append(gcr_config.DefaultGCRRegistries[:], gcr_config.DefaultARRegistries[:]...)
for _, reg := range registries {
dockercfg += fmt.Sprintf(`"https://%s":{"username":"oauth2accesstoken","password":"%s","email":"none"},`, reg, token.AccessToken)
}
dockercfg = strings.TrimSuffix(dockercfg, ",")
data := map[string][]byte{
".dockercfg": []byte(fmt.Sprintf(`{%s}`, dockercfg)),
}
for _, n := range namespaces.Items {
if skipNamespace(n.Name) {
continue
}
secrets := client.Secrets(n.Name)
exists := false
secList, err := secrets.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return err
}
for _, s := range secList.Items {
if s.Name == secretName {
exists = true
break
}
}
if !exists || Refresh {
secretObj := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
},
Data: data,
Type: "kubernetes.io/dockercfg",
}
if exists && Refresh {
_, err := secrets.Update(context.TODO(), secretObj, metav1.UpdateOptions{})
if err != nil {
return err
}
} else {
_, err = secrets.Create(context.TODO(), secretObj, metav1.CreateOptions{})
if err != nil {
return err
}
}
}
// Now patch the secret into all the service accounts we can find
serviceaccounts := client.ServiceAccounts(n.Name)
salist, err := serviceaccounts.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return err
}
// Let's make sure we at least find the default service account
for len(salist.Items) == 0 {
salist, err = serviceaccounts.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return err
}
time.Sleep(1 * time.Second)
}
ips := corev1.LocalObjectReference{Name: secretName}
for _, sa := range salist.Items {
add := true
for _, ps := range sa.ImagePullSecrets {
if ps.Name == secretName {
add = false
break
}
}
if add {
sa.ImagePullSecrets = append(sa.ImagePullSecrets, ips)
_, err := serviceaccounts.Update(context.TODO(), &sa, metav1.UpdateOptions{})
if err != nil {
return err
}
}
}
}
}
return nil
}
func refreshExistingPods(cc *config.ClusterConfig) error {
client, err := service.K8s.GetCoreClient(cc.Name)
if err != nil {
return err
}
namespaces, err := client.Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return err
}
for _, n := range namespaces.Items {
// Ignore kube-system and gcp-auth namespaces
if skipNamespace(n.Name) {
continue
}
pods := client.Pods(n.Name)
podList, err := pods.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return err
}
for _, p := range podList.Items {
// Skip pods we're explicitly told to skip
if _, ok := p.Labels["gcp-auth-skip-secret"]; ok {
continue
}
// Recreating the pod should pickup the necessary changes
err := pods.Delete(context.TODO(), p.Name, metav1.DeleteOptions{})
if err != nil {
return err
}
p.ResourceVersion = ""
_, err = pods.Get(context.TODO(), p.Name, metav1.GetOptions{})
for err == nil {
time.Sleep(time.Second)
_, err = pods.Get(context.TODO(), p.Name, metav1.GetOptions{})
}
_, err = pods.Create(context.TODO(), &p, metav1.CreateOptions{})
if err != nil {
return err
}
}
}
return nil
}
func disableAddonGCPAuth(cfg *config.ClusterConfig) error {
// Grab command runner from running cluster
cc := mustload.Running(cfg.Name)
r := cc.CP.Runner
// Clean up the files generated when enabling the addon
creds := assets.NewMemoryAssetTarget([]byte{}, credentialsPath, "0444")
err := r.Remove(creds)
if err != nil {
return err
}
project := assets.NewMemoryAssetTarget([]byte{}, projectPath, "0444")
err = r.Remove(project)
if err != nil {
return err
}
client, err := service.K8s.GetCoreClient(cfg.Name)
if err != nil {
return err
}
namespaces, err := client.Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return err
}
// No need to check for an error here, if the secret doesn't exist, no harm done.
for _, n := range namespaces.Items {
if skipNamespace(n.Name) {
continue
}
secrets := client.Secrets(n.Name)
err := secrets.Delete(context.TODO(), secretName, metav1.DeleteOptions{})
if err != nil {
klog.Infof("error deleting secret: %v", err)
}
serviceaccounts := client.ServiceAccounts(n.Name)
salist, err := serviceaccounts.List(context.TODO(), metav1.ListOptions{})
if err != nil {
klog.Infof("error getting service accounts: %v", err)
return err
}
for _, sa := range salist.Items {
for i, ps := range sa.ImagePullSecrets {
if ps.Name == secretName {
sa.ImagePullSecrets = append(sa.ImagePullSecrets[:i], sa.ImagePullSecrets[i+1:]...)
_, err := serviceaccounts.Update(context.TODO(), &sa, metav1.UpdateOptions{})
if err != nil {
return err
}
break
}
}
}
}
return nil
}
func verifyGCPAuthAddon(cc *config.ClusterConfig, name string, val string) error {
enable, err := strconv.ParseBool(val)
if err != nil {
return errors.Wrapf(err, "parsing bool: %s", name)
}
// If we're in GCE and didn't actually start the gcp-auth pods, don't check for them.
// We also don't want to actually set the addon as enabled, so just exit completely.
if enable && !Force && detect.IsOnGCE() && os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" {
return ErrSkipThisAddon
}
err = verifyAddonStatusInternal(cc, name, val, "gcp-auth")
if err != nil {
return err
}
if Refresh {
err = refreshExistingPods(cc)
if err != nil {
return err
}
}
if enable && err == nil {
out.Styled(style.Notice, "Your GCP credentials will now be mounted into every pod created in the {{.name}} cluster.", out.V{"name": cc.Name})
out.Styled(style.Notice, "If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.")
if !Refresh {
out.Styled(style.Notice, "If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.")
}
}
return err
}
func skipNamespace(name string) bool {
return name == metav1.NamespaceSystem || name == namespaceName
}
| [
"\"CLOUDSDK_CONFIG\"",
"\"GOOGLE_APPLICATION_CREDENTIALS\"",
"\"GOOGLE_CLOUD_PROJECT\"",
"\"GOOGLE_APPLICATION_CREDENTIALS\""
]
| []
| [
"GOOGLE_CLOUD_PROJECT",
"CLOUDSDK_CONFIG",
"GOOGLE_APPLICATION_CREDENTIALS"
]
| [] | ["GOOGLE_CLOUD_PROJECT", "CLOUDSDK_CONFIG", "GOOGLE_APPLICATION_CREDENTIALS"] | go | 3 | 0 | |
cmd/dagger/cmd/version.go | package cmd
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"runtime"
"strings"
"time"
goVersion "github.com/hashicorp/go-version"
"github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"go.dagger.io/dagger/version"
"golang.org/x/term"
)
const (
versionFile = "~/.config/dagger/version-check"
versionURL = "https://releases.dagger.io/dagger/latest_version"
)
var (
versionMessage = ""
)
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print dagger version",
// Disable version hook here to avoid double version check
PersistentPreRun: func(*cobra.Command, []string) {},
PersistentPostRun: func(*cobra.Command, []string) {},
Args: cobra.NoArgs,
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("dagger %s (%s) %s/%s\n",
version.Version,
version.Revision,
runtime.GOOS, runtime.GOARCH,
)
if check := viper.GetBool("check"); check {
versionFilePath, err := homedir.Expand(versionFile)
if err != nil {
panic(err)
}
_ = os.Remove(versionFilePath)
checkVersion()
if !warnVersion() {
fmt.Println("dagger is up to date.")
}
}
},
}
func init() {
versionCmd.Flags().Bool("check", false, "check if dagger is up to date")
if err := viper.BindPFlags(versionCmd.Flags()); err != nil {
panic(err)
}
}
func isCheckOutdated(path string) bool {
// Ignore if not in terminal
if !term.IsTerminal(int(os.Stdout.Fd())) || !term.IsTerminal(int(os.Stderr.Fd())) {
return false
}
// Ignore if CI
if os.Getenv("CI") != "" || os.Getenv("BUILD_NUMBER") != "" || os.Getenv("RUN_ID") != "" {
return false
}
data, err := ioutil.ReadFile(path)
if err != nil {
return true
}
lastCheck, err := time.Parse(time.RFC3339, string(data))
if err != nil {
return true
}
nextCheck := lastCheck.Add(24 * time.Hour)
return !time.Now().Before(nextCheck)
}
func getLatestVersion(currentVersion *goVersion.Version) (*goVersion.Version, error) {
req, err := http.NewRequest("GET", versionURL, nil)
if err != nil {
return nil, err
}
// dagger/<version> (<OS>; <ARCH>)
agent := fmt.Sprintf("dagger/%s (%s; %s)", currentVersion.String(), runtime.GOOS, runtime.GOARCH)
req.Header.Set("User-Agent", agent)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
latestVersion := strings.TrimSuffix(string(data), "\n")
return goVersion.NewVersion(latestVersion)
}
// Compare the binary version with the latest version online
// Return the latest version if current is outdated
func isVersionLatest() (string, error) {
currentVersion, err := goVersion.NewVersion(version.Version)
if err != nil {
return "", err
}
latestVersion, err := getLatestVersion(currentVersion)
if err != nil {
return "", err
}
if currentVersion.LessThan(latestVersion) {
return latestVersion.String(), nil
}
return "", nil
}
func checkVersion() {
if version.Version == version.DevelopmentVersion {
// running devel version
return
}
versionFilePath, err := homedir.Expand(versionFile)
if err != nil {
panic(err)
}
baseDir := path.Dir(versionFilePath)
if _, err := os.Stat(baseDir); os.IsNotExist(err) {
if err := os.MkdirAll(baseDir, 0700); err != nil {
// mkdir fails, ignore silently
return
}
}
if !isCheckOutdated(versionFilePath) {
return
}
// Check timestamp
latestVersion, err := isVersionLatest()
if err != nil {
return
}
if latestVersion != "" {
versionMessage = fmt.Sprintf("\nA new version is available (%s), please go to https://github.com/dagger/dagger/doc/install.md for instructions.", latestVersion)
}
// Update check timestamps file
now := time.Now().Format(time.RFC3339)
ioutil.WriteFile(path.Join(versionFilePath), []byte(now), 0600)
}
func warnVersion() bool {
if versionMessage == "" {
return false
}
if binPath, err := os.Executable(); err == nil {
if p, err := os.Readlink(binPath); err == nil {
// Homebrew detected, print custom message
if strings.Contains(p, "/Cellar/") {
fmt.Println("\nA new version is available, please run:\n\nbrew update && brew upgrade dagger")
return true
}
}
}
// Print default message
fmt.Println(versionMessage)
return true
}
| [
"\"CI\"",
"\"BUILD_NUMBER\"",
"\"RUN_ID\""
]
| []
| [
"BUILD_NUMBER",
"CI",
"RUN_ID"
]
| [] | ["BUILD_NUMBER", "CI", "RUN_ID"] | go | 3 | 0 | |
simpleRestApi/authAPI/main.go | package main
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"github.com/gorilla/mux"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
)
var user *http.Request
var (
googleOauthConfig *oauth2.Config
// TODO: randomize it
oauthStateString = "pseudo-random"
)
func init() {
	// setting our own credentials
os.Setenv("GOOGLE_CLIENT_ID", "754201666766-ejo5bfvvkl0368vp21g28fa4mofrndam.apps.googleusercontent.com")
os.Setenv("GOOGLE_CLIENT_SECRET", "hYEkQVeK-THED8Y1mA_3oTYg")
googleOauthConfig = &oauth2.Config{
RedirectURL: "http://localhost:8080/callback",
ClientID: os.Getenv("GOOGLE_CLIENT_ID"),
ClientSecret: os.Getenv("GOOGLE_CLIENT_SECRET"),
Scopes: []string{"https://www.googleapis.com/auth/userinfo.email"},
Endpoint: google.Endpoint,
}
}
func main() {
myRouter := mux.NewRouter().StrictSlash(true)
myRouter.HandleFunc("/", handleMain)
myRouter.HandleFunc("/login", handleGoogleLogin)
myRouter.HandleFunc("/callback", handleGoogleCallback)
myRouter.HandleFunc("/usr", handleUserInformation)
log.Fatal(http.ListenAndServe(":8080", myRouter))
}
func handleUserInformation(w http.ResponseWriter, r *http.Request) {
content, _ := getUserInfo(user.FormValue("state"), user.FormValue("code"))
fmt.Fprintf(w, "Content: %s\n", content)
}
func handleMain(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/login", http.StatusTemporaryRedirect)
}
func handleGoogleLogin(w http.ResponseWriter, r *http.Request) {
url := googleOauthConfig.AuthCodeURL(oauthStateString)
fmt.Print(http.StatusTemporaryRedirect)
http.Redirect(w, r, url, http.StatusTemporaryRedirect)
}
func handleGoogleCallback(w http.ResponseWriter, r *http.Request) {
content, err := getUserInfo(r.FormValue("state"), r.FormValue("code"))
if err != nil {
fmt.Println(err.Error())
http.Redirect(w, r, "/", http.StatusTemporaryRedirect)
return
}
// setting request static
user = r
fmt.Fprintf(w, "Content: %s\n", content)
}
func getUserInfo(state string, code string) ([]byte, error) {
if state != oauthStateString {
return nil, fmt.Errorf("invalid oauth state")
}
token, err := googleOauthConfig.Exchange(oauth2.NoContext, code)
if err != nil {
return nil, fmt.Errorf("code exchange failed: %s", err.Error())
}
response, err := http.Get("https://www.googleapis.com/oauth2/v2/userinfo?access_token=" + token.AccessToken)
if err != nil {
return nil, fmt.Errorf("failed getting user info: %s", err.Error())
}
defer response.Body.Close()
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, fmt.Errorf("failed reading response body: %s", err.Error())
}
return contents, nil
}
| [
"\"GOOGLE_CLIENT_ID\"",
"\"GOOGLE_CLIENT_SECRET\""
]
| []
| [
"GOOGLE_CLIENT_ID",
"GOOGLE_CLIENT_SECRET"
]
| [] | ["GOOGLE_CLIENT_ID", "GOOGLE_CLIENT_SECRET"] | go | 2 | 0 | |
providers/dns/dns_providers_test.go | package dns
import (
"os"
"reflect"
"testing"
"github.com/stretchr/testify/assert"
"github.com/nicky-dev/lego/providers/dns/exoscale"
)
var (
apiKey string
apiSecret string
)
func init() {
apiSecret = os.Getenv("EXOSCALE_API_SECRET")
apiKey = os.Getenv("EXOSCALE_API_KEY")
}
func restoreExoscaleEnv() {
os.Setenv("EXOSCALE_API_KEY", apiKey)
os.Setenv("EXOSCALE_API_SECRET", apiSecret)
}
func TestKnownDNSProviderSuccess(t *testing.T) {
os.Setenv("EXOSCALE_API_KEY", "abc")
os.Setenv("EXOSCALE_API_SECRET", "123")
provider, err := NewDNSChallengeProviderByName("exoscale")
assert.NoError(t, err)
assert.NotNil(t, provider)
if reflect.TypeOf(provider) != reflect.TypeOf(&exoscale.DNSProvider{}) {
		t.Errorf("Did not load the correct DNS provider: %v is not *exoscale.DNSProvider", reflect.TypeOf(provider))
}
restoreExoscaleEnv()
}
func TestKnownDNSProviderError(t *testing.T) {
os.Setenv("EXOSCALE_API_KEY", "")
os.Setenv("EXOSCALE_API_SECRET", "")
_, err := NewDNSChallengeProviderByName("exoscale")
assert.Error(t, err)
restoreExoscaleEnv()
}
func TestUnknownDNSProvider(t *testing.T) {
_, err := NewDNSChallengeProviderByName("foobar")
assert.Error(t, err)
}
| [
"\"EXOSCALE_API_SECRET\"",
"\"EXOSCALE_API_KEY\""
]
| []
| [
"EXOSCALE_API_SECRET",
"EXOSCALE_API_KEY"
]
| [] | ["EXOSCALE_API_SECRET", "EXOSCALE_API_KEY"] | go | 2 | 0 | |
examples/two-tier/main.go | package main
import (
"fmt"
"os"
"time"
"github.com/ucloud/ucloud-sdk-go/ucloud"
"github.com/ucloud/ucloud-sdk-go/ucloud/auth"
"github.com/ucloud/ucloud-sdk-go/ucloud/log"
"github.com/ucloud/ucloud-sdk-go/services/uhost"
"github.com/ucloud/ucloud-sdk-go/services/ulb"
"github.com/ucloud/ucloud-sdk-go/services/unet"
)
const region = "cn-bj2"
const zone = "cn-bj2-05"
var uhostClient *uhost.UHostClient
var unetClient *unet.UNetClient
var ulbClient *ulb.ULBClient
func init() {
cfg := ucloud.NewConfig()
cfg.LogLevel = log.DebugLevel
cfg.Region = region
cfg.ProjectId = os.Getenv("UCLOUD_PROJECT_ID")
credential := auth.NewCredential()
credential.PrivateKey = os.Getenv("UCLOUD_PRIVATE_KEY")
credential.PublicKey = os.Getenv("UCLOUD_PUBLIC_KEY")
uhostClient = uhost.NewClient(&cfg, &credential)
unetClient = unet.NewClient(&cfg, &credential)
ulbClient = ulb.NewClient(&cfg, &credential)
log.Info("setup clients ...")
}
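// Note: init() above reads the credentials from the environment, so something
// like the following must be exported before running (values illustrative):
//   export UCLOUD_PROJECT_ID=org-xxxx
//   export UCLOUD_PUBLIC_KEY=xxxx
//   export UCLOUD_PRIVATE_KEY=xxxx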
func main() {
imageId, err := describeRandomImageId()
if err != nil {
panic(err)
}
uhostIDs, errs := createUHostBatch(imageId, 2)
if len(errs) > 0 {
log.Error(errs)
return
}
// teardown
defer deleteUHostBatch(uhostIDs)
ulbID, err := createULB()
if err != nil {
log.Error(err)
return
}
// teardown
defer deleteULB(ulbID)
vserverID, err := createVServer(ulbID)
if err != nil {
log.Error(err)
return
}
// teardown
defer deleteVServer(ulbID, vserverID)
backendIDs, errs := allocateBackendBatch(ulbID, vserverID, uhostIDs)
if len(errs) > 0 {
log.Error(errs)
return
}
// teardown
defer releaseBackendBatch(ulbID, vserverID, backendIDs)
}
func describeRandomImageId() (string, error) {
req := uhostClient.NewDescribeImageRequest()
req.ImageType = ucloud.String("Base")
req.OsType = ucloud.String("Linux")
resp, err := uhostClient.DescribeImage(req)
if err != nil {
return "", err
}
if len(resp.ImageSet) == 0 {
return "", fmt.Errorf("can not found any image")
}
return resp.ImageSet[0].ImageId, nil
}
func createULB() (string, error) {
req := ulbClient.NewCreateULBRequest()
req.Tag = ucloud.String("sdk-example")
resp, err := ulbClient.CreateULB(req)
if err != nil {
return "", err
}
// wait for async action is completed
time.Sleep(5 * time.Second)
return resp.ULBId, nil
}
func deleteULB(ulbID string) error {
req := ulbClient.NewDeleteULBRequest()
req.ULBId = ucloud.String(ulbID)
_, err := ulbClient.DeleteULB(req)
if err != nil {
return err
}
return nil
}
func createVServer(id string) (string, error) {
req := ulbClient.NewCreateVServerRequest()
req.ULBId = ucloud.String(id)
// req.Method = ucloud.String("ConsistentHash")
resp, err := ulbClient.CreateVServer(req)
if err != nil {
return "", err
}
// wait for async action is completed
time.Sleep(3 * time.Second)
return resp.VServerId, nil
}
func deleteVServer(ulbID, vserverID string) error {
req := ulbClient.NewDeleteVServerRequest()
req.ULBId = ucloud.String(ulbID)
req.VServerId = ucloud.String(vserverID)
_, err := ulbClient.DeleteVServer(req)
if err != nil {
return err
}
return nil
}
func allocateBackendBatch(ulbID, vserverID string, uhostIDs []string) (ids []string, errors []error) {
for _, uhostID := range uhostIDs {
id, err := allocateBackend(ulbID, vserverID, uhostID)
if err != nil {
errors = append(errors, err)
} else {
ids = append(ids, id)
}
}
return
}
func allocateBackend(ulbID, vserverID, uhostID string) (string, error) {
req := ulbClient.NewAllocateBackendRequest()
req.ULBId = ucloud.String(ulbID)
req.VServerId = ucloud.String(vserverID)
req.ResourceType = ucloud.String("UHost")
req.ResourceId = ucloud.String(uhostID)
req.Port = ucloud.Int(80)
resp, err := ulbClient.AllocateBackend(req)
if err != nil {
return "", err
}
return resp.BackendId, nil
}
func releaseBackendBatch(ulbID, vserverID string, backendIDs []string) (errors []error) {
for _, backendID := range backendIDs {
err := releaseBackend(ulbID, backendID)
if err != nil {
errors = append(errors, err)
}
}
return errors
}
func releaseBackend(ulbID, backendID string) error {
req := ulbClient.NewReleaseBackendRequest()
req.ULBId = ucloud.String(ulbID)
req.BackendId = ucloud.String(backendID)
_, err := ulbClient.ReleaseBackend(req)
if err != nil {
return err
}
return nil
}
func createUHostBatch(imageId string, count int) (ids []string, errors []error) {
for i := 0; i < count; i++ {
id, err := createUHost(fmt.Sprintf("sdk-example-%d", i), imageId)
if err != nil {
errors = append(errors, err)
} else {
ids = append(ids, id)
}
}
	// wait until all uhost instances are running
if len(ids) > 0 {
if err := waitForState(ids, uhost.StateRunning); err != nil {
errors = append(errors, err)
}
}
return
}
func createUHost(name, imageId string) (string, error) {
req := uhostClient.NewCreateUHostInstanceRequest()
req.Name = ucloud.String(name)
req.Zone = ucloud.String(zone) // TODO: use random zone
req.ImageId = ucloud.String(imageId) // TODO: use random image
req.LoginMode = ucloud.String("Password")
req.Password = ucloud.String("somePassword_")
req.ChargeType = ucloud.String("Dynamic")
req.CPU = ucloud.Int(1)
req.Memory = ucloud.Int(1024)
req.Tag = ucloud.String("sdk-example")
resp, err := uhostClient.CreateUHostInstance(req)
if err != nil {
return "", err
}
return resp.UHostIds[0], nil
}
func waitForState(ids []string, state uhost.State) error {
wait := uhostClient.NewWaitUntilUHostInstanceStateRequest()
wait.Interval = ucloud.TimeDuration(time.Second * 10)
wait.MaxAttempts = ucloud.Int(10)
wait.State = state
wait.IgnoreError = ucloud.Bool(true)
desc := uhostClient.NewDescribeUHostInstanceRequest()
desc.UHostIds = ids
wait.DescribeRequest = desc
err := uhostClient.WaitUntilUHostInstanceState(wait)
if err != nil {
return err
}
return nil
}
func deleteUHostBatch(ids []string) (errors []error) {
for _, id := range ids {
err := stopUHost(id)
if err != nil {
errors = append(errors, err)
}
}
if err := waitForState(ids, uhost.StateStopped); err != nil {
errors = append(errors, err)
}
for _, id := range ids {
err := deleteUHost(id)
if err != nil {
errors = append(errors, err)
}
}
return
}
func stopUHost(id string) error {
stop := uhostClient.NewStopUHostInstanceRequest()
stop.UHostId = ucloud.String(id)
stop.WithRetry(2)
_, err := uhostClient.StopUHostInstance(stop)
if err != nil {
return err
}
return nil
}
func deleteUHost(id string) error {
req := uhostClient.NewTerminateUHostInstanceRequest()
req.UHostId = ucloud.String(id)
_, err := uhostClient.TerminateUHostInstance(req)
if err != nil {
return err
}
return nil
}
| [
"\"UCLOUD_PROJECT_ID\"",
"\"UCLOUD_PRIVATE_KEY\"",
"\"UCLOUD_PUBLIC_KEY\""
]
| []
| [
"UCLOUD_PRIVATE_KEY",
"UCLOUD_PROJECT_ID",
"UCLOUD_PUBLIC_KEY"
]
| [] | ["UCLOUD_PRIVATE_KEY", "UCLOUD_PROJECT_ID", "UCLOUD_PUBLIC_KEY"] | go | 3 | 0 | |
internal/ingress/controller/template/template.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package template
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"math/rand"
"net"
"net/url"
"os"
"os/exec"
"reflect"
"regexp"
"sort"
"strings"
text_template "text/template"
"time"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/ingress-nginx/internal/file"
"k8s.io/ingress-nginx/internal/ingress"
"k8s.io/ingress-nginx/internal/ingress/annotations/influxdb"
"k8s.io/ingress-nginx/internal/ingress/annotations/ratelimit"
"k8s.io/ingress-nginx/internal/ingress/controller/config"
ing_net "k8s.io/ingress-nginx/internal/net"
"k8s.io/klog"
)
const (
slash = "/"
nonIdempotent = "non_idempotent"
defBufferSize = 65535
)
// TemplateWriter is the interface to render a template
type TemplateWriter interface {
Write(conf config.TemplateConfig) ([]byte, error)
}
// Template ...
type Template struct {
tmpl *text_template.Template
//fw watch.FileWatcher
bp *BufferPool
}
//NewTemplate returns a new Template instance or an
//error if the specified template file contains errors
func NewTemplate(file string, fs file.Filesystem) (*Template, error) {
data, err := fs.ReadFile(file)
if err != nil {
return nil, errors.Wrapf(err, "unexpected error reading template %v", file)
}
tmpl, err := text_template.New("nginx.tmpl").Funcs(funcMap).Parse(string(data))
if err != nil {
return nil, err
}
return &Template{
tmpl: tmpl,
bp: NewBufferPool(defBufferSize),
}, nil
}
// Write populates a buffer using a template with NGINX configuration
// and the servers and upstreams created by Ingress rules
func (t *Template) Write(conf config.TemplateConfig) ([]byte, error) {
tmplBuf := t.bp.Get()
defer t.bp.Put(tmplBuf)
outCmdBuf := t.bp.Get()
defer t.bp.Put(outCmdBuf)
if klog.V(3) {
b, err := json.Marshal(conf)
if err != nil {
klog.Errorf("unexpected error: %v", err)
}
klog.Infof("NGINX configuration: %v", string(b))
}
err := t.tmpl.Execute(tmplBuf, conf)
if err != nil {
return nil, err
}
	// squeezes multiple adjacent empty lines to be single spaced;
	// this is to avoid the use of regular expressions
cmd := exec.Command("/ingress-controller/clean-nginx-conf.sh")
cmd.Stdin = tmplBuf
cmd.Stdout = outCmdBuf
if err := cmd.Run(); err != nil {
klog.Warningf("unexpected error cleaning template: %v", err)
return tmplBuf.Bytes(), nil
}
return outCmdBuf.Bytes(), nil
}
var (
funcMap = text_template.FuncMap{
"empty": func(input interface{}) bool {
check, ok := input.(string)
if ok {
return len(check) == 0
}
return true
},
"escapeLiteralDollar": escapeLiteralDollar,
"shouldConfigureLuaRestyWAF": shouldConfigureLuaRestyWAF,
"buildLuaSharedDictionaries": buildLuaSharedDictionaries,
"buildLocation": buildLocation,
"buildAuthLocation": buildAuthLocation,
"buildAuthResponseHeaders": buildAuthResponseHeaders,
"buildProxyPass": buildProxyPass,
"filterRateLimits": filterRateLimits,
"buildRateLimitZones": buildRateLimitZones,
"buildRateLimit": buildRateLimit,
"buildResolversForLua": buildResolversForLua,
"configForLua": configForLua,
"locationConfigForLua": locationConfigForLua,
"buildResolvers": buildResolvers,
"buildUpstreamName": buildUpstreamName,
"isLocationInLocationList": isLocationInLocationList,
"isLocationAllowed": isLocationAllowed,
"buildLogFormatUpstream": buildLogFormatUpstream,
"buildDenyVariable": buildDenyVariable,
"getenv": os.Getenv,
"contains": strings.Contains,
"hasPrefix": strings.HasPrefix,
"hasSuffix": strings.HasSuffix,
"trimSpace": strings.TrimSpace,
"toUpper": strings.ToUpper,
"toLower": strings.ToLower,
"formatIP": formatIP,
"buildNextUpstream": buildNextUpstream,
"getIngressInformation": getIngressInformation,
"serverConfig": func(all config.TemplateConfig, server *ingress.Server) interface{} {
return struct{ First, Second interface{} }{all, server}
},
"isValidByteSize": isValidByteSize,
"buildForwardedFor": buildForwardedFor,
"buildAuthSignURL": buildAuthSignURL,
"buildOpentracing": buildOpentracing,
"proxySetHeader": proxySetHeader,
"buildInfluxDB": buildInfluxDB,
"enforceRegexModifier": enforceRegexModifier,
"stripLocationModifer": stripLocationModifer,
"buildCustomErrorDeps": buildCustomErrorDeps,
"opentracingPropagateContext": opentracingPropagateContext,
"buildCustomErrorLocationsPerServer": buildCustomErrorLocationsPerServer,
}
)
// escapeLiteralDollar will replace the $ character with ${literal_dollar}
// which is made to work via the following configuration in the http section of
// the template:
// geo $literal_dollar {
// default "$";
// }
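// For example (illustrative): escapeLiteralDollar(`usd$value`) returns
// `usd${literal_dollar}value`, which NGINX then renders back as a literal dollar sign.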
func escapeLiteralDollar(input interface{}) string {
inputStr, ok := input.(string)
if !ok {
return ""
}
return strings.Replace(inputStr, `$`, `${literal_dollar}`, -1)
}
// formatIP will wrap IPv6 addresses in [] and return IPv4 addresses
// without modification. If the input cannot be parsed as an IP address
// it is returned without modification.
func formatIP(input string) string {
ip := net.ParseIP(input)
if ip == nil {
return input
}
if v4 := ip.To4(); v4 != nil {
return input
}
return fmt.Sprintf("[%s]", input)
}
func shouldConfigureLuaRestyWAF(disableLuaRestyWAF bool, mode string) bool {
if !disableLuaRestyWAF && len(mode) > 0 {
return true
}
return false
}
func buildLuaSharedDictionaries(s interface{}, disableLuaRestyWAF bool) string {
servers, ok := s.([]*ingress.Server)
if !ok {
klog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s)
return ""
}
out := []string{
"lua_shared_dict configuration_data 5M",
"lua_shared_dict certificate_data 16M",
}
if !disableLuaRestyWAF {
luaRestyWAFEnabled := func() bool {
for _, server := range servers {
for _, location := range server.Locations {
if len(location.LuaRestyWAF.Mode) > 0 {
return true
}
}
}
return false
}()
if luaRestyWAFEnabled {
out = append(out, "lua_shared_dict waf_storage 64M")
}
}
return strings.Join(out, ";\n\r") + ";"
}
func buildResolversForLua(res interface{}, disableIpv6 interface{}) string {
nss, ok := res.([]net.IP)
if !ok {
klog.Errorf("expected a '[]net.IP' type but %T was returned", res)
return ""
}
no6, ok := disableIpv6.(bool)
if !ok {
klog.Errorf("expected a 'bool' type but %T was returned", disableIpv6)
return ""
}
if len(nss) == 0 {
return ""
}
r := []string{}
for _, ns := range nss {
if ing_net.IsIPV6(ns) {
if no6 {
continue
}
r = append(r, fmt.Sprintf("\"[%v]\"", ns))
} else {
r = append(r, fmt.Sprintf("\"%v\"", ns))
}
}
return strings.Join(r, ", ")
}
// configForLua returns some general configuration as Lua table represented as string
func configForLua(input interface{}) string {
all, ok := input.(config.TemplateConfig)
if !ok {
klog.Errorf("expected a 'config.TemplateConfig' type but %T was given", input)
return "{}"
}
return fmt.Sprintf(`{
use_forwarded_headers = %t,
is_ssl_passthrough_enabled = %t,
http_redirect_code = %v,
listen_ports = { ssl_proxy = "%v", https = "%v" },
}`, all.Cfg.UseForwardedHeaders, all.IsSSLPassthroughEnabled, all.Cfg.HTTPRedirectCode, all.ListenPorts.SSLProxy, all.ListenPorts.HTTPS)
}
// locationConfigForLua formats some location specific configuration into Lua table represented as string
func locationConfigForLua(l interface{}, s interface{}, a interface{}) string {
location, ok := l.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was given", l)
return "{}"
}
server, ok := s.(*ingress.Server)
if !ok {
klog.Errorf("expected an '*ingress.Server' type but %T was given", s)
return "{}"
}
all, ok := a.(config.TemplateConfig)
if !ok {
klog.Errorf("expected a 'config.TemplateConfig' type but %T was given", a)
return "{}"
}
forceSSLRedirect := location.Rewrite.ForceSSLRedirect || (len(server.SSLCert.PemFileName) > 0 && location.Rewrite.SSLRedirect)
forceSSLRedirect = forceSSLRedirect && !isLocationInLocationList(l, all.Cfg.NoTLSRedirectLocations)
return fmt.Sprintf(`{
force_ssl_redirect = %t,
use_port_in_redirects = %t,
}`, forceSSLRedirect, location.UsePortInRedirects)
}
// buildResolvers returns the resolvers reading the /etc/resolv.conf file
func buildResolvers(res interface{}, disableIpv6 interface{}) string {
	// NGINX needs IPv6 addresses to be surrounded by brackets
nss, ok := res.([]net.IP)
if !ok {
klog.Errorf("expected a '[]net.IP' type but %T was returned", res)
return ""
}
no6, ok := disableIpv6.(bool)
if !ok {
klog.Errorf("expected a 'bool' type but %T was returned", disableIpv6)
return ""
}
if len(nss) == 0 {
return ""
}
r := []string{"resolver"}
for _, ns := range nss {
if ing_net.IsIPV6(ns) {
if no6 {
continue
}
r = append(r, fmt.Sprintf("[%v]", ns))
} else {
r = append(r, fmt.Sprintf("%v", ns))
}
}
r = append(r, "valid=30s")
if no6 {
r = append(r, "ipv6=off")
}
return strings.Join(r, " ") + ";"
}
func needsRewrite(location *ingress.Location) bool {
if len(location.Rewrite.Target) > 0 && location.Rewrite.Target != location.Path {
return true
}
return false
}
func stripLocationModifer(path string) string {
return strings.TrimLeft(path, "~* ")
}
// enforceRegexModifier checks if the "rewrite-target" or "use-regex" annotation
// is used on any location path within a server
func enforceRegexModifier(input interface{}) bool {
locations, ok := input.([]*ingress.Location)
if !ok {
klog.Errorf("expected an '[]*ingress.Location' type but %T was returned", input)
return false
}
for _, location := range locations {
if needsRewrite(location) || location.Rewrite.UseRegex {
return true
}
}
return false
}
// buildLocation produces the location string, if the ingress has redirects
// (specified through the nginx.ingress.kubernetes.io/rewrite-target annotation)
func buildLocation(input interface{}, enforceRegex bool) string {
location, ok := input.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return slash
}
path := location.Path
if enforceRegex {
return fmt.Sprintf(`~* "^%s"`, path)
}
return path
}
func buildAuthLocation(input interface{}) string {
location, ok := input.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return ""
}
if location.ExternalAuth.URL == "" {
return ""
}
str := base64.URLEncoding.EncodeToString([]byte(location.Path))
// removes "=" after encoding
str = strings.Replace(str, "=", "", -1)
return fmt.Sprintf("/_external-auth-%v", str)
}
func buildAuthResponseHeaders(input interface{}) []string {
location, ok := input.(*ingress.Location)
res := []string{}
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return res
}
if len(location.ExternalAuth.ResponseHeaders) == 0 {
return res
}
for i, h := range location.ExternalAuth.ResponseHeaders {
hvar := strings.ToLower(h)
hvar = strings.NewReplacer("-", "_").Replace(hvar)
res = append(res, fmt.Sprintf("auth_request_set $authHeader%v $upstream_http_%v;", i, hvar))
res = append(res, fmt.Sprintf("proxy_set_header '%v' $authHeader%v;", h, i))
}
return res
}
func buildLogFormatUpstream(input interface{}) string {
cfg, ok := input.(config.Configuration)
if !ok {
klog.Errorf("expected a 'config.Configuration' type but %T was returned", input)
return ""
}
return cfg.BuildLogFormatUpstream()
}
// buildProxyPass produces the proxy pass string, if the ingress has redirects
// (specified through the nginx.ingress.kubernetes.io/rewrite-target annotation)
// If the annotation nginx.ingress.kubernetes.io/add-base-url:"true" is specified it will
// add a base tag in the head of the response from the service
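// For a plain HTTP location whose path equals its rewrite target (i.e. no
// special rewrite), this renders simply as (illustrative): proxy_pass http://upstream_balancer;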
func buildProxyPass(host string, b interface{}, loc interface{}) string {
backends, ok := b.([]*ingress.Backend)
if !ok {
klog.Errorf("expected an '[]*ingress.Backend' type but %T was returned", b)
return ""
}
location, ok := loc.(*ingress.Location)
if !ok {
klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return ""
}
path := location.Path
proto := "http://"
proxyPass := "proxy_pass"
switch location.BackendProtocol {
case "HTTPS":
proto = "https://"
case "GRPC":
proto = "grpc://"
proxyPass = "grpc_pass"
case "GRPCS":
proto = "grpcs://"
proxyPass = "grpc_pass"
case "AJP":
proto = ""
proxyPass = "ajp_pass"
}
upstreamName := "upstream_balancer"
for _, backend := range backends {
if backend.Name == location.Backend {
if backend.SSLPassthrough {
proto = "https://"
if location.BackendProtocol == "GRPCS" {
proto = "grpcs://"
}
}
break
}
}
// defProxyPass returns the default proxy_pass, just the name of the upstream
defProxyPass := fmt.Sprintf("%v %s%s;", proxyPass, proto, upstreamName)
// if the path in the ingress rule is equals to the target: no special rewrite
if path == location.Rewrite.Target {
return defProxyPass
}
if len(location.Rewrite.Target) > 0 {
var xForwardedPrefix string
if len(location.XForwardedPrefix) > 0 {
xForwardedPrefix = fmt.Sprintf("proxy_set_header X-Forwarded-Prefix \"%s\";\n", location.XForwardedPrefix)
}
return fmt.Sprintf(`
rewrite "(?i)%s" %s break;
%v%v %s%s;`, path, location.Rewrite.Target, xForwardedPrefix, proxyPass, proto, upstreamName)
}
// default proxy_pass
return defProxyPass
}
// TODO: Needs Unit Tests
func filterRateLimits(input interface{}) []ratelimit.Config {
ratelimits := []ratelimit.Config{}
found := sets.String{}
servers, ok := input.([]*ingress.Server)
if !ok {
klog.Errorf("expected a '[]ratelimit.RateLimit' type but %T was returned", input)
return ratelimits
}
for _, server := range servers {
for _, loc := range server.Locations {
if loc.RateLimit.ID != "" && !found.Has(loc.RateLimit.ID) {
found.Insert(loc.RateLimit.ID)
ratelimits = append(ratelimits, loc.RateLimit)
}
}
}
return ratelimits
}
// TODO: Needs Unit Tests
// buildRateLimitZones produces an array of limit_conn_zone in order to allow
// rate limiting of requests. Each Ingress rule could have up to three zones, one
// for connection limit by IP address, one for limiting requests per minute, and
// one for limiting requests per second.
func buildRateLimitZones(input interface{}) []string {
zones := sets.String{}
servers, ok := input.([]*ingress.Server)
if !ok {
klog.Errorf("expected a '[]*ingress.Server' type but %T was returned", input)
return zones.List()
}
for _, server := range servers {
for _, loc := range server.Locations {
if loc.RateLimit.Connections.Limit > 0 {
zone := fmt.Sprintf("limit_conn_zone $limit_%s zone=%v:%vm;",
loc.RateLimit.ID,
loc.RateLimit.Connections.Name,
loc.RateLimit.Connections.SharedSize)
if !zones.Has(zone) {
zones.Insert(zone)
}
}
if loc.RateLimit.RPM.Limit > 0 {
zone := fmt.Sprintf("limit_req_zone $limit_%s zone=%v:%vm rate=%vr/m;",
loc.RateLimit.ID,
loc.RateLimit.RPM.Name,
loc.RateLimit.RPM.SharedSize,
loc.RateLimit.RPM.Limit)
if !zones.Has(zone) {
zones.Insert(zone)
}
}
if loc.RateLimit.RPS.Limit > 0 {
zone := fmt.Sprintf("limit_req_zone $limit_%s zone=%v:%vm rate=%vr/s;",
loc.RateLimit.ID,
loc.RateLimit.RPS.Name,
loc.RateLimit.RPS.SharedSize,
loc.RateLimit.RPS.Limit)
if !zones.Has(zone) {
zones.Insert(zone)
}
}
}
}
return zones.List()
}
// buildRateLimit produces an array of limit_req to be used inside the Path of
// Ingress rules. The order: connections by IP first, then RPS, and RPM last.
func buildRateLimit(input interface{}) []string {
limits := []string{}
loc, ok := input.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return limits
}
if loc.RateLimit.Connections.Limit > 0 {
limit := fmt.Sprintf("limit_conn %v %v;",
loc.RateLimit.Connections.Name, loc.RateLimit.Connections.Limit)
limits = append(limits, limit)
}
if loc.RateLimit.RPS.Limit > 0 {
limit := fmt.Sprintf("limit_req zone=%v burst=%v nodelay;",
loc.RateLimit.RPS.Name, loc.RateLimit.RPS.Burst)
limits = append(limits, limit)
}
if loc.RateLimit.RPM.Limit > 0 {
limit := fmt.Sprintf("limit_req zone=%v burst=%v nodelay;",
loc.RateLimit.RPM.Name, loc.RateLimit.RPM.Burst)
limits = append(limits, limit)
}
if loc.RateLimit.LimitRateAfter > 0 {
limit := fmt.Sprintf("limit_rate_after %vk;",
loc.RateLimit.LimitRateAfter)
limits = append(limits, limit)
}
if loc.RateLimit.LimitRate > 0 {
limit := fmt.Sprintf("limit_rate %vk;",
loc.RateLimit.LimitRate)
limits = append(limits, limit)
}
return limits
}
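// Illustrative sketch (hypothetical values, not from the original source): for
// a location whose RateLimit has Connections{Name: "conn", Limit: 5},
// RPS{Name: "rps", Limit: 10, Burst: 20} and LimitRateAfter 512, buildRateLimit
// returns the slice:
//
//	limit_conn conn 5;
//	limit_req zone=rps burst=20 nodelay;
//	limit_rate_after 512k;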
func isLocationInLocationList(location interface{}, rawLocationList string) bool {
loc, ok := location.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", location)
return false
}
locationList := strings.Split(rawLocationList, ",")
for _, locationListItem := range locationList {
locationListItem = strings.Trim(locationListItem, " ")
if locationListItem == "" {
continue
}
if strings.HasPrefix(loc.Path, locationListItem) {
return true
}
}
return false
}
func isLocationAllowed(input interface{}) bool {
loc, ok := input.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return false
}
return loc.Denied == nil
}
var (
denyPathSlugMap = map[string]string{}
)
// buildDenyVariable returns an nginx variable for a location in a
// server to be used in the whitelist check.
// It maps the location key to a short random slug so that the variable
// name stays small, avoiding issues with the size limits imposed by the
// variables_hash_bucket_size directive.
func buildDenyVariable(a interface{}) string {
l, ok := a.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", a)
return ""
}
if _, ok := denyPathSlugMap[l]; !ok {
denyPathSlugMap[l] = randomString()
}
return fmt.Sprintf("$deny_%v", denyPathSlugMap[l])
}
func buildUpstreamName(loc interface{}) string {
location, ok := loc.(*ingress.Location)
if !ok {
klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return ""
}
upstreamName := location.Backend
return upstreamName
}
func buildNextUpstream(i, r interface{}) string {
nextUpstream, ok := i.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", i)
return ""
}
retryNonIdempotent := r.(bool)
parts := strings.Split(nextUpstream, " ")
nextUpstreamCodes := make([]string, 0, len(parts))
for _, v := range parts {
if v != "" && v != nonIdempotent {
nextUpstreamCodes = append(nextUpstreamCodes, v)
}
if v == nonIdempotent {
retryNonIdempotent = true
}
}
if retryNonIdempotent {
nextUpstreamCodes = append(nextUpstreamCodes, nonIdempotent)
}
return strings.Join(nextUpstreamCodes, " ")
}
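// Illustrative sketch (hypothetical input, not from the original source):
// assuming the package-level constant nonIdempotent holds the literal
// "non_idempotent", buildNextUpstream("error timeout non_idempotent", false)
// returns "error timeout non_idempotent": the token is filtered out of its
// original position and re-appended at the end, so it always comes last.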
// refer to http://nginx.org/en/docs/syntax.html
// Nginx differentiates between size and offset
// offset values additionally support gigabyte units
var nginxSizeRegex = regexp.MustCompile("^[0-9]+[kKmM]{0,1}$")
var nginxOffsetRegex = regexp.MustCompile("^[0-9]+[kKmMgG]{0,1}$")
// isValidByteSize validates that the given value uses size (or, when isOffset
// is true, offset) units valid in nginx
// http://nginx.org/en/docs/syntax.html
func isValidByteSize(input interface{}, isOffset bool) bool {
s, ok := input.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", input)
return false
}
s = strings.TrimSpace(s)
if s == "" {
klog.V(2).Info("empty byte size, hence it will not be set")
return false
}
if isOffset {
return nginxOffsetRegex.MatchString(s)
}
return nginxSizeRegex.MatchString(s)
}
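// Illustrative sketch (not part of the original source): per the regexes above,
// isValidByteSize("16k", false) and isValidByteSize("512M", false) return true,
// while isValidByteSize("1g", false) returns false because the gigabyte suffix
// is only accepted when isOffset is true.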
type ingressInformation struct {
Namespace string
Rule string
Service string
Annotations map[string]string
}
func (info *ingressInformation) Equal(other *ingressInformation) bool {
if info.Namespace != other.Namespace {
return false
}
if info.Rule != other.Rule {
return false
}
if info.Service != other.Service {
return false
}
if !reflect.DeepEqual(info.Annotations, other.Annotations) {
return false
}
return true
}
func getIngressInformation(i, h, p interface{}) *ingressInformation {
ing, ok := i.(*ingress.Ingress)
if !ok {
klog.Errorf("expected an '*ingress.Ingress' type but %T was returned", i)
return &ingressInformation{}
}
hostname, ok := h.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", h)
return &ingressInformation{}
}
path, ok := p.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", p)
return &ingressInformation{}
}
if ing == nil {
return &ingressInformation{}
}
info := &ingressInformation{
Namespace: ing.GetNamespace(),
Rule: ing.GetName(),
Annotations: ing.Annotations,
}
if ing.Spec.Backend != nil {
info.Service = ing.Spec.Backend.ServiceName
}
for _, rule := range ing.Spec.Rules {
if rule.HTTP == nil {
continue
}
if hostname != "" && hostname != rule.Host {
continue
}
for _, rPath := range rule.HTTP.Paths {
if path == rPath.Path {
info.Service = rPath.Backend.ServiceName
return info
}
}
}
return info
}
func buildForwardedFor(input interface{}) string {
s, ok := input.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", input)
return ""
}
ffh := strings.Replace(s, "-", "_", -1)
ffh = strings.ToLower(ffh)
return fmt.Sprintf("$http_%v", ffh)
}
func buildAuthSignURL(input interface{}) string {
s, ok := input.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", input)
return ""
}
u, _ := url.Parse(s)
q := u.Query()
if len(q) == 0 {
return fmt.Sprintf("%v?rd=$pass_access_scheme://$http_host$escaped_request_uri", s)
}
if q.Get("rd") != "" {
return s
}
return fmt.Sprintf("%v&rd=$pass_access_scheme://$http_host$escaped_request_uri", s)
}
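// Illustrative sketch (hypothetical URL, not from the original source): for an
// input of "https://auth.example.com/start", which carries no query string,
// buildAuthSignURL returns
// "https://auth.example.com/start?rd=$pass_access_scheme://$http_host$escaped_request_uri";
// an input that already contains an "rd" parameter is returned unchanged.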
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func init() {
rand.Seed(time.Now().UnixNano())
}
func randomString() string {
b := make([]rune, 32)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
func buildOpentracing(input interface{}) string {
cfg, ok := input.(config.Configuration)
if !ok {
klog.Errorf("expected a 'config.Configuration' type but %T was returned", input)
return ""
}
if !cfg.EnableOpentracing {
return ""
}
buf := bytes.NewBufferString("")
if cfg.ZipkinCollectorHost != "" {
buf.WriteString("opentracing_load_tracer /usr/local/lib/libzipkin_opentracing.so /etc/nginx/opentracing.json;")
} else if cfg.JaegerCollectorHost != "" {
buf.WriteString("opentracing_load_tracer /usr/local/lib/libjaegertracing_plugin.so /etc/nginx/opentracing.json;")
} else if cfg.DatadogCollectorHost != "" {
buf.WriteString("opentracing_load_tracer /usr/local/lib/libdd_opentracing.so /etc/nginx/opentracing.json;")
}
buf.WriteString("\r\n")
return buf.String()
}
// buildInfluxDB produces the single line configuration
// needed by the InfluxDB module to send request metrics
// for the current resource
func buildInfluxDB(input interface{}) string {
cfg, ok := input.(influxdb.Config)
if !ok {
klog.Errorf("expected an 'influxdb.Config' type but %T was returned", input)
return ""
}
if !cfg.InfluxDBEnabled {
return ""
}
return fmt.Sprintf(
"influxdb server_name=%s host=%s port=%s measurement=%s enabled=true;",
cfg.InfluxDBServerName,
cfg.InfluxDBHost,
cfg.InfluxDBPort,
cfg.InfluxDBMeasurement,
)
}
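// Illustrative sketch (hypothetical values, not from the original source): with
// InfluxDBEnabled set, server name "nginx-ingress", host "127.0.0.1", port
// "8089" and measurement "requests", buildInfluxDB renders:
//
//	influxdb server_name=nginx-ingress host=127.0.0.1 port=8089 measurement=requests enabled=true;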
func proxySetHeader(loc interface{}) string {
location, ok := loc.(*ingress.Location)
if !ok {
klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return "proxy_set_header"
}
if location.BackendProtocol == "GRPC" || location.BackendProtocol == "GRPCS" {
return "grpc_set_header"
}
return "proxy_set_header"
}
// buildCustomErrorDeps is a utility function returning a struct wrapper with
// the data required to build the 'CUSTOM_ERRORS' template
func buildCustomErrorDeps(upstreamName string, errorCodes []int, enableMetrics bool) interface{} {
return struct {
UpstreamName string
ErrorCodes []int
EnableMetrics bool
}{
UpstreamName: upstreamName,
ErrorCodes: errorCodes,
EnableMetrics: enableMetrics,
}
}
type errorLocation struct {
UpstreamName string
Codes []int
}
// buildCustomErrorLocationsPerServer is a utility function which collects all
// custom error codes for all locations of a server block, deduplicates them,
// and returns a set which is unique by default-upstream and error code. It
// returns an array of errorLocations, each of which contains the upstream name
// and a list of error codes for that given upstream, so that sufficiently
// unique @custom error location blocks can be created in the template
func buildCustomErrorLocationsPerServer(input interface{}) interface{} {
server, ok := input.(*ingress.Server)
if !ok {
klog.Errorf("expected a '*ingress.Server' type but %T was returned", input)
return nil
}
codesMap := make(map[string]map[int]bool)
for _, loc := range server.Locations {
backendUpstream := loc.DefaultBackendUpstreamName
var dedupedCodes map[int]bool
if existingMap, ok := codesMap[backendUpstream]; ok {
dedupedCodes = existingMap
} else {
dedupedCodes = make(map[int]bool)
}
for _, code := range loc.CustomHTTPErrors {
dedupedCodes[code] = true
}
codesMap[backendUpstream] = dedupedCodes
}
errorLocations := []errorLocation{}
for upstream, dedupedCodes := range codesMap {
codesForUpstream := []int{}
for code := range dedupedCodes {
codesForUpstream = append(codesForUpstream, code)
}
sort.Ints(codesForUpstream)
errorLocations = append(errorLocations, errorLocation{
UpstreamName: upstream,
Codes: codesForUpstream,
})
}
sort.Slice(errorLocations, func(i, j int) bool {
return errorLocations[i].UpstreamName < errorLocations[j].UpstreamName
})
return errorLocations
}
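// Illustrative sketch (hypothetical data, not from the original source): two
// locations that share DefaultBackendUpstreamName "upstream-default-backend"
// with CustomHTTPErrors [503, 404] and [404] collapse into a single
// errorLocation{UpstreamName: "upstream-default-backend", Codes: [404 503]},
// since codes are deduplicated per upstream and sorted before being returned.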
func opentracingPropagateContext(loc interface{}) string {
location, ok := loc.(*ingress.Location)
if !ok {
klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return "opentracing_propagate_context"
}
if location.BackendProtocol == "GRPC" || location.BackendProtocol == "GRPCS" {
return "opentracing_grpc_propagate_context"
}
return "opentracing_propagate_context"
}
| []
| []
| []
| [] | [] | go | 0 | 0 | |
soracom/generated/cmd/shipping_addresses_update.go | // Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"os"
"strings"
"github.com/spf13/cobra"
)
// ShippingAddressesUpdateCmdAddressLine1 holds value of 'addressLine1' option
var ShippingAddressesUpdateCmdAddressLine1 string
// ShippingAddressesUpdateCmdAddressLine2 holds value of 'addressLine2' option
var ShippingAddressesUpdateCmdAddressLine2 string
// ShippingAddressesUpdateCmdBuilding holds value of 'building' option
var ShippingAddressesUpdateCmdBuilding string
// ShippingAddressesUpdateCmdCity holds value of 'city' option
var ShippingAddressesUpdateCmdCity string
// ShippingAddressesUpdateCmdCompanyName holds value of 'companyName' option
var ShippingAddressesUpdateCmdCompanyName string
// ShippingAddressesUpdateCmdCountryCode holds value of 'countryCode' option
var ShippingAddressesUpdateCmdCountryCode string
// ShippingAddressesUpdateCmdDepartment holds value of 'department' option
var ShippingAddressesUpdateCmdDepartment string
// ShippingAddressesUpdateCmdEmail holds value of 'email' option
var ShippingAddressesUpdateCmdEmail string
// ShippingAddressesUpdateCmdFullName holds value of 'fullName' option
var ShippingAddressesUpdateCmdFullName string
// ShippingAddressesUpdateCmdOperatorId holds value of 'operator_id' option
var ShippingAddressesUpdateCmdOperatorId string
// ShippingAddressesUpdateCmdPhoneNumber holds value of 'phoneNumber' option
var ShippingAddressesUpdateCmdPhoneNumber string
// ShippingAddressesUpdateCmdShippingAddressId holds value of 'shipping_address_id' option
var ShippingAddressesUpdateCmdShippingAddressId string
// ShippingAddressesUpdateCmdState holds value of 'state' option
var ShippingAddressesUpdateCmdState string
// ShippingAddressesUpdateCmdZipCode holds value of 'zipCode' option
var ShippingAddressesUpdateCmdZipCode string
// ShippingAddressesUpdateCmdBody holds contents of request body to be sent
var ShippingAddressesUpdateCmdBody string
func init() {
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdAddressLine1, "address-line1", "", TRAPI(""))
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdAddressLine2, "address-line2", "", TRAPI(""))
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdBuilding, "building", "", TRAPI(""))
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdCity, "city", "", TRAPI(""))
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdCompanyName, "company-name", "", TRAPI(""))
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdCountryCode, "country-code", "", TRAPI(""))
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdDepartment, "department", "", TRAPI(""))
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdEmail, "email", "", TRAPI(""))
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdFullName, "full-name", "", TRAPI(""))
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdOperatorId, "operator-id", "", TRAPI("Operator ID"))
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdPhoneNumber, "phone-number", "", TRAPI(""))
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdShippingAddressId, "shipping-address-id", "", TRAPI("shipping_address_id"))
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdState, "state", "", TRAPI(""))
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdZipCode, "zip-code", "", TRAPI(""))
ShippingAddressesUpdateCmd.Flags().StringVar(&ShippingAddressesUpdateCmdBody, "body", "", TRCLI("cli.common_params.body.short_help"))
ShippingAddressesCmd.AddCommand(ShippingAddressesUpdateCmd)
}
// ShippingAddressesUpdateCmd defines 'update' subcommand
var ShippingAddressesUpdateCmd = &cobra.Command{
Use: "update",
Short: TRAPI("/operators/{operator_id}/shipping_addresses/{shipping_address_id}:put:summary"),
Long: TRAPI(`/operators/{operator_id}/shipping_addresses/{shipping_address_id}:put:description`),
RunE: func(cmd *cobra.Command, args []string) error {
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectShippingAddressesUpdateCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
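// Illustrative sketch (hypothetical flag values, and assuming the parent
// command is registered as "shipping-addresses"): the subcommand above would
// typically be invoked with the flags defined in init(), for example:
//
//	soracom shipping-addresses update --shipping-address-id 12345 \
//	    --address-line1 "1-2-3 Example" --city Tokyo --state Tokyo --zip-code 100-0001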
func collectShippingAddressesUpdateCmdParams(ac *apiClient) (*apiParams, error) {
if ShippingAddressesUpdateCmdOperatorId == "" {
ShippingAddressesUpdateCmdOperatorId = ac.OperatorID
}
body, err := buildBodyForShippingAddressesUpdateCmd()
if err != nil {
return nil, err
}
contentType := "application/json"
if ShippingAddressesUpdateCmdAddressLine1 == "" {
if body == "" {
return nil, fmt.Errorf("required parameter '%s' is not specified", "address-line1")
}
}
if ShippingAddressesUpdateCmdCity == "" {
if body == "" {
return nil, fmt.Errorf("required parameter '%s' is not specified", "city")
}
}
if ShippingAddressesUpdateCmdShippingAddressId == "" {
if body == "" {
return nil, fmt.Errorf("required parameter '%s' is not specified", "shipping-address-id")
}
}
if ShippingAddressesUpdateCmdState == "" {
if body == "" {
return nil, fmt.Errorf("required parameter '%s' is not specified", "state")
}
}
if ShippingAddressesUpdateCmdZipCode == "" {
if body == "" {
return nil, fmt.Errorf("required parameter '%s' is not specified", "zip-code")
}
}
return &apiParams{
method: "PUT",
path: buildPathForShippingAddressesUpdateCmd("/operators/{operator_id}/shipping_addresses/{shipping_address_id}"),
query: buildQueryForShippingAddressesUpdateCmd(),
contentType: contentType,
body: body,
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForShippingAddressesUpdateCmd(path string) string {
escapedOperatorId := url.PathEscape(ShippingAddressesUpdateCmdOperatorId)
path = strReplace(path, "{"+"operator_id"+"}", escapedOperatorId, -1)
escapedShippingAddressId := url.PathEscape(ShippingAddressesUpdateCmdShippingAddressId)
path = strReplace(path, "{"+"shipping_address_id"+"}", escapedShippingAddressId, -1)
return path
}
func buildQueryForShippingAddressesUpdateCmd() url.Values {
result := url.Values{}
return result
}
func buildBodyForShippingAddressesUpdateCmd() (string, error) {
var result map[string]interface{}
if ShippingAddressesUpdateCmdBody != "" {
var b []byte
var err error
if strings.HasPrefix(ShippingAddressesUpdateCmdBody, "@") {
fname := strings.TrimPrefix(ShippingAddressesUpdateCmdBody, "@")
// #nosec
b, err = ioutil.ReadFile(fname)
} else if ShippingAddressesUpdateCmdBody == "-" {
b, err = ioutil.ReadAll(os.Stdin)
} else {
b = []byte(ShippingAddressesUpdateCmdBody)
}
if err != nil {
return "", err
}
err = json.Unmarshal(b, &result)
if err != nil {
return "", err
}
}
if result == nil {
result = make(map[string]interface{})
}
if ShippingAddressesUpdateCmdAddressLine1 != "" {
result["addressLine1"] = ShippingAddressesUpdateCmdAddressLine1
}
if ShippingAddressesUpdateCmdAddressLine2 != "" {
result["addressLine2"] = ShippingAddressesUpdateCmdAddressLine2
}
if ShippingAddressesUpdateCmdBuilding != "" {
result["building"] = ShippingAddressesUpdateCmdBuilding
}
if ShippingAddressesUpdateCmdCity != "" {
result["city"] = ShippingAddressesUpdateCmdCity
}
if ShippingAddressesUpdateCmdCompanyName != "" {
result["companyName"] = ShippingAddressesUpdateCmdCompanyName
}
if ShippingAddressesUpdateCmdCountryCode != "" {
result["countryCode"] = ShippingAddressesUpdateCmdCountryCode
}
if ShippingAddressesUpdateCmdDepartment != "" {
result["department"] = ShippingAddressesUpdateCmdDepartment
}
if ShippingAddressesUpdateCmdEmail != "" {
result["email"] = ShippingAddressesUpdateCmdEmail
}
if ShippingAddressesUpdateCmdFullName != "" {
result["fullName"] = ShippingAddressesUpdateCmdFullName
}
if ShippingAddressesUpdateCmdPhoneNumber != "" {
result["phoneNumber"] = ShippingAddressesUpdateCmdPhoneNumber
}
if ShippingAddressesUpdateCmdState != "" {
result["state"] = ShippingAddressesUpdateCmdState
}
if ShippingAddressesUpdateCmdZipCode != "" {
result["zipCode"] = ShippingAddressesUpdateCmdZipCode
}
resultBytes, err := json.Marshal(result)
if err != nil {
return "", err
}
return string(resultBytes), nil
}
| [
"\"SORACOM_VERBOSE\""
]
| []
| [
"SORACOM_VERBOSE"
]
| [] | ["SORACOM_VERBOSE"] | go | 1 | 0 | |
providers/ibm/ibm_is_instance_template.go | // Copyright 2019 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ibm
import (
"fmt"
"os"
"github.com/GoogleCloudPlatform/terraformer/terraformutils"
"github.com/IBM/go-sdk-core/v4/core"
"github.com/IBM/vpc-go-sdk/vpcv1"
)
// InstanceTemplateGenerator ...
type InstanceTemplateGenerator struct {
IBMService
}
func (g InstanceTemplateGenerator) createInstanceTemplateResources(templateID, templateName string) terraformutils.Resource {
resources := terraformutils.NewSimpleResource(
templateID,
templateName,
"ibm_is_instance_template",
"ibm",
[]string{})
return resources
}
// InitResources ...
func (g *InstanceTemplateGenerator) InitResources() error {
region := envFallBack([]string{"IC_REGION"}, "us-south")
apiKey := os.Getenv("IC_API_KEY")
if apiKey == "" {
return fmt.Errorf("No API key set")
}
vpcurl := fmt.Sprintf("https://%s.iaas.cloud.ibm.com/v1", region)
vpcoptions := &vpcv1.VpcV1Options{
URL: envFallBack([]string{"IBMCLOUD_IS_API_ENDPOINT"}, vpcurl),
Authenticator: &core.IamAuthenticator{
ApiKey: apiKey,
},
}
vpcclient, err := vpcv1.NewVpcV1(vpcoptions)
if err != nil {
return err
}
options := &vpcv1.ListInstanceTemplatesOptions{}
templates, response, err := vpcclient.ListInstanceTemplates(options)
if err != nil {
return fmt.Errorf("Error Fetching Instance Templates %s\n%s", err, response)
}
for _, template := range templates.Templates {
instemp := template.(*vpcv1.InstanceTemplate)
g.Resources = append(g.Resources, g.createInstanceTemplateResources(*instemp.ID, *instemp.Name))
}
return nil
}
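// Illustrative sketch (not part of the original provider): InitResources above
// only needs an IBM Cloud API key plus optional region/endpoint overrides, so a
// hypothetical environment for running the generator would be:
//
//	export IC_API_KEY=<your-api-key>
//	export IC_REGION=eu-de                       # falls back to us-south when unset
//	export IBMCLOUD_IS_API_ENDPOINT=<custom-url> # optional, overrides the regional default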
| [
"\"IC_API_KEY\""
]
| []
| [
"IC_API_KEY"
]
| [] | ["IC_API_KEY"] | go | 1 | 0 | |
eventsourcing/persistence.py | import json
import os
import uuid
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime
from decimal import Decimal
from typing import (
Any,
Dict,
Generic,
Iterator,
List,
Mapping,
Optional,
Type,
cast,
)
from uuid import UUID
from eventsourcing.domain import DomainEvent, TDomainEvent
from eventsourcing.utils import get_topic, resolve_topic, strtobool
class Transcoding(ABC):
# noinspection SpellCheckingInspection
"""
Abstract base class for custom transcodings.
"""
@property
@abstractmethod
def type(self) -> type:
# noinspection SpellCheckingInspection
"""Object type of transcoded object."""
@property
@abstractmethod
def name(self) -> str:
"""Name of transcoding."""
@abstractmethod
def encode(self, obj: Any) -> Any:
"""Encodes given object."""
@abstractmethod
def decode(self, data: Any) -> Any:
"""Decodes encoded object."""
class Transcoder(ABC):
"""
Abstract base class for transcoders.
"""
def __init__(self) -> None:
self.types: Dict[type, Transcoding] = {}
self.names: Dict[str, Transcoding] = {}
def register(self, transcoding: Transcoding) -> None:
"""
Registers given transcoding with the transcoder.
"""
self.types[transcoding.type] = transcoding
self.names[transcoding.name] = transcoding
@abstractmethod
def encode(self, obj: Any) -> bytes:
"""Encodes obj as bytes."""
@abstractmethod
def decode(self, data: bytes) -> Any:
"""Decodes obj from bytes."""
class JSONTranscoder(Transcoder):
"""
Extensible transcoder that uses the Python :mod:`json` module.
"""
def __init__(self) -> None:
super().__init__()
self.encoder = json.JSONEncoder(default=self._encode_obj)
self.decoder = json.JSONDecoder(object_hook=self._decode_obj)
def encode(self, obj: Any) -> bytes:
"""
Encodes given object as a bytes array.
"""
return self.encoder.encode(obj).encode("utf8")
def decode(self, data: bytes) -> Any:
"""
Decodes bytes array as previously encoded object.
"""
return self.decoder.decode(data.decode("utf8"))
def _encode_obj(self, o: Any) -> Dict[str, Any]:
try:
transcoding = self.types[type(o)]
except KeyError:
raise TypeError(
f"Object of type {type(o)} is not "
"serializable. Please define and register "
"a custom transcoding for this type."
)
else:
return {
"_type_": transcoding.name,
"_data_": transcoding.encode(o),
}
def _decode_obj(self, d: Dict[str, Any]) -> Any:
if set(d.keys()) == {
"_type_",
"_data_",
}:
t = d["_type_"]
t = cast(str, t)
try:
transcoding = self.names[t]
except KeyError:
raise TypeError(
f"Data serialized with name '{t}' is not "
"deserializable. Please register a "
"custom transcoding for this type."
)
return transcoding.decode(d["_data_"])
else:
return d
class UUIDAsHex(Transcoding):
"""
Transcoding that represents :class:`UUID` objects as hex values.
"""
type = UUID
name = "uuid_hex"
def encode(self, obj: UUID) -> str:
return obj.hex
def decode(self, data: str) -> UUID:
assert isinstance(data, str)
return UUID(data)
class DecimalAsStr(Transcoding):
"""
Transcoding that represents :class:`Decimal` objects as strings.
"""
type = Decimal
name = "decimal_str"
def encode(self, obj: Decimal) -> str:
return str(obj)
def decode(self, data: str) -> Decimal:
return Decimal(data)
class DatetimeAsISO(Transcoding):
"""
Transcoding that represents :class:`datetime` objects as ISO strings.
"""
type = datetime
name = "datetime_iso"
def encode(self, obj: datetime) -> str:
return obj.isoformat()
def decode(self, data: str) -> datetime:
assert isinstance(data, str)
return datetime.fromisoformat(data)
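# Illustrative sketch (not part of the original module): with the transcodings
# above registered, a JSONTranscoder can round-trip UUID, Decimal and datetime
# values through JSON bytes, e.g.:
#
#   transcoder = JSONTranscoder()
#   transcoder.register(UUIDAsHex())
#   transcoder.register(DecimalAsStr())
#   transcoder.register(DatetimeAsISO())
#   data = transcoder.encode({"price": Decimal("9.99")})
#   assert transcoder.decode(data) == {"price": Decimal("9.99")}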
@dataclass(frozen=True)
class StoredEvent:
# noinspection PyUnresolvedReferences
"""
Frozen dataclass that represents :class:`~eventsourcing.domain.DomainEvent`
objects, such as aggregate :class:`~eventsourcing.domain.Aggregate.Event`
objects and :class:`~eventsourcing.domain.Snapshot` objects.
Constructor parameters:
:param UUID originator_id: ID of the originating aggregate
:param int originator_version: version of the originating aggregate
:param str topic: topic of the domain event object class
:param bytes state: serialised state of the domain event object
"""
originator_id: uuid.UUID
originator_version: int
topic: str
state: bytes
class Compressor(ABC):
"""
Base class for compressors.
"""
@abstractmethod
def compress(self, data: bytes) -> bytes:
"""
Compress bytes.
"""
@abstractmethod
def decompress(self, data: bytes) -> bytes:
"""
Decompress bytes.
"""
class Cipher(ABC):
"""
Base class for ciphers.
"""
# noinspection PyUnusedLocal
@abstractmethod
def __init__(self, cipher_key: str):
"""
Initialises cipher with given key.
"""
@abstractmethod
def encrypt(self, plaintext: bytes) -> bytes:
"""
Return ciphertext for given plaintext.
"""
@abstractmethod
def decrypt(self, ciphertext: bytes) -> bytes:
"""
Return plaintext for given ciphertext.
"""
class Mapper(Generic[TDomainEvent]):
"""
Converts between domain event objects and :class:`StoredEvent` objects.
Uses a :class:`Transcoder`, and optionally a cryptographic cipher and compressor.
"""
def __init__(
self,
transcoder: Transcoder,
compressor: Optional[Compressor] = None,
cipher: Optional[Cipher] = None,
):
self.transcoder = transcoder
self.compressor = compressor
self.cipher = cipher
def from_domain_event(self, domain_event: TDomainEvent) -> StoredEvent:
"""
Converts the given domain event to a :class:`StoredEvent` object.
"""
topic: str = get_topic(domain_event.__class__)
event_state = domain_event.__dict__.copy()
originator_id = event_state.pop("originator_id")
originator_version = event_state.pop("originator_version")
class_version = getattr(type(domain_event), "class_version", 1)
if class_version > 1:
event_state["class_version"] = class_version
stored_state: bytes = self.transcoder.encode(event_state)
if self.compressor:
stored_state = self.compressor.compress(stored_state)
if self.cipher:
stored_state = self.cipher.encrypt(stored_state)
return StoredEvent(
originator_id=originator_id,
originator_version=originator_version,
topic=topic,
state=stored_state,
)
def to_domain_event(self, stored: StoredEvent) -> TDomainEvent:
"""
Converts the given :class:`StoredEvent` to a domain event object.
"""
stored_state: bytes = stored.state
if self.cipher:
stored_state = self.cipher.decrypt(stored_state)
if self.compressor:
stored_state = self.compressor.decompress(stored_state)
event_state: dict = self.transcoder.decode(stored_state)
event_state["originator_id"] = stored.originator_id
event_state["originator_version"] = stored.originator_version
cls = resolve_topic(stored.topic)
assert issubclass(cls, DomainEvent)
class_version = getattr(cls, "class_version", 1)
from_version = event_state.pop("class_version", 1)
while from_version < class_version:
getattr(cls, f"upcast_v{from_version}_v{from_version + 1}")(event_state)
from_version += 1
domain_event = object.__new__(cls)
domain_event.__dict__.update(event_state)
return domain_event
class RecordConflictError(Exception):
"""
Legacy exception, replaced with IntegrityError.
"""
class PersistenceError(Exception):
"""
The base class of the other exceptions in this module.
Exception class names follow https://www.python.org/dev/peps/pep-0249/#exceptions
"""
class InterfaceError(PersistenceError):
"""
Exception raised for errors that are related to the database
interface rather than the database itself.
"""
class DatabaseError(PersistenceError):
"""
Exception raised for errors that are related to the database.
"""
class DataError(DatabaseError):
"""
Exception raised for errors that are due to problems with the
processed data like division by zero, numeric value out of range, etc.
"""
class OperationalError(DatabaseError):
"""
Exception raised for errors that are related to the database’s
operation and not necessarily under the control of the programmer,
e.g. an unexpected disconnect occurs, the data source name is not
found, a transaction could not be processed, a memory allocation
error occurred during processing, etc.
"""
class IntegrityError(DatabaseError, RecordConflictError):
"""
Exception raised when the relational integrity of the
database is affected, e.g. a foreign key check fails.
"""
class InternalError(DatabaseError):
"""
Exception raised when the database encounters an internal
error, e.g. the cursor is not valid anymore, the transaction
is out of sync, etc.
"""
class ProgrammingError(DatabaseError):
"""
Exception raised for programming errors, e.g. table not
found or already exists, syntax error in the SQL statement,
wrong number of parameters specified, etc.
"""
class NotSupportedError(DatabaseError):
"""
Exception raised in case a method or database API was used
which is not supported by the database, e.g. calling the
rollback() method on a connection that does not support
transaction or has transactions turned off.
"""
class Recorder(ABC):
"""
Abstract base class for stored event recorders.
"""
class AggregateRecorder(Recorder):
"""
Abstract base class for recorders that record and
retrieve stored events for domain model aggregates.
"""
@abstractmethod
def insert_events(self, stored_events: List[StoredEvent], **kwargs: Any) -> None:
"""
Writes stored events into database.
"""
# Todo: Change the implementations to fetch in batches, in case there are lots of events.
@abstractmethod
def select_events(
self,
originator_id: UUID,
gt: Optional[int] = None,
lte: Optional[int] = None,
desc: bool = False,
limit: Optional[int] = None,
) -> List[StoredEvent]:
"""
Reads stored events from database.
"""
@dataclass(frozen=True)
class Notification(StoredEvent):
"""
Frozen dataclass that represents domain event notifications.
"""
id: int
class ApplicationRecorder(AggregateRecorder):
"""
Abstract base class for recorders that record and
retrieve stored events for domain model aggregates.
Extends the behaviour of aggregate recorders by
recording aggregate events in a total order that
allows the stored events also to be retrieved
as event notifications.
"""
@abstractmethod
def select_notifications(self, start: int, limit: int) -> List[Notification]:
"""
Returns a list of event notifications
from 'start', limited by 'limit'.
"""
@abstractmethod
def max_notification_id(self) -> int:
"""
Returns the maximum notification ID.
"""
class ProcessRecorder(ApplicationRecorder):
"""
Abstract base class for recorders that record and
retrieve stored events for domain model aggregates.
Extends the behaviour of applications recorders by
recording aggregate events with tracking information
that records the position of a processed event
notification in a notification log.
"""
@abstractmethod
def max_tracking_id(self, application_name: str) -> int:
"""
Returns the last recorded notification ID from given application.
"""
class EventStore(Generic[TDomainEvent]):
"""
Stores and retrieves domain events.
"""
def __init__(
self,
mapper: Mapper[TDomainEvent],
recorder: AggregateRecorder,
):
self.mapper = mapper
self.recorder = recorder
def put(self, events: List[TDomainEvent], **kwargs: Any) -> None:
"""
Stores domain events in aggregate sequence.
"""
self.recorder.insert_events(
list(
map(
self.mapper.from_domain_event,
events,
)
),
**kwargs,
)
def get(
self,
originator_id: UUID,
gt: Optional[int] = None,
lte: Optional[int] = None,
desc: bool = False,
limit: Optional[int] = None,
) -> Iterator[TDomainEvent]:
"""
Retrieves domain events from aggregate sequence.
"""
return map(
self.mapper.to_domain_event,
self.recorder.select_events(
originator_id=originator_id,
gt=gt,
lte=lte,
desc=desc,
limit=limit,
),
)
class InfrastructureFactory(ABC):
"""
Abstract base class for infrastructure factories.
"""
TOPIC = "INFRASTRUCTURE_FACTORY"
MAPPER_TOPIC = "MAPPER_TOPIC"
CIPHER_TOPIC = "CIPHER_TOPIC"
CIPHER_KEY = "CIPHER_KEY"
COMPRESSOR_TOPIC = "COMPRESSOR_TOPIC"
IS_SNAPSHOTTING_ENABLED = "IS_SNAPSHOTTING_ENABLED"
@classmethod
def construct(
cls,
application_name: str = "",
env: Optional[Mapping] = None,
) -> "InfrastructureFactory":
"""
Constructs concrete infrastructure factory for given
named application. Reads and resolves infrastructure
factory class topic from environment variable 'INFRASTRUCTURE_FACTORY'.
"""
# noinspection SpellCheckingInspection
env = env if env is not None else os.environ
topic = env.get(
cls.TOPIC,
"eventsourcing.popo:Factory",
)
try:
factory_cls = resolve_topic(topic)
except (ModuleNotFoundError, AttributeError):
raise EnvironmentError(
"Failed to resolve "
"infrastructure factory topic: "
f"'{topic}' from environment "
f"variable '{cls.TOPIC}'"
)
if not issubclass(factory_cls, InfrastructureFactory):
raise AssertionError(f"Not an infrastructure factory: {topic}")
return factory_cls(application_name=application_name, env=env)
def __init__(self, application_name: str, env: Mapping):
"""
Initialises infrastructure factory object with given application name.
"""
self.application_name = application_name
self.env = env
# noinspection SpellCheckingInspection
def getenv(
self, key: str, default: Optional[str] = None, application_name: str = ""
) -> Optional[str]:
"""
Returns value of environment variable defined by given key.
"""
if not application_name:
application_name = self.application_name
keys = [
application_name.upper() + "_" + key,
key,
]
for key in keys:
value = self.env.get(key)
if value is not None:
return value
return default
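# Illustrative sketch (hypothetical names, not part of the original module):
# because getenv() checks the application-prefixed key first, an env of
# {"BANKACCOUNTS_CIPHER_KEY": "abc", "CIPHER_KEY": "xyz"} resolves CIPHER_KEY
# to "abc" for an application named "BankAccounts" and to "xyz" for any other
# application.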
def mapper(
self,
transcoder: Transcoder,
application_name: str = "",
) -> Mapper:
"""
Constructs a mapper.
"""
return Mapper(
transcoder=transcoder,
cipher=self.cipher(application_name),
compressor=self.compressor(application_name),
)
def cipher(self, application_name: str) -> Optional[Cipher]:
"""
Reads environment variables 'CIPHER_TOPIC'
and 'CIPHER_KEY' to decide whether or not
to construct a cipher.
"""
cipher_topic = self.getenv(self.CIPHER_TOPIC, application_name=application_name)
cipher_key = self.getenv(self.CIPHER_KEY, application_name=application_name)
cipher: Optional[Cipher] = None
if cipher_topic:
if not cipher_key:
raise EnvironmentError(
f"'{self.CIPHER_KEY}' not set in env, "
f"although '{self.CIPHER_TOPIC}' was set"
)
elif cipher_key:
cipher_topic = "eventsourcing.cipher:AESCipher"
if cipher_topic and cipher_key:
cipher_cls: Type[Cipher] = resolve_topic(cipher_topic)
cipher = cipher_cls(cipher_key=cipher_key)
return cipher
def compressor(self, application_name: str) -> Optional[Compressor]:
"""
Reads environment variable 'COMPRESSOR_TOPIC' to
decide whether or not to construct a compressor.
"""
compressor: Optional[Compressor] = None
compressor_topic = self.getenv(
self.COMPRESSOR_TOPIC, application_name=application_name
)
if compressor_topic:
compressor_cls: Type[Compressor] = resolve_topic(compressor_topic)
if callable(compressor_cls):
compressor = compressor_cls()
else:
compressor = compressor_cls
return compressor
@staticmethod
def event_store(**kwargs: Any) -> EventStore:
"""
Constructs an event store.
"""
return EventStore(**kwargs)
@abstractmethod
def aggregate_recorder(self, purpose: str = "events") -> AggregateRecorder:
"""
Constructs an aggregate recorder.
"""
@abstractmethod
def application_recorder(self) -> ApplicationRecorder:
"""
Constructs an application recorder.
"""
@abstractmethod
def process_recorder(self) -> ProcessRecorder:
"""
Constructs a process recorder.
"""
def is_snapshotting_enabled(self) -> bool:
"""
Decides whether or not snapshotting is enabled by
reading environment variable 'IS_SNAPSHOTTING_ENABLED'.
Snapshotting is not enabled by default.
"""
default = "no"
return bool(
strtobool(self.getenv(self.IS_SNAPSHOTTING_ENABLED, default) or default)
)
@dataclass(frozen=True)
class Tracking:
"""
Frozen dataclass representing the position of a domain
event :class:`Notification` in an application's notification log.
"""
application_name: str
notification_id: int
| []
| []
| []
| [] | [] | python | 0 | 0 | |
wsgi.py | import os
from app.main import app
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host="0.0.0.0", port=port)
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
tests/abstract_test.py | # ATTENTION: the tests are supposed to be run inside a container!
# all dependencies have to be installed before running the tests by `./entrypoint install -n benchmark_name`
from __future__ import absolute_import
import logging
import os
import subprocess
from core.environment import set_all_environments
set_all_environments()
os.environ['NUM_THREADS'] = '1'
PROJ_ROOT = os.environ.get("PROJ_ROOT")
CONF_DIR = "%s/experiments/makefiles" % PROJ_ROOT
BUILD_DIR = "%s/experiments/build" % PROJ_ROOT
DATA_PATH = os.environ.get("DATA_PATH")
INPUT_PATH = DATA_PATH + "/inputs/"
class BuildAndRun:
# tested applications
benchmarks = {}
# build types
actions = (
'gcc_native',
'gcc_mpx',
'gcc_mpx_no_narrow_bounds',
'gcc_mpx_only_write',
'gcc_asan',
'icc_native',
'icc_mpx',
'icc_mpx_no_narrow_bounds',
'icc_mpx_only_write',
'clang_native',
'clang_asan',
'clang_softbound',
)
logger = logging.getLogger("Test")
def test_01_builds(self):
set_all_environments(env_type='build')
benchmarks = current_benchmarks(self)
actions = current_action(self)
for name, params in benchmarks.items():
for action in actions:
yield self.check_build, action, params[0]
def test_02_run(self):
set_all_environments(env_type='both')
benchmarks = current_benchmarks(self)
actions = current_action(self)
for name, params in benchmarks.items():
for action in actions:
yield self.check_run, action, name, params[1]
def check_build(self, action, path):
threads = thread_num()
make_command = 'make -j%s ACTION=%s -I %s -C %s' % (threads, action, CONF_DIR, path)
try:
output = subprocess.check_output(make_command, stderr=subprocess.STDOUT, shell=True)
assert True
except subprocess.CalledProcessError as e:
self.logger.error("\n [[ BUILD command failed ]]\n Command: %s\n" % make_command)
assert False
def check_run(self, action, name, args):
exe = BUILD_DIR + "/" + name + "/" + action + "/" + name
run_command = exe + " " + args
try:
output = subprocess.check_output(run_command, stderr=subprocess.STDOUT, shell=True)
assert True
except subprocess.CalledProcessError as e:
self.logger.error("\n [[ RUN command failed with code %d]]\n Command: %s\n" % (e.returncode, run_command))
assert False
def current_benchmarks(obj):
if os.environ.get("NAME"):
benchmarks = (lambda x: {x: obj.benchmarks[x]})(os.environ.get("NAME"))
else:
benchmarks = obj.benchmarks
return benchmarks
def current_action(obj):
if os.environ.get("ACTION"):
actions = (os.environ.get("ACTION"),)
else:
actions = obj.actions
return actions
def thread_num():
if os.environ.get("BUILD_THREADS_NUM"):
threads = os.environ.get("BUILD_THREADS_NUM")
else:
threads = "1"
return threads
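# Illustrative sketch (hypothetical values, not part of the original tests):
# the helpers above narrow the generated test matrix via environment variables,
# so running the suite with NAME=benchmark_name ACTION=gcc_asan
# BUILD_THREADS_NUM=4 builds and runs only that benchmark under the 'gcc_asan'
# action with a 4-way parallel make.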
| []
| []
| [
"BUILD_THREADS_NUM",
"NUM_THREADS",
"ACTION",
"DATA_PATH",
"PROJ_ROOT",
"NAME"
]
| [] | ["BUILD_THREADS_NUM", "NUM_THREADS", "ACTION", "DATA_PATH", "PROJ_ROOT", "NAME"] | python | 6 | 0 | |
tests/data_tests.py | """
Testing multi-modal datasources of Plato framework.
"""
import os
import unittest
os.environ['config_file'] = 'tests/TestsConfig/flickr30k_entities.yml'
# os.environ['config_file'] = 'tests/TestsConfig/coco.yml'
# os.environ['config_file'] = 'tests/TestsConfig/referitgame.yml'
# os.environ['config_file'] = 'tests/TestsConfig/kinetics.yml'
# os.environ['config_file'] = 'tests/TestsConfig/gym.yml'
# Note: Plato searches the './config' dir for Pipeline and other configuration files
# by default. This is achieved by the code in line 83 of 'config.py'
import numpy as np
import torch
from plato.config import Config
from plato.datasources.flickr30k_entities import DataSource as f30ke_DataSource
# from plato.datasources.referitgame import DataSource as refer_Datasource
# from plato.datasources.coco import DataSource as coco_Datasource
# from plato.datasources.kinetics import DataSource as kinetics_Datasource
# from plato.datasources.gym import DataSource as GymDataSource
from plato.samplers import registry as samplers_registry
from plato.samplers import modality_iid
from sampler_test_utils import define_sampler
class DatasetsTest(unittest.TestCase):
""" Tests the correctness of the implemented samplers and datasets """
def setUp(self):
super().setUp()
_ = Config()
self.total_clients = Config().clients.total_clients
# randomly client id
clients_id = list(range(self.total_clients))
self.client_id = np.random.choice(clients_id, 1)[0]
# the datasource being tested
self.utest_datasource = None
def assertDataSourceDefinition(self, data_source):
""" Test whether the dataset can be correctly defined.
This verifies:
1.1- The datasource can be defined
1.2- The raw data can be downloaded
1.3- The correct data store structure can be set.
2.1- The datasource can work with defined samplers
2.2- The defined data loader can load correct samples
2.4- The visualization of samples is correct.
"""
# Test 1
self.utest_datasource = data_source
# Test 2
modality_sampler = modality_iid.Sampler(
datasource=self.utest_datasource, client_id=self.client_id)
test_dataset = self.utest_datasource.get_test_set(
modality_sampler.get())
_ = test_dataset.get_one_multimodal_sample(sample_idx=0)
_ = test_dataset[0]
batch_size = Config().trainer.batch_size
# define the sampler
defined_sampler = define_sampler(Sampler=samplers_registry,
dataset_source=self.utest_datasource,
client_id=self.client_id,
is_testing=True)
testset_loader = torch.utils.data.DataLoader(
dataset=test_dataset,
shuffle=False,
batch_size=batch_size,
sampler=defined_sampler.get())
obtained_mm_sample = next(iter(testset_loader))
print("obtained_sample: ", obtained_mm_sample)
return True
def test_f30ke_datasource(self):
""" Test the flickr30k entities dataset. """
self.utest_datasource = f30ke_DataSource()
assert self.assertDataSourceDefinition(self.utest_datasource)
# def test_coco_datasource(self):
# """ Test the MSCOCO dataset. """
# # set the specific
# self.utest_datasource = coco_Datasource()
# # assert self.assertDataSourceDefinition(self.utest_datasource)
# def test_ref_datasource(self):
# """ Test the ReferItGmae dataset. """
# # set the specific
# self.utest_datasource = refer_Datasource()
# assert self.assertDataSourceDefinition(self.utest_datasource)
# def test_kinetics_datasource(self):
# """ Test the kinetics700 dataset. """
# # set the specific
# self.utest_datasource = kinetics_Datasource()
# kinetics_train_dataset = self.utest_datasource.get_train_set(
# modality_sampler=None)
# iter_data = kinetics_train_dataset[0]
# print("rgb: ")
# print(iter_data["rgb"]["imgs"].shape)
# print(iter_data["rgb"]["label"].shape)
# assert self.assertDataSourceDefinition(self.utest_datasource)
# def test_gym_datasource(self):
# """ Test the Gym dataset. """
# # set the specific
# self.utest_datasource = GymDataSource()
# assert self.assertDataSourceDefinition(self.utest_datasource)
if __name__ == '__main__':
unittest.main()
| []
| []
| [
"config_file"
]
| [] | ["config_file"] | python | 1 | 0 | |
cmd/pre-commit/main.go | package main
import (
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path/filepath"
"time"
"github.com/ONSdigital/git-diff-check/diffcheck"
)
const (
// Return codes - a non-zero will cause the commit hook to reject the commit
accepted = 0
rejected = 1
)
const (
// Repository defines the github repo where the source code is located
Repository = "ONSdigital/git-diff-check"
// LatestVersion gives the api location of the most recent tag in github
LatestVersion = "https://api.github.com/repos/" + Repository + "/releases/latest"
)
var target = flag.String("p", "", "(optional) path to repository")
var showVersion bool
var showHelp bool
func init() {
flag.BoolVar(&showVersion, "version", false, "show current version")
flag.BoolVar(&showHelp, "help", false, "show usage")
}
// Version is injected at build time
var Version string
func main() {
flag.Parse()
if showHelp {
flag.PrintDefaults()
os.Exit(0)
}
if showVersion {
if len(Version) == 0 {
fmt.Println(errors.New("No version set in binary! You may have a broken release"))
os.Exit(1)
}
fmt.Println(Version)
os.Exit(0)
}
// Attempt to check for a new version and inform the user if this is so.
// If we can't connect or get the version for some reason then this is non-fatal
versionCheck()
if *target == "" {
*target = "."
}
fmt.Printf("Running precommit diff check on '%s'\n", *target)
// Import environmental feature flags
if useEntropyFeature := os.Getenv("DC_ENTROPY_EXPERIMENT"); useEntropyFeature == "1" {
fmt.Println("i) Experimental entropy checking enabled")
diffcheck.UseEntropy = true
}
// Get where we are so we can get back
ex, err := os.Executable()
if err != nil {
log.Fatal("Couldn't get current dir:", err)
}
here := filepath.Dir(ex)
err = os.Chdir(*target)
if err != nil {
log.Fatal("Failed to change to target dir:", err)
}
patch, err := exec.Command("git", "diff", "-U0", "--staged").CombinedOutput()
if err != nil {
log.Fatalf("Failed to run git command: %v (%s)", err, patch)
}
os.Chdir(here)
if len(patch) == 0 {
fmt.Println("No changes to test - exiting")
os.Exit(accepted)
}
ok, reports, err := diffcheck.SnoopPatch(patch)
if err != nil {
log.Fatal("Failed to snoop:", err)
}
if len(reports) > 0 {
fmt.Println("WARNING! Potential sensitive data found:")
for _, r := range reports {
fmt.Printf("Found in (%s)\n", r.Path)
for _, w := range r.Warnings {
if w.Type == "line" {
fmt.Printf("\t> [%s] %s (line %d)\n", w.Type, w.Description, w.Line)
} else {
fmt.Printf("\t> [%s] %s\n", w.Type, w.Description)
}
}
fmt.Println()
}
}
if ok {
fmt.Println("Diff probably ok!")
os.Exit(accepted)
}
fmt.Println("If you're VERY SURE these files are ok, rerun commit with --no-verify")
os.Exit(rejected)
}
// VersionResponse is the response from the GitHub version call
type VersionResponse struct {
TagName string `json:"tag_name"`
}
func versionCheck() bool {
var netClient = &http.Client{
Timeout: time.Second * 2,
}
resp, err := netClient.Get(LatestVersion)
if err != nil {
fmt.Println("Failed to check for new versions: " + err.Error())
return false
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println("Failed to check for new versions: " + err.Error())
return false
}
var v VersionResponse
err = json.Unmarshal(body, &v)
if err != nil {
fmt.Println("Failed to check for new versions: " + err.Error())
return false
}
if len(v.TagName) == 0 {
fmt.Println("Failed to parse version from github response")
return false
}
if v.TagName != Version {
fmt.Printf("\n** Precommit: New version %s available (installed %s) **\n\n", v.TagName, Version)
return true
}
return false
}
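// Illustrative sketch (hypothetical payload, not from the original source):
// versionCheck only needs the "tag_name" field of the GitHub "latest release"
// response, so a body such as
//
//	{"tag_name": "v1.2.3"}
//
// is unmarshalled into VersionResponse{TagName: "v1.2.3"} and compared against
// the Version injected at build time.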
| [
"\"DC_ENTROPY_EXPERIMENT\""
]
| []
| [
"DC_ENTROPY_EXPERIMENT"
]
| [] | ["DC_ENTROPY_EXPERIMENT"] | go | 1 | 0 | |
mgr/runner_test.go | package mgr
import (
"encoding/json"
"fmt"
"io/ioutil"
"log/syslog"
"math/rand"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"github.com/json-iterator/go"
"github.com/stretchr/testify/assert"
"github.com/qiniu/log"
"github.com/qiniu/logkit/cleaner"
"github.com/qiniu/logkit/conf"
"github.com/qiniu/logkit/parser"
parserConf "github.com/qiniu/logkit/parser/config"
"github.com/qiniu/logkit/parser/qiniu"
"github.com/qiniu/logkit/reader"
readerConf "github.com/qiniu/logkit/reader/config"
"github.com/qiniu/logkit/router"
"github.com/qiniu/logkit/sender"
_ "github.com/qiniu/logkit/sender/builtin"
senderConf "github.com/qiniu/logkit/sender/config"
"github.com/qiniu/logkit/sender/discard"
"github.com/qiniu/logkit/sender/mock"
"github.com/qiniu/logkit/sender/pandora"
"github.com/qiniu/logkit/transforms"
_ "github.com/qiniu/logkit/transforms/builtin"
"github.com/qiniu/logkit/transforms/ip"
"github.com/qiniu/logkit/transforms/mutate"
"github.com/qiniu/logkit/utils/equeue"
. "github.com/qiniu/logkit/utils/models"
)
func cleanMetaFolder(path string) {
err := os.Remove(path + "/buf.dat")
if err != nil {
log.Println(err)
}
err = os.Remove(path + "/buf.meta")
if err != nil {
log.Println(err)
}
err = os.Remove(path + "/file.meta")
if err != nil {
log.Println(err)
}
}
func Test_Run(t *testing.T) {
t.Parallel()
dir := "Test_Run"
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("Test_Run error mkdir %v %v", dir, err)
}
defer os.RemoveAll(dir)
logpath := dir + "/logdir"
logpathLink := dir + "/logdirlink"
metapath := dir + "/meta_mock_csv"
if err := os.Mkdir(logpath, DefaultDirPerm); err != nil {
log.Fatalf("Test_Run error mkdir %v %v", logpath, err)
}
absLogpath, err := filepath.Abs(logpath)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpath, err)
}
absLogpathLink, err := filepath.Abs(logpathLink)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpathLink, err)
}
if err := os.Symlink(absLogpath, absLogpathLink); err != nil {
log.Fatalf("Test_Run error symbol link %v to %v: %v", absLogpathLink, logpath, err)
}
if err := os.Mkdir(metapath, DefaultDirPerm); err != nil {
log.Fatalf("Test_Run error mkdir %v %v", metapath, err)
}
log1 := `hello 123
xx 1
`
log2 := `
`
log3 := `h 456
x 789`
if err := ioutil.WriteFile(filepath.Join(logpath, "log1"), []byte(log1), 0666); err != nil {
log.Fatalf("write log1 fail %v", err)
}
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log2"), []byte(log2), 0666); err != nil {
log.Fatalf("write log2 fail %v", err)
}
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log3"), []byte(log3), 0666); err != nil {
log.Fatalf("write log3 fail %v", err)
}
exppath1 := filepath.Join(absLogpath, "log1")
exppath3 := filepath.Join(absLogpath, "log3")
exppaths := []string{exppath1, exppath1, exppath3, exppath3}
rinfo := RunnerInfo{
RunnerName: "test_runner",
MaxBatchLen: 1,
MaxBatchSize: 2048,
}
readerConfig := conf.MapConf{
"log_path": logpathLink,
"meta_path": metapath,
"mode": "dir",
"read_from": "oldest",
"datasource_tag": "testtag",
"reader_buf_size": "16",
}
meta, err := reader.NewMetaWithConf(readerConfig)
if err != nil {
t.Error(err)
}
isFromWeb := false
r, err := reader.NewFileBufReader(readerConfig, isFromWeb)
if err != nil {
t.Error(err)
}
cleanChan := make(chan cleaner.CleanSignal)
cleanerConfig := conf.MapConf{
"delete_enable": "true",
}
c, err := cleaner.NewCleaner(cleanerConfig, meta, cleanChan, meta.LogPath())
if err != nil {
t.Error(err)
}
parseConf := conf.MapConf{
"name": "req_csv",
"type": parserConf.TypeCSV,
"csv_schema": "logtype string, xx long",
"csv_splitter": " ",
"disable_record_errdata": "true",
}
ps := parser.NewRegistry()
pparser, err := ps.NewLogParser(parseConf)
if err != nil {
t.Error(err)
}
senderConfigs := []conf.MapConf{
{
"name": "mock_sender",
"sender_type": "mock",
},
}
var senders []sender.Sender
raws, err := mock.NewSender(senderConfigs[0])
s, succ := raws.(*mock.Sender)
if !succ {
t.Error("sender should be mock sender")
}
if err != nil {
t.Error(err)
}
senders = append(senders, s)
runner, err := NewLogExportRunnerWithService(rinfo, r, c, pparser, nil, senders, nil, meta)
if err != nil {
t.Error(err)
}
cleanInfo := CleanInfo{
enable: true,
logdir: absLogpath,
}
assert.Equal(t, cleanInfo, runner.Cleaner())
go runner.Run()
timer := time.NewTimer(20 * time.Second).C
for {
if s.SendCount() >= 4 {
break
}
select {
case <-timer:
t.Error("runner didn't stop within ticker time")
return
default:
time.Sleep(time.Second)
}
}
var dts []Data
rawData := runner.senders[0].Name()[len("mock_sender "):]
err = jsoniter.Unmarshal([]byte(rawData), &dts)
if err != nil {
t.Error(err)
}
if len(dts) != 4 {
t.Errorf("got sender data not match error,expect 4 but %v", len(dts))
}
for idx, dt := range dts {
assert.Equal(t, exppaths[idx], dt["testtag"])
}
}
func Test_RunForEnvTag(t *testing.T) {
t.Parallel()
dir := "Test_RunForEnvTag"
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("Test_RunForEnvTag error mkdir %v %v", dir, err)
}
defer os.RemoveAll(dir)
originEnv := os.Getenv("Test_RunForEnvTag")
defer func() {
os.Setenv("Test_RunForEnvTag", originEnv)
}()
if err := os.Setenv("Test_RunForEnvTag", "{\"Test_RunForEnvTag\":\"env_value\"}"); err != nil {
t.Fatalf("set env %v to %v error %v", "Test_RunForEnvTag", "env_value", err)
}
logpath := dir + "/logdir"
logpathLink := dir + "/logdirlink"
metapath := dir + "/meta_mock_csv"
if err := os.Mkdir(logpath, DefaultDirPerm); err != nil {
log.Fatalf("Test_RunForEnvTag error mkdir %v %v", logpath, err)
}
absLogpath, err := filepath.Abs(logpath)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpath, err)
}
absLogpathLink, err := filepath.Abs(logpathLink)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpathLink, err)
}
if err := os.Symlink(absLogpath, absLogpathLink); err != nil {
log.Fatalf("Test_RunForEnvTag error symbol link %v to %v: %v", absLogpathLink, logpath, err)
}
if err := os.Mkdir(metapath, DefaultDirPerm); err != nil {
log.Fatalf("Test_RunForEnvTag error mkdir %v %v", metapath, err)
}
log1 := `hello 123
xx 1
`
log2 := `
`
log3 := `h 456
x 789`
if err := ioutil.WriteFile(filepath.Join(logpath, "log1"), []byte(log1), 0666); err != nil {
log.Fatalf("write log1 fail %v", err)
}
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log2"), []byte(log2), 0666); err != nil {
log.Fatalf("write log2 fail %v", err)
}
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log3"), []byte(log3), 0666); err != nil {
log.Fatalf("write log3 fail %v", err)
}
rinfo := RunnerInfo{
RunnerName: "test_runner",
MaxBatchLen: 1,
MaxBatchSize: 2048,
ExtraInfo: true,
EnvTag: "Test_RunForEnvTag",
}
readerConfig := conf.MapConf{
"log_path": logpathLink,
"meta_path": metapath,
"mode": "dir",
"read_from": "oldest",
"reader_buf_size": "16",
}
meta, err := reader.NewMetaWithConf(readerConfig)
if err != nil {
t.Error(err)
}
isFromWeb := false
reader, err := reader.NewFileBufReader(readerConfig, isFromWeb)
if err != nil {
t.Error(err)
}
cleanChan := make(chan cleaner.CleanSignal)
cleanerConfig := conf.MapConf{
"delete_enable": "true",
}
cleaner, err := cleaner.NewCleaner(cleanerConfig, meta, cleanChan, meta.LogPath())
if err != nil {
t.Error(err)
}
parseConf := conf.MapConf{
"name": "req_csv",
"type": "csv",
"csv_schema": "logtype string, xx long",
"csv_splitter": " ",
"disable_record_errdata": "true",
}
ps := parser.NewRegistry()
pparser, err := ps.NewLogParser(parseConf)
if err != nil {
t.Error(err)
}
senderConfigs := []conf.MapConf{
{
"name": "mock_sender",
"sender_type": "mock",
},
}
var senders []sender.Sender
raws, err := mock.NewSender(senderConfigs[0])
s, succ := raws.(*mock.Sender)
if !succ {
t.Error("sender should be mock sender")
}
if err != nil {
t.Error(err)
}
senders = append(senders, s)
r, err := NewLogExportRunnerWithService(rinfo, reader, cleaner, pparser, nil, senders, nil, meta)
if err != nil {
t.Error(err)
}
cleanInfo := CleanInfo{
enable: true,
logdir: absLogpath,
}
assert.Equal(t, cleanInfo, r.Cleaner())
go r.Run()
timer := time.NewTimer(20 * time.Second).C
for {
if s.SendCount() >= 4 {
break
}
select {
case <-timer:
t.Error("runner didn't stop within ticker time")
return
default:
time.Sleep(time.Second)
}
}
var dts []Data
rawData := r.senders[0].Name()[len("mock_sender "):]
err = jsoniter.Unmarshal([]byte(rawData), &dts)
if err != nil {
t.Error(err)
}
if len(dts) != 4 {
t.Errorf("got sender data not match error,expect 4 but %v", len(dts))
}
for _, d := range dts {
if v, ok := d["Test_RunForEnvTag"]; !ok {
t.Fatalf("Test_RunForEnvTag error, exp got Test_RunForEnvTag:env_value, but not found")
} else {
assert.Equal(t, "env_value", v)
}
}
}
func Test_RunForErrData(t *testing.T) {
t.Parallel()
dir := "Test_RunForErrData"
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("Test_RunForErrData error mkdir %v %v", dir, err)
}
defer os.RemoveAll(dir)
logpath := dir + "/logdir"
logpathLink := dir + "/logdirlink"
metapath := dir + "/meta_mock_csv"
if err := os.Mkdir(logpath, DefaultDirPerm); err != nil {
log.Fatalf("Test_Run error mkdir %v %v", logpath, err)
}
absLogpath, err := filepath.Abs(logpath)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpath, err)
}
absLogpathLink, err := filepath.Abs(logpathLink)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpathLink, err)
}
if err := os.Symlink(absLogpath, absLogpathLink); err != nil {
log.Fatalf("Test_Run error symbol link %v to %v: %v", absLogpathLink, logpath, err)
}
if err := os.Mkdir(metapath, DefaultDirPerm); err != nil {
log.Fatalf("Test_Run error mkdir %v %v", metapath, err)
}
log1 := `hello 123
xx 1
`
log2 := `
`
log3 := `h 456
x 789`
if err := ioutil.WriteFile(filepath.Join(logpath, "log1"), []byte(log1), 0666); err != nil {
log.Fatalf("write log1 fail %v", err)
}
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log2"), []byte(log2), 0666); err != nil {
log.Fatalf("write log2 fail %v", err)
}
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log3"), []byte(log3), 0666); err != nil {
log.Fatalf("write log3 fail %v", err)
}
exppath1 := filepath.Join(absLogpath, "log1")
exppath3 := filepath.Join(absLogpath, "log3")
exppaths := []string{exppath1, exppath1, exppath3, exppath3}
rinfo := RunnerInfo{
RunnerName: "test_runner",
MaxBatchLen: 1,
MaxBatchSize: 2048,
}
readerConfig := conf.MapConf{
"log_path": logpathLink,
"meta_path": metapath,
"mode": "dir",
"read_from": "oldest",
"datasource_tag": "testtag",
"reader_buf_size": "16",
}
meta, err := reader.NewMetaWithConf(readerConfig)
if err != nil {
t.Error(err)
}
isFromWeb := false
reader, err := reader.NewFileBufReader(readerConfig, isFromWeb)
if err != nil {
t.Error(err)
}
cleanChan := make(chan cleaner.CleanSignal)
cleanerConfig := conf.MapConf{
"delete_enable": "true",
}
cleaner, err := cleaner.NewCleaner(cleanerConfig, meta, cleanChan, meta.LogPath())
if err != nil {
t.Error(err)
}
parseConf := conf.MapConf{
"name": "req_csv",
"type": "csv",
"csv_schema": "logtype string, xx long",
"csv_splitter": " ",
"disable_record_errdata": "false",
}
ps := parser.NewRegistry()
pparser, err := ps.NewLogParser(parseConf)
if err != nil {
t.Error(err)
}
senderConfigs := []conf.MapConf{
{
"name": "mock_sender",
"sender_type": "mock",
},
}
var senders []sender.Sender
raws, err := mock.NewSender(senderConfigs[0])
s, succ := raws.(*mock.Sender)
if !succ {
t.Error("sender should be mock sender")
}
if err != nil {
t.Error(err)
}
senders = append(senders, s)
r, err := NewLogExportRunnerWithService(rinfo, reader, cleaner, pparser, nil, senders, nil, meta)
if err != nil {
t.Error(err)
}
cleanInfo := CleanInfo{
enable: true,
logdir: absLogpath,
}
assert.Equal(t, cleanInfo, r.Cleaner())
go r.Run()
timer := time.NewTimer(20 * time.Second).C
for {
if s.SendCount() >= 4 {
break
}
select {
case <-timer:
t.Error("runner didn't stop within ticker time")
return
default:
time.Sleep(time.Second)
}
}
var dts []Data
rawData := r.senders[0].Name()[len("mock_sender "):]
err = jsoniter.Unmarshal([]byte(rawData), &dts)
if err != nil {
t.Error(err)
}
	assert.Equal(t, 4, len(dts), "sender data count mismatch")
for idx, dt := range dts {
if _, ok := dt[KeyPandoraStash]; ok {
if dt["testtag"] == nil {
t.Errorf("data source should be added")
}
} else {
assert.Equal(t, exppaths[idx], dt["testtag"])
}
}
}
func Test_Compatible(t *testing.T) {
t.Parallel()
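	// Compatible should derive the reader's head_pattern from the qiniulog parser settings (prefix + qiniu.HeadPatthern)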
rc := RunnerConfig{
ReaderConfig: conf.MapConf{
"log_path": "/path1",
"meta_path": "meta",
"mode": "dir",
"read_from": "oldest",
"datasource_tag": "testtag",
},
ParserConf: conf.MapConf{
"type": "qiniulog",
},
}
exprc := RunnerConfig{
ReaderConfig: conf.MapConf{
"log_path": "/path1",
"meta_path": "meta",
"mode": "dir",
"read_from": "oldest",
"datasource_tag": "testtag",
"head_pattern": "^" + qiniu.HeadPatthern,
},
ParserConf: conf.MapConf{
"type": "qiniulog",
},
}
rc = Compatible(rc)
assert.Equal(t, exprc, rc)
rc2 := RunnerConfig{
ReaderConfig: conf.MapConf{
"log_path": "/path1",
"meta_path": "meta",
"mode": "dir",
"read_from": "oldest",
"datasource_tag": "testtag",
},
ParserConf: conf.MapConf{
"type": "qiniulog",
"qiniulog_prefix": "PREX",
},
}
exprc2 := RunnerConfig{
ReaderConfig: conf.MapConf{
"log_path": "/path1",
"meta_path": "meta",
"mode": "dir",
"read_from": "oldest",
"datasource_tag": "testtag",
"head_pattern": "^PREX " + qiniu.HeadPatthern,
},
ParserConf: conf.MapConf{
"type": "qiniulog",
"qiniulog_prefix": "PREX",
},
}
rc2 = Compatible(rc2)
assert.Equal(t, exprc2, rc2)
}
func Test_QiniulogRun(t *testing.T) {
t.Parallel()
dir := "Test_QiniulogRun"
	// clean dir first
os.RemoveAll(dir)
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Errorf("Test_QiniulogRun error mkdir %v %v", dir, err)
}
defer os.RemoveAll(dir)
logpath := dir + "/logdir"
logpathLink := dir + "/logdirlink"
metapath := dir + "/meta_mock_csv"
if err := os.Mkdir(logpath, DefaultDirPerm); err != nil {
log.Errorf("Test_Run error mkdir %v %v", logpath, err)
}
absLogpath, err := filepath.Abs(logpath)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpath, err)
}
absLogpathLink, err := filepath.Abs(logpathLink)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpathLink, err)
}
if err := os.Symlink(absLogpath, absLogpathLink); err != nil {
log.Fatalf("Test_Run error symbol link %v to %v: %v", absLogpathLink, logpath, err)
}
if err := os.Mkdir(metapath, DefaultDirPerm); err != nil {
log.Fatalf("Test_Run error mkdir %v %v", metapath, err)
}
log1 := `2017/01/22 11:16:08.885550 [X-ZsU][INFO] disk.go:123: [REQ_END] 200 0.010k 3.792ms
[WARN][SLdoIrCDZj7pmZsU] disk.go <job.freezeDeamon> pop() failed: not found
2017/01/22 11:15:54.947217 [2pyKMukqvwSd-ZsU][INFO] disk.go:124: Service: POST 10.200.20.25:9100/user/info, Code: 200, Xlog: AC, Time: 1ms
`
log2 := `2016/10/20 17:20:30.642666 [ERROR] disk.go:125: github.com/qiniu/logkit/queue/disk.go:241
1234 3243xsaxs
2016/10/20 17:20:30.642662 [123][WARN] disk.go:241: github.com/qiniu/logkit/queue/disk.go 1
`
log3 := `2016/10/20 17:20:30.642662 [124][WARN] disk.go:456: xxxxxx`
expfiles := []string{`[REQ_END] 200 0.010k 3.792ms
[WARN][SLdoIrCDZj7pmZsU] disk.go <job.freezeDeamon> pop() failed: not found`,
`Service: POST 10.200.20.25:9100/user/info, Code: 200, Xlog: AC, Time: 1ms`,
`github.com/qiniu/logkit/queue/disk.go:241
1234 3243xsaxs`, `github.com/qiniu/logkit/queue/disk.go 1`,
`xxxxxx`}
expreqid := []string{"X-ZsU", "2pyKMukqvwSd-ZsU", "", "123", "124"}
if err := ioutil.WriteFile(filepath.Join(logpath, "log1"), []byte(log1), 0666); err != nil {
log.Fatalf("write log1 fail %v", err)
}
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log2"), []byte(log2), 0666); err != nil {
log.Fatalf("write log2 fail %v", err)
}
rinfo := RunnerInfo{
RunnerName: "test_runner",
MaxBatchLen: 1,
MaxBatchSize: 2048,
}
readerConfig := conf.MapConf{
"log_path": logpathLink,
"meta_path": metapath,
"mode": "dir",
"read_from": "oldest",
"datasource_tag": "testtag",
}
parseConf := conf.MapConf{
"name": "qiniu",
"type": parserConf.TypeLogv1,
}
senderConfigs := []conf.MapConf{
{
"name": "mock_sender",
"sender_type": "mock",
},
}
rc := RunnerConfig{
RunnerInfo: rinfo,
ReaderConfig: readerConfig,
ParserConf: parseConf,
SendersConfig: senderConfigs,
IsInWebFolder: false,
}
rc = Compatible(rc)
meta, err := reader.NewMetaWithConf(rc.ReaderConfig)
if err != nil {
t.Error(err)
}
r, err := reader.NewFileBufReader(rc.ReaderConfig, rc.IsInWebFolder)
if err != nil {
t.Error(err)
}
ps := parser.NewRegistry()
pparser, err := ps.NewLogParser(parseConf)
if err != nil {
t.Error(err)
}
var senders []sender.Sender
raws, err := mock.NewSender(senderConfigs[0])
s, succ := raws.(*mock.Sender)
if !succ {
t.Error("sender should be mock sender")
}
if err != nil {
t.Error(err)
}
senders = append(senders, s)
runner, err := NewLogExportRunnerWithService(rinfo, r, nil, pparser, nil, senders, nil, meta)
if err != nil {
t.Error(err)
}
go runner.Run()
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log3"), []byte(log3), 0666); err != nil {
log.Fatalf("write log3 fail %v", err)
}
time.Sleep(time.Second)
timer := time.NewTimer(20 * time.Second).C
for {
if s.SendCount() >= 4 {
break
}
select {
case <-timer:
t.Error("runner didn't stop within ticker time")
return
default:
time.Sleep(time.Second)
}
}
var dts []Data
rawData := runner.senders[0].Name()[len("mock_sender "):]
err = jsoniter.Unmarshal([]byte(rawData), &dts)
if err != nil {
t.Error(err)
}
if len(dts) != 5 {
t.Errorf("got sender data not match error,expect 5 but %v", len(dts))
}
for idx, dt := range dts {
		assert.Equal(t, expfiles[idx], dt["log"], "equal log test")
if expreqid[idx] == "" {
assert.Nil(t, dt["reqid"])
} else {
assert.Equal(t, expreqid[idx], dt["reqid"], "equal reqid test")
}
}
ls, err := runner.LagStats()
assert.NoError(t, err)
assert.Equal(t, &LagInfo{0, "bytes", 0, 0}, ls)
}
func TestCreateTransforms(t *testing.T) {
t.Parallel()
config1 := `{
"name":"test2.csv",
"reader":{
"log_path":"./tests/logdir",
"mode":"dir"
},
"parser":{
"name":"test2_csv_parser",
"type":"csv",
"csv_schema":"t1 string"
},
"transforms":[{
"type":"IP",
"key": "ip",
"data_path": "../transforms/ip/test_data/17monipdb.dat"
}],
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./test2/test2_csv_file.txt"
}]
}`
rc := RunnerConfig{}
err := jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
transformers, _ := createTransformers(rc)
datas := []Data{{"ip": "111.2.3.4"}}
exp := []Data{{
"ip": "111.2.3.4",
"Region": "浙江",
"City": "宁波",
"Country": "中国",
"Isp": "N/A"}}
for k := range transformers {
datas, err = transformers[k].Transform(datas)
assert.NoError(t, err)
}
assert.Equal(t, exp, datas)
}
func TestReplaceTransforms(t *testing.T) {
t.Parallel()
config1 := `{
"name":"test2.csv",
"reader":{
"log_path":"./tests/logdir",
"mode":"dir"
},
"parser":{
"name":"jsonps",
"type":"json"
},
"transforms":[{
"type":"replace",
"stage":"before_parser",
"old":"\\x",
"new":"\\\\x"
}],
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./test2/test2_csv_file.txt"
}]
}`
newData := make([]Data, 0)
rc := RunnerConfig{}
err := jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
transformers, _ := createTransformers(rc)
datas := []string{`{"status":"200","request_method":"POST","request_body":"<xml>\x0A","content_type":"text/xml"}`, `{"status":"200","request_method":"POST","request_body":"<xml>x0A","content_type":"text/xml"}`}
for k := range transformers {
datas, err = transformers[k].RawTransform(datas)
assert.NoError(t, err)
for i := range datas {
var da Data
err = jsoniter.Unmarshal([]byte(datas[i]), &da)
assert.NoError(t, err)
newData = append(newData, da)
}
}
exp := []Data{
{
"status": "200",
"request_method": "POST",
"request_body": "<xml>\\x0A",
"content_type": "text/xml",
},
{
"status": "200",
"request_method": "POST",
"request_body": "<xml>x0A",
"content_type": "text/xml",
},
}
assert.Equal(t, exp, newData)
}
func TestDateTransforms(t *testing.T) {
t.Parallel()
config1 := `{
"name":"test2.csv",
"reader":{
"log_path":"./tests/logdir",
"mode":"dir"
},
"parser":{
"name":"jsonps",
"type":"json"
},
"transforms":[{
"type":"date",
"key":"status",
"offset":1,
"time_layout_before":"",
"time_layout_after":"2006-01-02T15:04:05"
}],
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./test2/test2_csv_file.txt"
}]
}`
rc := RunnerConfig{}
err := jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
transformers, _ := createTransformers(rc)
datas := []Data{{"status": "02/01/2016--15:04:05"}, {"status": "2006-01-02 15:04:15"}}
for k := range transformers {
datas, err = transformers[k].Transform(datas)
assert.Nil(t, err)
}
exp := []Data{
{
"status": "2016-01-02T16:04:05",
},
{
"status": "2006-01-02T16:04:15",
},
}
assert.Equal(t, exp, datas)
}
func TestSplitAndConvertTransforms(t *testing.T) {
t.Parallel()
config1 := `{
"name":"test2.csv",
"reader":{
"log_path":"./tests/logdir",
"mode":"dir"
},
"parser":{
"name":"jsonps",
"type":"json"
},
"transforms":[{
"type":"split",
"key":"status",
"sep":",",
"newfield":"newarray"
},{
"type":"convert",
"dsl":"newarray array(long)"
}],
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./test2/test2_csv_file.txt"
}]
}`
rc := RunnerConfig{}
err := jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
transformers, _ := createTransformers(rc)
datas := []Data{{"status": "1,2,3"}, {"status": "4,5,6"}}
for k := range transformers {
datas, err = transformers[k].Transform(datas)
assert.Nil(t, err)
}
exp := []Data{
{
"status": "1,2,3",
"newarray": []interface{}{int64(1), int64(2), int64(3)},
},
{
"status": "4,5,6",
"newarray": []interface{}{int64(4), int64(5), int64(6)},
},
}
assert.Equal(t, exp, datas)
}
func TestGetTrend(t *testing.T) {
t.Parallel()
assert.Equal(t, SpeedUp, getTrend(0, 1))
assert.Equal(t, SpeedDown, getTrend(1, 0))
assert.Equal(t, SpeedStable, getTrend(1, 1))
}
func TestSpeedTrend(t *testing.T) {
t.Parallel()
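	// calcSpeedTrend computes speed as (new successes - old successes) / elapsed time and derives the trend from old vs. new speed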
tests := []struct {
olds StatsInfo
news StatsInfo
etime float64
exp StatsInfo
}{
{
olds: StatsInfo{
Success: 1,
Speed: 1.0,
},
news: StatsInfo{
Success: 2,
},
etime: 1.0,
exp: StatsInfo{
Success: 2,
Speed: 1.0,
Trend: SpeedStable,
},
},
{
olds: StatsInfo{},
news: StatsInfo{},
etime: 0,
exp: StatsInfo{
Success: 0,
Speed: 0,
Trend: SpeedStable,
},
},
{
olds: StatsInfo{
Success: 1,
Speed: 1.0,
},
news: StatsInfo{
Success: 10,
},
etime: 1.0,
exp: StatsInfo{
Success: 10,
Speed: 9.0,
Trend: SpeedUp,
},
},
{
olds: StatsInfo{
Success: 10,
Speed: 10.0,
},
news: StatsInfo{
Success: 11,
},
etime: 1.0,
exp: StatsInfo{
Success: 11,
Speed: 1.0,
Trend: SpeedDown,
},
},
}
for _, ti := range tests {
ti.news.Speed, ti.news.Trend = calcSpeedTrend(ti.olds, ti.news, ti.etime)
assert.Equal(t, ti.exp, ti.news)
}
}
func TestCopyStats(t *testing.T) {
t.Parallel()
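	// Clone must deep-copy the stats maps, so mutating src after cloning must not affect the clone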
tests := []struct {
src RunnerStatus
dst RunnerStatus
exp RunnerStatus
}{
{
src: RunnerStatus{
ReadDataSize: 10,
ReadDataCount: 10,
SenderStats: map[string]StatsInfo{
"a": {
Success: 11,
Speed: 1.0,
Trend: SpeedDown,
},
"c": {
Success: 12,
Speed: 1.0,
Trend: SpeedDown,
},
},
TransformStats: map[string]StatsInfo{
"x": {
Success: 2,
Speed: 5.0,
Trend: SpeedDown,
},
},
ReadSpeedKB: 10,
ReadSpeed: 10,
},
exp: RunnerStatus{
ReadDataSize: 10,
ReadDataCount: 10,
SenderStats: map[string]StatsInfo{
"a": {
Success: 11,
Speed: 1.0,
Trend: SpeedDown,
},
"c": {
Success: 12,
Speed: 1.0,
Trend: SpeedDown,
},
},
TransformStats: map[string]StatsInfo{
"x": {
Success: 2,
Speed: 5.0,
Trend: SpeedDown,
},
},
ReadSpeedKB: 10,
ReadSpeed: 10,
},
dst: RunnerStatus{
ReadDataSize: 5,
ReadDataCount: 0,
SenderStats: map[string]StatsInfo{
"x": {
Success: 0,
Speed: 2.0,
Trend: SpeedDown,
},
"b": {
Success: 5,
Speed: 1.0,
Trend: SpeedDown,
},
},
TransformStats: map[string]StatsInfo{
"s": {
Success: 21,
Speed: 50.0,
Trend: SpeedUp,
},
},
ReadSpeedKB: 11,
ReadSpeed: 2,
},
},
}
for _, ti := range tests {
ti.dst = (&ti.src).Clone()
for i, v := range ti.src.SenderStats {
v.Speed = 0
v.Success = 0
ti.src.SenderStats[i] = v
}
assert.Equal(t, ti.exp, ti.dst)
}
}
func TestSyslogRunnerX(t *testing.T) {
t.Parallel()
metaDir := "TestSyslogRunner"
os.Mkdir(metaDir, DefaultDirPerm)
defer os.RemoveAll(metaDir)
config := `{
"name":"TestSyslogRunner",
"batch_len":1,
"reader":{
"mode":"socket",
"meta_path":"TestSyslogRunner",
"socket_service_address":"tcp://:5142"
},
"parser":{
"name":"syslog",
"type":"raw"
},
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./TestSyslogRunner/syslog.txt"
}]
}`
rc := RunnerConfig{}
err := jsoniter.Unmarshal([]byte(config), &rc)
assert.NoError(t, err)
rr, err := NewCustomRunner(rc, make(chan cleaner.CleanSignal), reader.NewRegistry(), parser.NewRegistry(), sender.NewRegistry())
assert.NoError(t, err)
go rr.Run()
time.Sleep(1 * time.Second)
sysLog, err := syslog.Dial("tcp", "127.0.0.1:5142",
syslog.LOG_WARNING|syslog.LOG_DAEMON, "demotag")
if err != nil {
log.Fatal(err)
}
err = sysLog.Emerg("And this is a daemon emergency with demotag.")
assert.NoError(t, err)
err = sysLog.Emerg("this is OK")
assert.NoError(t, err)
time.Sleep(2 * time.Second)
data, err := ioutil.ReadFile("./TestSyslogRunner/syslog.txt")
assert.NoError(t, err)
if !strings.Contains(string(data), "this is OK") || !strings.Contains(string(data), "And this is a daemon emergency with demotag.") {
t.Error("syslog parse error")
}
}
func TestAddDatasource(t *testing.T) {
t.Parallel()
sourceFroms := []string{"a", "b", "c", "d", "e", "f"}
se := &StatsError{
DatasourceSkipIndex: []int{0, 3, 5},
}
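	// indexes 0, 3 and 5 of sourceFroms are marked as skipped, so the three datas map to sources "b", "c" and "e"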
datas := []Data{
{
"f1": "2",
},
{
"f2": "1",
},
{
"f3": "3",
},
}
datasourceTagName := "source"
runnername := "runner1"
exp := []Data{
{
"f1": "2",
"source": "b",
},
{
"f2": "1",
"source": "c",
},
{
"f3": "3",
"source": "e",
},
}
gots := addSourceToData(sourceFroms, se, datas, datasourceTagName, runnername)
assert.Equal(t, exp, gots)
se = nil
exp = []Data{
{
"f1": "2",
"source": "a",
},
{
"f2": "1",
"source": "b",
},
{
"f3": "3",
"source": "c",
},
}
datas = []Data{
{
"f1": "2",
},
{
"f2": "1",
},
{
"f3": "3",
},
}
gots = addSourceToData(sourceFroms, se, datas, datasourceTagName, runnername)
assert.Equal(t, exp, gots)
}
func TestAddEncode(t *testing.T) {
t.Parallel()
datas := []Data{
{
"f1": "2",
},
{
"f2": "1",
},
{
"f3": "3",
},
}
encodeTagName := "encode"
runnername := "runner1"
encode := "gbk"
exp := []Data{
{
"f1": "2",
"encode": "gbk",
},
{
"f2": "1",
"encode": "gbk",
},
{
"f3": "3",
"encode": "gbk",
},
}
addEncodeToData(datas, encodeTagName, encode, runnername)
assert.Equal(t, exp, datas)
}
func TestAddDatasourceForErrData(t *testing.T) {
t.Parallel()
sourceFroms := []string{"a", "b", "c", "d", "e", "f"}
se := &StatsError{
DatasourceSkipIndex: []int{0, 3, 5},
}
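	// entries carrying pandora_stash are the error records for the skipped indexes, so all six sources a-f are assigned in order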
datas := []Data{
{
"pandora_stash": "rawdata1",
},
{
"f1": "2",
},
{
"f2": "1",
},
{
"pandora_stash": "rawdata2",
},
{
"f3": "3",
},
{
"pandora_stash": "rawdata3",
},
}
datasourceTagName := "source"
runnername := "runner1"
exp := []Data{
{
"pandora_stash": "rawdata1",
"source": "a",
},
{
"f1": "2",
"source": "b",
},
{
"f2": "1",
"source": "c",
},
{
"pandora_stash": "rawdata2",
"source": "d",
},
{
"f3": "3",
"source": "e",
},
{
"pandora_stash": "rawdata3",
"source": "f",
},
}
gots := addSourceToData(sourceFroms, se, datas, datasourceTagName, runnername)
assert.Equal(t, exp, gots)
}
func TestAddDatasourceForRawData(t *testing.T) {
t.Parallel()
dir := "TestAddDatasource"
metaDir := filepath.Join(dir, "meta")
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("TestAddDatasource error mkdir %v %v", dir, err)
}
filename := []string{"test1.log", "test2.log", "test3.log", "test4.log"}
content := []string{"1 fufu 3.14\n", "3 fufu 3.16\n", "hfdjsak,dadiajd,dsaud\n", "4 fufu 3.17\n"}
var realPaths []string
for i := range filename {
logPath := filepath.Join(dir, filename[i])
readPath, err := filepath.Abs(logPath)
assert.NoError(t, err)
realPaths = append(realPaths, readPath)
err = ioutil.WriteFile(logPath, []byte(content[i]), DefaultDirPerm)
assert.NoError(t, err)
}
defer os.RemoveAll(dir)
defer os.RemoveAll(metaDir)
config1 := `{
"name":"TestAddDatasource",
"batch_len":4,
"reader":{
"mode":"dir",
"log_path":"./TestAddDatasource/",
"datasource_tag":"datasource"
},
"parser":{
"name":"testcsv",
"type":"csv",
"csv_schema":"a long,b string,c float",
"csv_splitter":" ",
"disable_record_errdata":"true",
"keep_raw_data":"true"
},
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./TestAddDatasource/filesend.csv"
}]
}`
rc := RunnerConfig{}
err := jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
rr, err := NewCustomRunner(rc, make(chan cleaner.CleanSignal), reader.NewRegistry(), parser.NewRegistry(), sender.NewRegistry())
assert.NoError(t, err)
go rr.Run()
time.Sleep(2 * time.Second)
data, err := ioutil.ReadFile("./TestAddDatasource/filesend.csv")
assert.Nil(t, err)
var res []Data
err = jsoniter.Unmarshal(data, &res)
if err != nil {
t.Error(err)
}
exp := []Data{
{
"c": float64(3.14),
"raw_data": content[0],
"datasource": realPaths[0],
"a": float64(1),
"b": "fufu",
},
{
"a": float64(3),
"b": "fufu",
"c": float64(3.16),
"raw_data": content[1],
"datasource": realPaths[1],
},
{
"raw_data": content[2],
"datasource": realPaths[2],
},
{
"b": "fufu",
"c": float64(3.17),
"raw_data": content[3],
"datasource": realPaths[3],
"a": float64(4),
},
}
assert.Equal(t, len(exp), len(res))
	// res contains an extra lst key-value pair, so compare only the expected keys
for idx := range exp {
for expKey, expVal := range exp[idx] {
assert.Equal(t, expVal, res[idx][expKey])
}
}
}
func TestAddDatatags(t *testing.T) {
t.Parallel()
dir := "TestAddDatatags"
metaDir := filepath.Join(dir, "meta")
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("TestAddDatatags error mkdir %v %v", dir, err)
}
tagFile := filepath.Join(dir, "tagFile.json")
err := ioutil.WriteFile(tagFile, []byte(`{
"Title":"tags",
"Author":["john","ada","alice"],
"IsTrue":true,
"Host":99
}`), DefaultDirPerm)
assert.NoError(t, err)
logPath := filepath.Join(dir, "test.log")
err = ioutil.WriteFile(logPath, []byte(`{"f1": "2","f2": "1","f3": "3"}`), DefaultDirPerm)
assert.NoError(t, err)
defer os.RemoveAll(dir)
defer os.RemoveAll(metaDir)
config1 := `{
"name":"TestAddDatatags",
"batch_len":1,
"reader":{
"mode":"file",
"meta_path":"./TestAddDatatags/meta",
"file_done":"./TestAddDatatags/meta",
"log_path":"./TestAddDatatags/test.log",
"tag_file":"./TestAddDatatags/tagFile.json"
},
"parser":{
"name":"testjson",
"type":"json"
},
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./TestAddDatatags/filesend.json"
}]
}`
rc := RunnerConfig{}
err = jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
rr, err := NewCustomRunner(rc, make(chan cleaner.CleanSignal), reader.NewRegistry(), parser.NewRegistry(), sender.NewRegistry())
assert.NoError(t, err)
go rr.Run()
time.Sleep(2 * time.Second)
data, err := ioutil.ReadFile("./TestAddDatatags/filesend.json")
assert.Nil(t, err)
var res []Data
err = jsoniter.Unmarshal(data, &res)
if err != nil {
t.Error(err)
}
exp := []Data{
{
"f1": "2",
"f2": "1",
"f3": "3",
"Title": "tags",
"Author": []interface{}{"john", "ada", "alice"},
"IsTrue": bool(true),
"Host": float64(99),
},
}
assert.Equal(t, len(exp), len(res))
	// res contains an extra lst key-value pair, so compare only the expected keys
for idx := range exp {
for expKey, expVal := range exp[idx] {
assert.Equal(t, expVal, res[idx][expKey])
}
}
}
func TestRunWithExtra(t *testing.T) {
t.Parallel()
dir := "TestRunWithExtra"
metaDir := filepath.Join(dir, "meta")
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("TestRunWithExtra error mkdir %v %v", dir, err)
}
logPath := filepath.Join(dir, "test.log")
err := ioutil.WriteFile(logPath, []byte(`{"f1": "2","f2": "1","f3": "3"}`), DefaultDirPerm)
assert.NoError(t, err)
defer os.RemoveAll(dir)
defer os.RemoveAll(metaDir)
config1 := `{
"name":"TestRunWithExtra",
"batch_len":1,
"extra_info":true,
"reader":{
"mode":"file",
"meta_path":"./TestRunWithExtra/meta",
"log_path":"./TestRunWithExtra/test.log"
},
"parser":{
"name":"testjson",
"type":"json"
},
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./TestRunWithExtra/filesend.json"
}]
}`
rc := RunnerConfig{}
err = jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
rr, err := NewCustomRunner(rc, make(chan cleaner.CleanSignal), reader.NewRegistry(), parser.NewRegistry(), sender.NewRegistry())
assert.NoError(t, err)
go rr.Run()
time.Sleep(2 * time.Second)
data, err := ioutil.ReadFile("./TestRunWithExtra/filesend.json")
assert.Nil(t, err)
var res []Data
err = jsoniter.Unmarshal(data, &res)
if err != nil {
t.Error(err)
}
	// res contains an extra lst key-value pair
assert.Equal(t, 8, len(res[0]))
}
func TestRunWithDataSource(t *testing.T) {
t.Parallel()
cur, err := os.Getwd()
assert.NoError(t, err)
dir := filepath.Join(cur, "TestRunWithDataSource")
os.RemoveAll(dir)
metaDir := filepath.Join(dir, "meta")
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("TestRunWithDataSource error mkdir %v %v", dir, err)
}
logPath := filepath.Join(dir, "test.log")
err = ioutil.WriteFile(logPath, []byte("a\nb\n\n\nc\n"), DefaultDirPerm)
assert.NoError(t, err)
defer os.RemoveAll(dir)
defer os.RemoveAll(metaDir)
config1 := `{
"name":"TestRunWithDataSource",
"batch_len":3,
"reader":{
"mode":"file",
"meta_path":"./TestRunWithDataSource/meta",
"log_path":"` + logPath + `",
"datasource_tag":"datasource"
},
"parser":{
"name":"testjson",
"type":"raw",
"timestamp":"false"
},
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./TestRunWithDataSource/filesend.json"
}]
}`
rc := RunnerConfig{}
err = jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
rr, err := NewCustomRunner(rc, make(chan cleaner.CleanSignal), reader.NewRegistry(), parser.NewRegistry(), sender.NewRegistry())
assert.NoError(t, err)
assert.NotNil(t, rr)
go rr.Run()
time.Sleep(2 * time.Second)
data, err := ioutil.ReadFile("./TestRunWithDataSource/filesend.json")
assert.Nil(t, err)
var res []Data
err = json.Unmarshal(data, &res)
if err != nil {
t.Error(err, string(data))
}
exp := []Data{
{
"raw": "a\n",
"datasource": logPath,
},
{
"raw": "b\n",
"datasource": logPath,
},
{
"raw": "c\n",
"datasource": logPath,
},
}
assert.Equal(t, len(exp), len(res))
	// res contains an extra lst key-value pair, so compare only the expected keys
for idx := range exp {
for expKey, expVal := range exp[idx] {
assert.Equal(t, expVal, res[idx][expKey])
}
}
}
func TestRunWithDataSourceFail(t *testing.T) {
t.Parallel()
cur, err := os.Getwd()
assert.NoError(t, err)
dir := filepath.Join(cur, "TestRunWithDataSourceFail")
metaDir := filepath.Join(dir, "meta")
os.RemoveAll(dir)
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("TestRunWithDataSource error mkdir %v %v", dir, err)
}
logPath := filepath.Join(dir, "test.log")
err = ioutil.WriteFile(logPath, []byte("a\n"), DefaultDirPerm)
assert.NoError(t, err)
defer os.RemoveAll(dir)
defer os.RemoveAll(metaDir)
config1 := `{
"name":"TestRunWithDataSourceFail",
"batch_len":1,
"reader":{
"mode":"file",
"log_path":"` + logPath + `",
"meta_path":"./TestRunWithDataSourceFail/meta",
"datasource_tag":"datasource"
},
"parser":{
"name":"testjson",
"type":"json"
},
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./TestRunWithDataSourceFail/filesend.json"
}]
}`
rc := RunnerConfig{}
err = jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
rr, err := NewCustomRunner(rc, make(chan cleaner.CleanSignal), reader.NewRegistry(), parser.NewRegistry(), sender.NewRegistry())
assert.NoError(t, err)
assert.NotNil(t, rr)
go rr.Run()
time.Sleep(2 * time.Second)
data, err := ioutil.ReadFile("./TestRunWithDataSourceFail/filesend.json")
assert.Nil(t, err)
var res []Data
err = json.Unmarshal(data, &res)
if err != nil {
t.Error(err, string(data))
}
exp := []Data{
{
"pandora_stash": "a",
"datasource": logPath,
},
}
assert.Equal(t, len(exp), len(res))
	// res contains an extra lst key-value pair, so compare only the expected keys
for idx := range exp {
for expKey, expVal := range exp[idx] {
assert.Equal(t, expVal, res[idx][expKey])
}
}
}
func TestClassifySenderData(t *testing.T) {
t.Parallel()
{
senders := []sender.Sender{&mock.Sender{}, &mock.Sender{}, &mock.Sender{}}
numSenders := len(senders)
datas := []Data{
{
"a": "a",
"b": "b",
"c": "c",
"d": "d",
},
{
"a": "A",
"b": "b",
"c": "c",
"d": "d",
},
{
"a": "B",
"b": "b",
"c": "c",
"d": "d",
},
{
"a": "C",
"b": "b",
"c": "c",
"d": "d",
},
}
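		// route records with a=="a" to sender 2 and a=="A" to sender 1; everything else falls back to the default sender 0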
routerConf := router.RouterConfig{
KeyName: "a",
MatchType: "equal",
DefaultIndex: 0,
Routes: map[string]int{
"a": 2,
"A": 1,
},
}
r, err := router.NewSenderRouter(routerConf, numSenders)
assert.Nil(t, err)
senderDataList := classifySenderData(senders, datas, r)
assert.Equal(t, numSenders, len(senderDataList))
assert.Equal(t, 2, len(senderDataList[0]))
assert.Equal(t, 1, len(senderDataList[1]))
assert.Equal(t, 1, len(senderDataList[2]))
		// test the case where no router is configured
routerConf.KeyName = ""
r, err = router.NewSenderRouter(routerConf, numSenders)
assert.Nil(t, r)
assert.NoError(t, err)
senderDataList = classifySenderData(senders, datas, r)
assert.Equal(t, numSenders, len(senderDataList))
assert.Equal(t, 4, len(senderDataList[0]))
assert.Equal(t, 4, len(senderDataList[1]))
assert.Equal(t, 4, len(senderDataList[2]))
}
	// --> verify that the SkipDeepCopySender check takes effect <--
	// a data-modifying sender is followed by other senders
{
senders := []sender.Sender{&mock.Sender{}, &pandora.Sender{}, &mock.Sender{}}
datas := []Data{
{
"a": "a",
"b": "b",
"c": "c",
"d": "d",
},
}
senderDataList := classifySenderData(senders, datas, nil)
assert.Len(t, senderDataList, len(senders))
assert.True(t, fmt.Sprintf("%p", datas) == fmt.Sprintf("%p", senderDataList[0]))
assert.False(t, fmt.Sprintf("%p", datas) == fmt.Sprintf("%p", senderDataList[1]))
assert.True(t, fmt.Sprintf("%p", datas) == fmt.Sprintf("%p", senderDataList[2]))
}
	// the data-modifying sender is the last one
{
senders := []sender.Sender{&mock.Sender{}, &pandora.Sender{}}
datas := []Data{
{
"a": "a",
"b": "b",
"c": "c",
"d": "d",
},
}
senderDataList := classifySenderData(senders, datas, nil)
assert.Len(t, senderDataList, len(senders))
assert.True(t, fmt.Sprintf("%p", datas) == fmt.Sprintf("%p", senderDataList[0]))
assert.True(t, fmt.Sprintf("%p", datas) == fmt.Sprintf("%p", senderDataList[1]))
}
	// only a data-modifying sender is present
{
senders := []sender.Sender{&pandora.Sender{}}
datas := []Data{
{
"a": "a",
"b": "b",
"c": "c",
"d": "d",
},
}
senderDataList := classifySenderData(senders, datas, nil)
assert.Len(t, senderDataList, len(senders))
assert.True(t, fmt.Sprintf("%p", datas) == fmt.Sprintf("%p", senderDataList[0]))
}
}
// Response from Clearbit API. Size: 2.4kb
var mediumFixture = []byte(`{
"person": {
"id": "d50887ca-a6ce-4e59-b89f-14f0b5d03b03",
"name": {
"fullName": "Leonid Bugaev",
"givenName": "Leonid",
"familyName": "Bugaev"
},
"email": "[email protected]",
"gender": "male",
"location": "Saint Petersburg, Saint Petersburg, RU",
"geo": {
"city": "Saint Petersburg",
"state": "Saint Petersburg",
"country": "Russia",
"lat": 59.9342802,
"lng": 30.3350986
},
"bio": "Senior engineer at Granify.com",
"site": "http://flickfaver.com",
"avatar": "https://d1ts43dypk8bqh.cloudfront.net/v1/avatars/d50887ca-a6ce-4e59-b89f-14f0b5d03b03",
"employment": {
"name": "www.latera.ru",
"title": "Software Engineer",
"domain": "gmail.com"
},
"facebook": {
"handle": "leonid.bugaev"
},
"github": {
"handle": "buger",
"id": 14009,
"avatar": "https://avatars.githubusercontent.com/u/14009?v=3",
"company": "Granify",
"blog": "http://leonsbox.com",
"followers": 95,
"following": 10
},
"twitter": {
"handle": "flickfaver",
"id": 77004410,
"bio": null,
"followers": 2,
"following": 1,
"statuses": 5,
"favorites": 0,
"location": "",
"site": "http://flickfaver.com",
"avatar": null
},
"linkedin": {
"handle": "in/leonidbugaev"
},
"googleplus": {
"handle": null
},
"angellist": {
"handle": "leonid-bugaev",
"id": 61541,
"bio": "Senior engineer at Granify.com",
"blog": "http://buger.github.com",
"site": "http://buger.github.com",
"followers": 41,
"avatar": "https://d1qb2nb5cznatu.cloudfront.net/users/61541-medium_jpg?1405474390"
},
"klout": {
"handle": null,
"score": null
},
"foursquare": {
"handle": null
},
"aboutme": {
"handle": "leonid.bugaev",
"bio": null,
"avatar": null
},
"gravatar": {
"handle": "buger",
"urls": [
],
"avatar": "http://1.gravatar.com/avatar/f7c8edd577d13b8930d5522f28123510",
"avatars": [
{
"url": "http://1.gravatar.com/avatar/f7c8edd577d13b8930d5522f28123510",
"type": "thumbnail"
}
]
},
"fuzzy": false
},
"company": null
}`)
type CBAvatar struct {
Url string `json:"url"`
}
type CBGravatar struct {
Avatars []*CBAvatar `json:"avatars"`
}
type CBGithub struct {
Followers int `json:"followers"`
}
type CBName struct {
FullName string `json:"fullName"`
}
type CBPerson struct {
Name *CBName `json:"name"`
Github *CBGithub `json:"github"`
Gravatar *CBGravatar `json:"gravatar"`
}
type MediumPayload struct {
Person *CBPerson `json:"person"`
Company string `json:"company"`
}
func BenchmarkDecodeStdStructMedium(b *testing.B) {
b.ReportAllocs()
var data MediumPayload
for i := 0; i < b.N; i++ {
jsoniter.Unmarshal(mediumFixture, &data)
}
}
func BenchmarkEncodeStdStructMedium(b *testing.B) {
var data MediumPayload
jsoniter.Unmarshal(mediumFixture, &data)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
jsoniter.Marshal(data)
}
}
func BenchmarkDecodeJsoniterStructMedium(b *testing.B) {
b.ReportAllocs()
var data MediumPayload
for i := 0; i < b.N; i++ {
jsoniter.Unmarshal(mediumFixture, &data)
}
}
func BenchmarkEncodeJsoniterStructMedium(b *testing.B) {
var data MediumPayload
jsoniter.Unmarshal(mediumFixture, &data)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
jsoniter.Marshal(data)
}
}
func BenchmarkEncodeJsoniterCompatibleStructMedium(b *testing.B) {
var data MediumPayload
jsoniter.Unmarshal(mediumFixture, &data)
b.ReportAllocs()
jsonc := jsoniter.ConfigCompatibleWithStandardLibrary
for i := 0; i < b.N; i++ {
jsonc.Marshal(data)
}
}
/*
BenchmarkDecodeStdStructMedium-4 50000 39162 ns/op 1960 B/op 99 allocs/op
BenchmarkEncodeStdStructMedium-4 1000000 2106 ns/op 712 B/op 5 allocs/op
BenchmarkDecodeJsoniterStructMedium-4 200000 7676 ns/op 320 B/op 36 allocs/op
BenchmarkEncodeJsoniterStructMedium-4 1000000 1046 ns/op 240 B/op 3 allocs/op
BenchmarkEncodeJsoniterCompatibleStructMedium-4 1000000 1023 ns/op 240 B/op 3 allocs/op
PASS
Performance improved significantly
*/
func TestMergeEnvTags(t *testing.T) {
t.Parallel()
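	// MergeEnvTags parses the JSON stored in the named environment variable and merges it into the existing tag map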
key := "TestMergeEnvTags"
os.Setenv(key, `{"a":"hello"}`)
defer os.Unsetenv(key)
tags := MergeEnvTags(key, nil)
assert.Equal(t, map[string]interface{}{"a": "hello"}, tags)
os.Setenv(key, `{"b":"123","c":"nihao"}`)
tags = MergeEnvTags(key, tags)
assert.Equal(t, map[string]interface{}{"a": "hello", "b": "123", "c": "nihao"}, tags)
}
func TestMergeExtraInfoTags(t *testing.T) {
t.Parallel()
meta, err := reader.NewMetaWithConf(conf.MapConf{
ExtraInfo: "true",
readerConf.KeyMode: readerConf.ModeMySQL,
})
assert.NoError(t, err)
tags := MergeExtraInfoTags(meta, nil)
assert.Equal(t, 4, len(tags))
	// merging again should not produce any change
tags = MergeExtraInfoTags(meta, tags)
assert.Equal(t, 4, len(tags))
}
func TestTailxCleaner(t *testing.T) {
t.Parallel()
cur, err := os.Getwd()
assert.NoError(t, err)
dir := filepath.Join(cur, "TestTailxCleaner")
metaDir := filepath.Join(dir, "meta")
os.RemoveAll(dir)
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("TestTailxCleaner error mkdir %v %v", dir, err)
}
defer os.RemoveAll(dir)
dira := filepath.Join(dir, "a")
os.MkdirAll(dira, DefaultDirPerm)
logPatha := filepath.Join(dira, "a.log")
assert.NoError(t, ioutil.WriteFile(logPatha, []byte("a\n"), 0666))
dirb := filepath.Join(dir, "b")
os.MkdirAll(dirb, DefaultDirPerm)
logPathb := filepath.Join(dirb, "b.log")
assert.NoError(t, ioutil.WriteFile(logPathb, []byte("b\n"), 0666))
readfile := filepath.Join(dir, "*", "*.log")
config := `
{
"name": "TestTailxCleaner",
"batch_size": 2097152,
"batch_interval": 1,
"reader": {
"expire": "24h",
"log_path": "` + readfile + `",
"meta_path":"` + metaDir + `",
"mode": "tailx",
"read_from": "oldest",
"stat_interval": "1s"
},
"cleaner": {
"delete_enable": "true",
"delete_interval": "1",
"reserve_file_number": "1",
"reserve_file_size": "2048"
},
"parser": {
"disable_record_errdata": "false",
"timestamp": "true",
"type": "raw"
},
"senders": [
{
"sender_type": "discard"
}
]
}`
rc := RunnerConfig{}
assert.NoError(t, jsoniter.Unmarshal([]byte(config), &rc))
cleanChan := make(chan cleaner.CleanSignal)
rr, err := NewLogExportRunner(rc, cleanChan, reader.NewRegistry(), parser.NewRegistry(), sender.NewRegistry())
assert.NoError(t, err)
assert.NotNil(t, rr)
go rr.Run()
time.Sleep(2 * time.Second)
logPatha1 := filepath.Join(dira, "a.log.1")
assert.NoError(t, os.Rename(logPatha, logPatha1))
assert.NoError(t, ioutil.WriteFile(logPatha, []byte("bbbb\n"), 0666))
time.Sleep(5 * time.Second)
logPatha2 := filepath.Join(dira, "a.log.2")
assert.NoError(t, os.Rename(logPatha, logPatha2))
assert.NoError(t, ioutil.WriteFile(logPatha, []byte("cccc\n"), 0666))
time.Sleep(2 * time.Second)
assert.NotNil(t, rr.Cleaner())
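	// expect exactly one clean signal, for the rotated a.log.1 file; poll for up to ~10s (200 * 50ms)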
var ret, dft int
DONE:
for {
select {
case sig := <-cleanChan:
ret++
assert.Equal(t, "a.log.1", sig.Filename)
assert.NoError(t, os.Remove(filepath.Join(sig.Logdir, sig.Filename)))
assert.Equal(t, readerConf.ModeTailx, sig.ReadMode)
break DONE
default:
dft++
}
time.Sleep(50 * time.Millisecond)
if dft > 200 {
break
}
}
assert.Equal(t, 1, ret)
}
func Test_setSenderConfig(t *testing.T) {
t.Parallel()
senderConfig := conf.MapConf{
senderConf.KeySenderType: senderConf.TypePandora,
}
serverConfigs := []map[string]interface{}{
{
KeyType: ip.Name,
ProcessAt: Server,
},
}
actualConfig, err := setPandoraServerConfig(senderConfig, serverConfigs)
assert.NoError(t, err)
assert.Equal(t, "", actualConfig[senderConf.KeyPandoraAutoCreate])
serverConfigs = []map[string]interface{}{
{
KeyType: ip.Name,
ProcessAt: Server,
"key": "ip",
},
}
actualConfig, err = setPandoraServerConfig(senderConfig, serverConfigs)
assert.NoError(t, err)
assert.Equal(t, "ip ip", actualConfig[senderConf.KeyPandoraAutoCreate])
senderConfig = conf.MapConf{
senderConf.KeySenderType: senderConf.TypePandora,
}
serverConfigs = []map[string]interface{}{
{
KeyType: ip.Name,
ProcessAt: Local,
"key": "a.b",
},
}
actualConfig, err = setPandoraServerConfig(senderConfig, serverConfigs)
assert.NoError(t, err)
assert.Equal(t, "", actualConfig[senderConf.KeyPandoraAutoCreate])
serverConfigs = []map[string]interface{}{
{
KeyType: "other",
},
}
actualConfig, err = setPandoraServerConfig(senderConfig, serverConfigs)
assert.NoError(t, err)
assert.Equal(t, "", actualConfig[senderConf.KeyPandoraAutoCreate])
serverConfigs = []map[string]interface{}{
{
KeyType: ip.Name,
ProcessAt: Server,
"key": "ip.ip",
},
}
_, err = setPandoraServerConfig(senderConfig, serverConfigs)
assert.Error(t, err)
}
func Test_removeServerIPSchema(t *testing.T) {
t.Parallel()
tests := []struct {
autoCreate string
key string
expect string
}{
{
autoCreate: "a ip,a ip",
key: "a",
expect: "",
},
{
autoCreate: "pandora_stash string,a ip,b string",
key: "a",
expect: "pandora_stash string,b string",
},
{
autoCreate: "",
key: "a",
expect: "",
},
{
autoCreate: "a ip,b string",
key: "a",
expect: "b string",
},
{
autoCreate: "a ip",
key: "a",
expect: "",
},
}
for _, test := range tests {
res := removeServerIPSchema(test.autoCreate, test.key)
assert.Equal(t, test.expect, res)
}
}
// before: 5000    242788 ns/op    126474 B/op     758 allocs/op
// now:    5000    266301 ns/op    145645 B/op    1572 allocs/op
// needs optimization
func BenchmarkStatusRestore(b *testing.B) {
logkitConf := conf.MapConf{
readerConf.KeyMetaPath: "BenchmarkStatusRestore",
readerConf.KeyMode: readerConf.ModeMongo,
}
meta, err := reader.NewMetaWithConf(logkitConf)
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll("BenchmarkStatusRestore")
r1 := &LogExportRunner{
historyMutex: new(sync.RWMutex),
meta: meta,
rs: &RunnerStatus{},
lastRs: &RunnerStatus{},
historyError: &ErrorsList{},
}
r2 := &LogExportRunner{
historyMutex: new(sync.RWMutex),
meta: meta,
rs: &RunnerStatus{},
lastRs: &RunnerStatus{},
historyError: &ErrorsList{},
}
b.ReportAllocs()
for i := 0; i < b.N; i++ {
r1.StatusRestore()
r2.StatusRestore()
}
}
func randinsert(l *equeue.ErrorQueue, num int) {
for i := 0; i < num; i++ {
l.Put(equeue.ErrorInfo{
Error: fmt.Sprintf("err %v", rand.Intn(100)),
Count: int64(rand.Intn(100) + 1),
})
}
}
func TestBackupRestoreHistory(t *testing.T) {
t.Parallel()
logkitConf := conf.MapConf{
readerConf.KeyMetaPath: "meta",
readerConf.KeyMode: readerConf.ModeMongo,
}
meta, err := reader.NewMetaWithConf(logkitConf)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll("meta")
rq := equeue.New(10)
randinsert(rq, 12)
pq := equeue.New(10)
randinsert(pq, 12)
tq := equeue.New(10)
randinsert(tq, 12)
sq := equeue.New(10)
randinsert(sq, 12)
s1, _ := discard.NewSender(conf.MapConf{"name": "s1"})
r1 := &LogExportRunner{
meta: meta,
rsMutex: new(sync.RWMutex),
historyMutex: new(sync.RWMutex),
rs: &RunnerStatus{
TransformStats: map[string]StatsInfo{"pick-0": {Success: 1}},
SenderStats: map[string]StatsInfo{"s1": {Success: 1}},
},
historyError: &ErrorsList{
ReadErrors: rq,
ParseErrors: pq,
TransformErrors: map[string]*equeue.ErrorQueue{
"pick-0": tq,
},
SendErrors: map[string]*equeue.ErrorQueue{
"s1": sq,
},
},
lastRs: &RunnerStatus{},
transformers: []transforms.Transformer{&mutate.Pick{}},
senders: []sender.Sender{s1},
}
r1.StatusBackup()
r2 := &LogExportRunner{
meta: meta,
historyMutex: new(sync.RWMutex),
rs: &RunnerStatus{
TransformStats: map[string]StatsInfo{},
SenderStats: map[string]StatsInfo{},
},
historyError: &ErrorsList{},
lastRs: &RunnerStatus{},
transformers: []transforms.Transformer{&mutate.Pick{}},
senders: []sender.Sender{s1},
}
r2.StatusRestore()
	// ensure the restored state is consistent with what was backed up earlier
assert.Equal(t, r1.historyError.ReadErrors.List(), r2.historyError.ReadErrors.List())
assert.Equal(t, r1.historyError.ParseErrors.List(), r2.historyError.ParseErrors.List())
for k, v := range r1.historyError.TransformErrors {
assert.Equal(t, v.List(), r2.historyError.TransformErrors[k].List())
}
for k, v := range r1.historyError.SendErrors {
assert.Equal(t, v.List(), r2.historyError.SendErrors[k].List())
}
}
| [
"\"Test_RunForEnvTag\""
]
| []
| [
"Test_RunForEnvTag"
]
| [] | ["Test_RunForEnvTag"] | go | 1 | 0 | |
projects/faster_rcnn_swin_transformer/train_net_swint_full_reso.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Detection Training Script.
This scripts reads a given config file and runs the training or evaluation.
It is an entry point that is made to train standard models in detectron2.
In order to let one script support training of many models,
this script contains logic that are specific to these built-in models and therefore
may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend you to use detectron2 as an library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
"""
import itertools
import logging
import os
from collections import OrderedDict
import torch
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
verify_results,
)
from detectron2.modeling import GeneralizedRCNNWithTTA
from detectron2.solver.build import maybe_add_gradient_clipping, get_default_optimizer_params
from swint import add_swint_config
os.environ['CUDA_VISIBLE_DEVICES'] = '2,3'
from detectron2.modeling import GeneralizedRCNNWithTTA
##=============== Register custom datasets ===============##
from detectron2.data.datasets import register_coco_instances
register_coco_instances("SSLAD-2D_train", {}, json_file=r"/data/cenzhaojun/dataset/SODA10M/SSLAD-2D/labeled/annotations/instance_train.json",
image_root = r"/data/cenzhaojun/dataset/SODA10M/SSLAD-2D/labeled/train")
register_coco_instances("SSLAD-2D_test", {}, r"/data/cenzhaojun/dataset/SODA10M/SSLAD-2D/labeled/annotations/instance_val.json",
r"/data/cenzhaojun/dataset/SODA10M/SSLAD-2D/labeled/val")
# Set the category names
from detectron2.data import MetadataCatalog
MetadataCatalog.get("SSLAD-2D_train").thing_classes = ['Pedestrian','Cyclist','Car','Truck','Tram','Tricycle']
MetadataCatalog.get("SSLAD-2D_test").thing_classes = ['Pedestrian','Cyclist','Car','Truck','Tram','Tricycle']
# python tools/train_augmentationv2.py --config-file configs/Misc/cascade_rcnn_R_50_FPN_1x.yaml --num-gpus 2 OUTPUT_DIR training_dir/cascade_rcnn_R_50_FPN_1x_augmentation
class Trainer(DefaultTrainer):
"""
We use the "DefaultTrainer" which contains pre-defined default logic for
standard training workflow. They may not work for you, especially if you
are working on a new research project. In that case you can write your
own training loop. You can use "tools/plain_train_net.py" as an example.
"""
# def build_train_loader(cls, cfg):
# return build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, is_train=True, augmentations=[
# T.RandomBrightness(0.3, 2.0),
# T.RandomContrast(0.3, 2.5),
# # T.ColorTransform
# # RandomGaussianNoise(),
# # RandomPepperNoise(),
# # T.RandomRotation([-90,90]),
# # RandomResize(0.5,1.5),
# # T.RandomCrop('relative_range',(0.3,0.3)),
# # T.RandomExtent(scale_range=(0.3, 1), shift_range=(1, 1))
# ]))
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
output_dir=output_folder,
)
)
if evaluator_type in ["coco", "coco_panoptic_seg"]:
evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
if evaluator_type == "coco_panoptic_seg":
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if evaluator_type == "cityscapes_instance":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesInstanceEvaluator(dataset_name)
if evaluator_type == "cityscapes_sem_seg":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesSemSegEvaluator(dataset_name)
elif evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
elif evaluator_type == "lvis":
return LVISEvaluator(dataset_name, cfg, True, output_folder)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def test_with_TTA(cls, cfg, model):
logger = logging.getLogger("detectron2.trainer")
# In the end of training, run an evaluation with TTA
# Only support some R-CNN models.
logger.info("Running inference with test-time augmentation ...")
model = GeneralizedRCNNWithTTA(cfg, model)
evaluators = [
cls.build_evaluator(
cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
)
for name in cfg.DATASETS.TEST
]
res = cls.test(cfg, model, evaluators)
res = OrderedDict({k + "_TTA": v for k, v in res.items()})
return res
@classmethod
def build_optimizer(cls, cfg, model):
params = get_default_optimizer_params(
model,
base_lr=cfg.SOLVER.BASE_LR,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR,
weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS,
)
def maybe_add_full_model_gradient_clipping(optim): # optim: the optimizer class
# detectron2 doesn't have full model gradient clipping now
clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
enable = (
cfg.SOLVER.CLIP_GRADIENTS.ENABLED
and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
and clip_norm_val > 0.0
)
class FullModelGradientClippingOptimizer(optim):
def step(self, closure=None):
all_params = itertools.chain(*[x["params"] for x in self.param_groups])
torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
super().step(closure=closure)
return FullModelGradientClippingOptimizer if enable else optim
optimizer_type = cfg.SOLVER.OPTIMIZER
if optimizer_type == "SGD":
optimizer = maybe_add_gradient_clipping(torch.optim.SGD)(
params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
)
elif optimizer_type == "AdamW":
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
params, cfg.SOLVER.BASE_LR, betas=(0.9, 0.999),
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
)
else:
raise NotImplementedError(f"no optimizer type {optimizer_type}")
return optimizer
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
args.config_file = '../../configs/SwinT/faster_rcnn_swint_T_FPN_3x.yaml'
add_swint_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.MODEL.WEIGHTS = "/data/cenzhaojun/detectron2/weights/faster_rcnn_swint_T.pth"
    cfg.DATASETS.TRAIN = ("SSLAD-2D_train",)  # training dataset name
cfg.DATASETS.TEST = ("SSLAD-2D_test",)
cfg.OUTPUT_DIR = '/data/cenzhaojun/detectron2/training_dir/faster_rcnn_swint_T_FPN_3x_full_reso'
ITERS_IN_ONE_EPOCH = int(cfg.SOLVER.MAX_ITER / cfg.SOLVER.IMS_PER_BATCH)
cfg.TEST.EVAL_PERIOD = ITERS_IN_ONE_EPOCH
cfg.INPUT.MAX_SIZE_TRAIN = 1920
cfg.INPUT.MAX_SIZE_TEST = 1920
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 6
cfg.SOLVER.IMS_PER_BATCH = 22
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if cfg.TEST.AUG.ENABLED:
res.update(Trainer.test_with_TTA(cfg, model))
if comm.is_main_process():
verify_results(cfg, res)
return res
"""
If you'd like to do anything fancier than the standard training logic,
consider writing your own training loop (see plain_train_net.py) or
subclassing the trainer.
"""
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
if cfg.TEST.AUG.ENABLED:
trainer.register_hooks(
[hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
args.num_gpus = 2
args.resume = True
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
src/functions/install/install.py | import os
import boto3
def get_parameter(name, decrypt=False):
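    # Fetch a value from AWS SSM Parameter Store; decrypt=True is needed for SecureString parameters.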
ssm_client = boto3.client('ssm')
resp = ssm_client.get_parameter(Name=name, WithDecryption=decrypt)
return resp['Parameter']['Value']
CLIENT_ID = get_parameter(os.getenv('CLIENT_ID_PARAM'))
DOMAIN_NAME = os.getenv('DOMAIN_NAME')
def lambda_handler(event, context):
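    # Build the Slack OAuth authorize URL for this app and send the caller there via a 302 redirect.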
redirect_url = f'https://slack.com/oauth/authorize?client_id={CLIENT_ID}&' \
f'scope=bot,chat:write:bot&' \
f'redirect_uri=https://{DOMAIN_NAME}/slack/oauth/redirect'
return {
'isBase64Encoded': False,
'statusCode': 302,
'body': '',
'headers': {'Location': redirect_url}
}
| []
| []
| [
"DOMAIN_NAME",
"CLIENT_ID_PARAM"
]
| [] | ["DOMAIN_NAME", "CLIENT_ID_PARAM"] | python | 2 | 0 | |
do_like_javac/tools/wpi.py | import datetime
from filecmp import dircmp
from datetime import datetime
from . import common
import os
import pprint
import shutil
import tempfile
from distutils import dir_util
# re-use existing CF build logic
from . import check
argparser = None
# all options passed to javac by the build system are copied to the invocations
# of javac that run the Checker Framework, except those that either exactly match
# an element of ignored_options or start with an element of ignored_options_prefixes
ignored_options = ("classpath",
"nowarn", "Xmaxerrs", "Xmaxwarns", "Werror",
"processorpath", "processor", "proc:none",
"XepDisableAllChecks", "Xplugin:ErrorProne")
ignored_options_prefixes = ("Xep:", "XepExcludedPaths:")
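# e.g. Error Prone's "-Xep:..." flags are never forwarded, and classpath/processorpath are handled
# separately below instead of being copied verbatim into the Checker Framework javac invocation.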
def run(args, javac_commands, jars):
# checker-framework javac.
javacheck = os.environ['CHECKERFRAMEWORK']+"/checker/bin/javac"
checker_command = [javacheck, "-Xmaxerrs", "10000", "-Xmaxwarns", "10000"]
if args.checker is not None:
processorArg = ["-processor", args.checker]
else:
# checker should run via auto-discovery
processorArg = []
if args.jdkVersion is not None:
jdkVersion = int(args.jdkVersion)
else:
jdkVersion = 8
if args.extraJavacArgs is not None:
checker_command += args.extraJavacArgs.split()
for jc in javac_commands:
# something searchable to delineate different javac commands
common.run_cmd(["echo", "\"-----------------------------------------------------------\""], args, "wpi")
wpiDir = os.path.join(os.getcwd(), 'build/whole-program-inference')
# if there is already a WPI directory, delete it and start over
if os.path.isdir(wpiDir):
shutil.rmtree(wpiDir)
iteration = 0
diffResult = True
ajavaDirs = []
resultsDir = tempfile.mkdtemp(prefix="wpi-ajava-" + datetime.now().strftime("%Y%m%d-%H%M%S") + "-")
print("Directory for generated annotation files: " + str(resultsDir))
javac_switches = jc['javac_switches']
cp = javac_switches['classpath']
if 'processor' in javac_switches and len(processorArg) == 2:
processorArg[1] += "," + javac_switches['processor']
java_files = jc['java_files']
# delombok
delombok = False
jars = cp.split(":")
lombokjar = ""
for jar in jars:
# This should catch only the Lombok jar, because it's based
# on Lombok's Maven coordinates. First is the Maven repo file structure;
# second is the gradle cache's file structure.
lombok_dirs = ["/org/projectlombok/lombok/", "/org.projectlombok/lombok/"]
if any([x in jar for x in lombok_dirs]):
lombokjar = jar
break
# must wait until here to supply the classpath without lombok
if lombokjar != "":
# delombok takes a directory as input rather than individual source files,
# so this guesses at what the correct top-level directory is. It's a hack,
# but it should work for Maven and Gradle projects that follow the default
# conventions. For compilation to make sense, there must be at least one
# Java file, so this access should be safe.
anySrcFile = java_files[0]
standardSrcDir = "src/main/java/"
standardSrcIndex = anySrcFile.index(standardSrcDir)
            if standardSrcIndex != -1:
srcDir = anySrcFile[:standardSrcIndex]
lombok_cmd = ["java", "-jar", lombokjar, "delombok",
srcDir + "/src/main/java/", "-d", srcDir + "/delombok/main/java",
"-c", cp]
common.run_cmd(lombok_cmd, args, "wpi")
# replace the original source files with the delombok'd code, so that
# the actual javac commands don't need to be modified
dir_util.copy_tree(srcDir + "/delombok/", srcDir + "/src/")
# for modifying the checker command in each iteration
delombok = True
# include processor path in the class path if it is present
pp = ''
if 'processorpath' in javac_switches:
pp = javac_switches['processorpath'] + ':'
if args.quals:
cp += args.quals + ':'
if args.lib_dir:
cp += pp + args.lib_dir + ':'
release8 = False
other_args = []
for k, v in list(javac_switches.items()):
if k not in ignored_options and not k.startswith(ignored_options_prefixes):
if k == "source" or k == "target" or k == "-release":
# If the source/target is < 8, change it to 8.
# The CF is generally incompatible with java versions below 8, so
# this tries treating the code as Java 8 source. If it doesn't work,
# that's okay - there is no guarantee that DLJC will faithfully reproduce
# the build, and this is the best that DLJC can do in this situation.
if v in ["1.5", "5", "1.6", "6", "1.7", "7", "1.8"]:
v = "8"
if v == "8":
release8 = True
# Do not use source/target, because Java 11 JVMs will
# crash on some classes, e.g.
# https://bugs.openjdk.java.net/browse/JDK-8212636.
# Use --release instead.
if jdkVersion == 11:
k = "-release"
elif jdkVersion == 8 and k == "-release":
# don't try to use --release on a Java 8 JVM, which doesn't support it
v = False
# Combine --add-opens into a single arg with equals, so we
# can more easily remove key and value for release8, below:
if v is not None and v is not True and k.startswith("-add-opens"):
other_args.append("-" + k + "=" + v)
else:
if v is None or v is not False:
other_args.append("-" + k)
if v is not None and v is not True:
other_args.append(str(v))
checker_command += check.getArgumentsByVersion(jdkVersion, other_args)
if release8:
# Avoid javac "error: option --add-opens not allowed with target 1.8"
checker_command = [arg for arg in checker_command if not arg.startswith("--add-opens")]
other_args = [arg for arg in other_args if not arg.startswith("--add-opens")]
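        # Iterate WPI until a fixpoint: stop once two consecutive iterations produce identical .ajava files.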
while diffResult:
iterationCheckerCmd = checker_command
# TODO: the switch to ajava files instead of stub files should make the separate stubs argument
# to dljc unnecessary, as there's no longer any need to combine stub lists.
# TODO: do we need to treat the -Aajava argument the same way? I.e., will this work if the user
# supplies their own -Aajava= argument as part of the extraJavacArgs argument?
if args.stubs:
iterationCheckerCmd.append("-Astubs=" + str(args.stubs))
iterationAjavaDirs = ajavaDirs.copy()
if args.ajava:
iterationAjavaDirs.append(str(args.ajava))
if iterationAjavaDirs:
iterationCheckerCmd.append(
"-Aajava=" + ":".join(iterationAjavaDirs))
# suppress all type.anno.before.modifier warnings, because delombok
# prints annotations in the wrong place
if delombok:
iterationCheckerCmd.append("-AsuppressWarnings=type.anno.before.modifier")
pprint.pformat(jc)
cmd = iterationCheckerCmd + ["-classpath", cp] + processorArg + other_args + java_files
stats = common.run_cmd(cmd + ["-Ainfer=ajava", "-Awarns"], args, 'wpi')
# process outputs
# move the old wpi files, add them to ajava path
previousIterationDir = os.path.join(resultsDir, "iteration" + str(iteration))
os.mkdir(previousIterationDir)
iteration += 1
try:
ajavaFiles = os.listdir(wpiDir)
except OSError as e:
print("No WPI outputs were discovered; it is likely that WPI failed or the Checker Framework crashed.")
print("Check the file " + os.path.join(os.getcwd(), 'dljc-out', 'wpi.log') + " for more information.")
raise e
for ajavaFile in ajavaFiles:
shutil.move(os.path.join(wpiDir, ajavaFile),
previousIterationDir)
ajavaDirs.append(previousIterationDir)
if len(ajavaDirs) > 1:
dcmp = dircmp(ajavaDirs[-1], ajavaDirs[-2])
diffResult = has_differing_files(dcmp)
# Run one final time without "-Awarns", for the final user output.
common.run_cmd(cmd, args, 'wpi')
def has_differing_files(dcmp):
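    """Return a truthy value if dcmp (a filecmp.dircmp) has differing files at any depth."""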
return (dcmp.diff_files
or any(map(has_differing_files, dcmp.subdirs.values())))
| []
| []
| ["CHECKERFRAMEWORK"]
| [] | ["CHECKERFRAMEWORK"] | python | 1 | 0 | |
tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import (
FeaturestoreOnlineServingServiceAsyncClient,
)
from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import (
FeaturestoreOnlineServingServiceClient,
)
from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import (
transports,
)
from google.cloud.aiplatform_v1.types import feature_selector
from google.cloud.aiplatform_v1.types import featurestore_online_service
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert (
FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(None) is None
)
assert (
FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(
api_mtls_endpoint
)
== api_mtls_endpoint
)
assert (
FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(
sandbox_endpoint
)
== sandbox_mtls_endpoint
)
assert (
FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(
sandbox_mtls_endpoint
)
== sandbox_mtls_endpoint
)
assert (
FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class",
[
FeaturestoreOnlineServingServiceClient,
FeaturestoreOnlineServingServiceAsyncClient,
],
)
def test_featurestore_online_serving_service_client_from_service_account_info(
client_class,
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"),
(
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_featurestore_online_serving_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class",
[
FeaturestoreOnlineServingServiceClient,
FeaturestoreOnlineServingServiceAsyncClient,
],
)
def test_featurestore_online_serving_service_client_from_service_account_file(
client_class,
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_featurestore_online_serving_service_client_get_transport_class():
transport = FeaturestoreOnlineServingServiceClient.get_transport_class()
available_transports = [
transports.FeaturestoreOnlineServingServiceGrpcTransport,
]
assert transport in available_transports
transport = FeaturestoreOnlineServingServiceClient.get_transport_class("grpc")
assert transport == transports.FeaturestoreOnlineServingServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
FeaturestoreOnlineServingServiceClient,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
"grpc",
),
(
FeaturestoreOnlineServingServiceAsyncClient,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
FeaturestoreOnlineServingServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FeaturestoreOnlineServingServiceClient),
)
@mock.patch.object(
FeaturestoreOnlineServingServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient),
)
def test_featurestore_online_serving_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(
FeaturestoreOnlineServingServiceClient, "get_transport_class"
) as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(
FeaturestoreOnlineServingServiceClient, "get_transport_class"
) as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
FeaturestoreOnlineServingServiceClient,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
"grpc",
"true",
),
(
FeaturestoreOnlineServingServiceAsyncClient,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
FeaturestoreOnlineServingServiceClient,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
"grpc",
"false",
),
(
FeaturestoreOnlineServingServiceAsyncClient,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
FeaturestoreOnlineServingServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FeaturestoreOnlineServingServiceClient),
)
@mock.patch.object(
FeaturestoreOnlineServingServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_featurestore_online_serving_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class",
[
FeaturestoreOnlineServingServiceClient,
FeaturestoreOnlineServingServiceAsyncClient,
],
)
@mock.patch.object(
FeaturestoreOnlineServingServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FeaturestoreOnlineServingServiceClient),
)
@mock.patch.object(
FeaturestoreOnlineServingServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient),
)
def test_featurestore_online_serving_service_client_get_mtls_endpoint_and_cert_source(
client_class,
):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
FeaturestoreOnlineServingServiceClient,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
"grpc",
),
(
FeaturestoreOnlineServingServiceAsyncClient,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_featurestore_online_serving_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
FeaturestoreOnlineServingServiceClient,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
FeaturestoreOnlineServingServiceAsyncClient,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_featurestore_online_serving_service_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_featurestore_online_serving_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = FeaturestoreOnlineServingServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
FeaturestoreOnlineServingServiceClient,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
FeaturestoreOnlineServingServiceAsyncClient,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_featurestore_online_serving_service_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [featurestore_online_service.ReadFeatureValuesRequest, dict,]
)
def test_read_feature_values(request_type, transport: str = "grpc"):
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
response = client.read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse)
def test_read_feature_values_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values), "__call__"
) as call:
client.read_feature_values()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
@pytest.mark.asyncio
async def test_read_feature_values_async(
transport: str = "grpc_asyncio",
request_type=featurestore_online_service.ReadFeatureValuesRequest,
):
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
featurestore_online_service.ReadFeatureValuesResponse()
)
response = await client.read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse)
@pytest.mark.asyncio
async def test_read_feature_values_async_from_dict():
await test_read_feature_values_async(request_type=dict)
def test_read_feature_values_field_headers():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = featurestore_online_service.ReadFeatureValuesRequest()
request.entity_type = "entity_type/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values), "__call__"
) as call:
call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
client.read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_read_feature_values_field_headers_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = featurestore_online_service.ReadFeatureValuesRequest()
request.entity_type = "entity_type/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
featurestore_online_service.ReadFeatureValuesResponse()
)
await client.read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"]
def test_read_feature_values_flattened():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.read_feature_values(entity_type="entity_type_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].entity_type
mock_val = "entity_type_value"
assert arg == mock_val
def test_read_feature_values_flattened_error():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.read_feature_values(
featurestore_online_service.ReadFeatureValuesRequest(),
entity_type="entity_type_value",
)
@pytest.mark.asyncio
async def test_read_feature_values_flattened_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
featurestore_online_service.ReadFeatureValuesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.read_feature_values(entity_type="entity_type_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].entity_type
mock_val = "entity_type_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_read_feature_values_flattened_error_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.read_feature_values(
featurestore_online_service.ReadFeatureValuesRequest(),
entity_type="entity_type_value",
)
@pytest.mark.parametrize(
"request_type",
[featurestore_online_service.StreamingReadFeatureValuesRequest, dict,],
)
def test_streaming_read_feature_values(request_type, transport: str = "grpc"):
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iter(
[featurestore_online_service.ReadFeatureValuesResponse()]
)
response = client.streaming_read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert (
args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest()
)
# Establish that the response is the type that we expect.
for message in response:
assert isinstance(
message, featurestore_online_service.ReadFeatureValuesResponse
)
def test_streaming_read_feature_values_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values), "__call__"
) as call:
client.streaming_read_feature_values()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert (
args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest()
)
@pytest.mark.asyncio
async def test_streaming_read_feature_values_async(
transport: str = "grpc_asyncio",
request_type=featurestore_online_service.StreamingReadFeatureValuesRequest,
):
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(
side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]
)
response = await client.streaming_read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert (
args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest()
)
# Establish that the response is the type that we expect.
message = await response.read()
assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse)
@pytest.mark.asyncio
async def test_streaming_read_feature_values_async_from_dict():
await test_streaming_read_feature_values_async(request_type=dict)
def test_streaming_read_feature_values_field_headers():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = featurestore_online_service.StreamingReadFeatureValuesRequest()
request.entity_type = "entity_type/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values), "__call__"
) as call:
call.return_value = iter(
[featurestore_online_service.ReadFeatureValuesResponse()]
)
client.streaming_read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_streaming_read_feature_values_field_headers_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = featurestore_online_service.StreamingReadFeatureValuesRequest()
request.entity_type = "entity_type/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values), "__call__"
) as call:
call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(
side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]
)
await client.streaming_read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"]
def test_streaming_read_feature_values_flattened():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iter(
[featurestore_online_service.ReadFeatureValuesResponse()]
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.streaming_read_feature_values(entity_type="entity_type_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].entity_type
mock_val = "entity_type_value"
assert arg == mock_val
def test_streaming_read_feature_values_flattened_error():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.streaming_read_feature_values(
featurestore_online_service.StreamingReadFeatureValuesRequest(),
entity_type="entity_type_value",
)
@pytest.mark.asyncio
async def test_streaming_read_feature_values_flattened_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iter(
[featurestore_online_service.ReadFeatureValuesResponse()]
)
call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.streaming_read_feature_values(
entity_type="entity_type_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].entity_type
mock_val = "entity_type_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_streaming_read_feature_values_flattened_error_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.streaming_read_feature_values(
featurestore_online_service.StreamingReadFeatureValuesRequest(),
entity_type="entity_type_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FeaturestoreOnlineServingServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = FeaturestoreOnlineServingServiceClient(
client_options=options, transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = FeaturestoreOnlineServingServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FeaturestoreOnlineServingServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = FeaturestoreOnlineServingServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.FeaturestoreOnlineServingServiceGrpcTransport,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport, transports.FeaturestoreOnlineServingServiceGrpcTransport,
)
def test_featurestore_online_serving_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.FeaturestoreOnlineServingServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_featurestore_online_serving_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.FeaturestoreOnlineServingServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"read_feature_values",
"streaming_read_feature_values",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_featurestore_online_serving_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FeaturestoreOnlineServingServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_featurestore_online_serving_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FeaturestoreOnlineServingServiceTransport()
adc.assert_called_once()
def test_featurestore_online_serving_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
FeaturestoreOnlineServingServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.FeaturestoreOnlineServingServiceGrpcTransport,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
],
)
def test_featurestore_online_serving_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.FeaturestoreOnlineServingServiceGrpcTransport, grpc_helpers),
(
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
grpc_helpers_async,
),
],
)
def test_featurestore_online_serving_service_transport_create_channel(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.FeaturestoreOnlineServingServiceGrpcTransport,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
],
)
def test_featurestore_online_serving_service_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_featurestore_online_serving_service_host_no_port():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com"
),
)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_featurestore_online_serving_service_host_with_port():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com:8000"
),
)
assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_featurestore_online_serving_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_featurestore_online_serving_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.FeaturestoreOnlineServingServiceGrpcTransport,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
],
)
def test_featurestore_online_serving_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.FeaturestoreOnlineServingServiceGrpcTransport,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
],
)
def test_featurestore_online_serving_service_transport_channel_mtls_with_adc(
transport_class,
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_entity_type_path():
project = "squid"
location = "clam"
featurestore = "whelk"
entity_type = "octopus"
expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(
project=project,
location=location,
featurestore=featurestore,
entity_type=entity_type,
)
actual = FeaturestoreOnlineServingServiceClient.entity_type_path(
project, location, featurestore, entity_type
)
assert expected == actual
def test_parse_entity_type_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"featurestore": "cuttlefish",
"entity_type": "mussel",
}
path = FeaturestoreOnlineServingServiceClient.entity_type_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_entity_type_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = FeaturestoreOnlineServingServiceClient.common_billing_account_path(
billing_account
)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = FeaturestoreOnlineServingServiceClient.common_billing_account_path(
**expected
)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path(
path
)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder,)
actual = FeaturestoreOnlineServingServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = FeaturestoreOnlineServingServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization,)
actual = FeaturestoreOnlineServingServiceClient.common_organization_path(
organization
)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = FeaturestoreOnlineServingServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project,)
actual = FeaturestoreOnlineServingServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = FeaturestoreOnlineServingServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = FeaturestoreOnlineServingServiceClient.common_location_path(
project, location
)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = FeaturestoreOnlineServingServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.FeaturestoreOnlineServingServiceTransport, "_prep_wrapped_messages"
) as prep:
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.FeaturestoreOnlineServingServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = FeaturestoreOnlineServingServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(
FeaturestoreOnlineServingServiceClient,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
),
(
FeaturestoreOnlineServingServiceAsyncClient,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
TrainAndTest/Spectrogram/train_domres_egs.py | #!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: [email protected]
@Software: PyCharm
@File: train_lores10_kaldi.py
@Time: 2020/4/4 11:14 AM
@Overview:
"""
from __future__ import print_function
import argparse
import os
import os.path as osp
import sys
import time
# Version conflict
import warnings
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torchvision.transforms as transforms
from kaldi_io import read_mat, read_vec_flt
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from torch.optim.lr_scheduler import MultiStepLR, ExponentialLR
from tqdm import tqdm
from Define_Model.LossFunction import CenterLoss
from Define_Model.SoftmaxLoss import AngleSoftmaxLoss, AngleLinear, AdditiveMarginLinear, AMSoftmaxLoss
from Define_Model.model import PairwiseDistance
from Process_Data import constants as c
from Process_Data.KaldiDataset import ScriptTestDataset, KaldiExtractDataset, \
ScriptVerifyDataset
from Process_Data.LmdbDataset import EgsDataset
from Process_Data.audio_processing import concateinputfromMFB, to2tensor, varLengthFeat, ConcateVarInput
from Process_Data.audio_processing import toMFB, totensor, truncatedinput, read_audio
from TrainAndTest.common_func import create_optimizer, create_model, verification_test, verification_extract
from eval_metrics import evaluate_kaldi_eer, evaluate_kaldi_mindcf
from logger import NewLogger
warnings.filterwarnings("ignore")
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Speaker Recognition')
# Data options
parser.add_argument('--train-dir', type=str, help='path to dataset')
parser.add_argument('--valid-dir', type=str, help='path to dataset')
parser.add_argument('--test-dir', type=str, help='path to voxceleb1 test dataset')
parser.add_argument('--trials', type=str, default='trials', help='trials filename')
parser.add_argument('--domain', action='store_true', default=False, help='set domain in dataset')
parser.add_argument('--nj', default=12, type=int, metavar='NJOB', help='num of job')
parser.add_argument('--feat-format', type=str, default='kaldi', choices=['kaldi', 'npy'],
                    help='storage format of the input features (default: kaldi)')
parser.add_argument('--check-path', default='Data/checkpoint/LoResNet10/spect/soft',
help='folder to output model checkpoints')
parser.add_argument('--save-init', action='store_true', default=True, help='save the randomly initialized model as checkpoint 0')
parser.add_argument('--resume',
default='Data/checkpoint/LoResNet10/spect/soft/checkpoint_10.pth', type=str,
metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--start-epoch', default=1, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--epochs', type=int, default=20, metavar='E',
                    help='number of epochs to train (default: 20)')
parser.add_argument('--scheduler', default='multi', type=str,
                    metavar='SCH', help='learning rate scheduler to use (default: multi-step)')
parser.add_argument('--gamma', default=0.75, type=float,
                    metavar='GAMMA', help='decay factor for the exponential scheduler (default: 0.75)')
parser.add_argument('--milestones', default='10,15', type=str,
                    metavar='MIL', help='comma-separated epochs at which the multi-step scheduler decays the lr (default: 10,15)')
parser.add_argument('--min-softmax-epoch', type=int, default=40, metavar='MINEPOCH',
                    help='minimum epoch for initial parameter using softmax (default: 40)')
parser.add_argument('--veri-pairs', type=int, default=12800, metavar='VP',
                    help='number of verification pairs to sample for testing (default: 12800)')
# Training options
# Model options
parser.add_argument('--model', type=str, help='model architecture to train (e.g. LoResNet)')
parser.add_argument('--resnet-size', default=8, type=int,
                    metavar='RES', help='depth of the ResNet (default: 8)')
parser.add_argument('--inst-norm', action='store_true', default=False,
                    help='replace batchnorm with instance norm')
parser.add_argument('--channels', default='64,128,256', type=str,
                    metavar='CHA', help='comma-separated channel sizes of the conv layers (default: 64,128,256)')
parser.add_argument('--feat-dim', default=161, type=int, metavar='FEAT',
help='acoustic feature dimension')
parser.add_argument('--remove-vad', action='store_true', default=False,
                    help='remove the VAD component from the input features')
parser.add_argument('--alpha', default=12, type=float, metavar='FEAT',
                    help='scaling factor alpha used for feature normalization (default: 12)')
parser.add_argument('--kernel-size', default='5,5', type=str, metavar='KE',
help='kernel size of conv filters')
parser.add_argument('--cos-sim', action='store_true', default=True,
help='using Cosine similarity')
parser.add_argument('--avg-size', type=int, default=4, metavar='ES',
                    help='output size of the average pooling layer (default: 4)')
parser.add_argument('--embedding-size-a', type=int, default=128, metavar='ES',
help='Dimensionality of the embedding')
parser.add_argument('--embedding-size-b', type=int, default=64, metavar='ES',
help='Dimensionality of the embedding')
parser.add_argument('--embedding-size-o', type=int, default=32, metavar='ES',
help='Dimensionality of the embedding')
parser.add_argument('--batch-size', type=int, default=128, metavar='BS',
help='input batch size for training (default: 128)')
parser.add_argument('--input-per-spks', type=int, default=224, metavar='IPFT',
                    help='number of training samples drawn per speaker (default: 224)')
parser.add_argument('--num-valid', type=int, default=5, metavar='IPFT',
                    help='number of validation samples held out per speaker (default: 5)')
parser.add_argument('--test-input-per-file', type=int, default=4, metavar='IPFT',
                    help='number of chunks sampled per utterance for testing (default: 4)')
parser.add_argument('--test-batch-size', type=int, default=1, metavar='BST',
                    help='input batch size for testing (default: 1)')
parser.add_argument('--dropout-p', type=float, default=0., metavar='BST',
                    help='dropout probability (default: 0.0)')
# loss configure
parser.add_argument('--loss-type', type=str, default='soft', choices=['soft', 'asoft', 'center', 'amsoft'],
                    help='loss function used for speaker classification (default: soft)')
parser.add_argument('--finetune', action='store_true', default=False,
                    help='finetune the model (the classifier gets a larger learning rate)')
parser.add_argument('--loss-ratio', type=float, default=0.1, metavar='LOSSRATIO',
                    help='weight of the center loss term (default: 0.1)')
parser.add_argument('--dom-ratio', type=float, default=0.1, metavar='DOMAINLOSSRATIO',
                    help='weight of the domain classification loss (default: 0.1)')
parser.add_argument('--sim-ratio', type=float, default=0.1, metavar='SIMLOSSRATIO',
                    help='weight of the speaker/domain embedding similarity loss (default: 0.1)')
# args for additive margin-softmax
parser.add_argument('--margin', type=float, default=0.3, metavar='MARGIN',
                    help='the margin value for the additive margin softmax loss (default: 0.3)')
parser.add_argument('--s', type=float, default=15, metavar='S',
                    help='the scale value for the additive margin softmax loss (default: 15)')
# args for a-softmax
parser.add_argument('--m', type=int, default=3, metavar='M',
                    help='the margin value for the angular softmax loss function (default: 3)')
parser.add_argument('--lambda-min', type=int, default=5, metavar='S',
                    help='minimum annealing lambda for the angular softmax loss (default: 5)')
parser.add_argument('--lambda-max', type=float, default=1000, metavar='S',
                    help='maximum annealing lambda for the angular softmax loss (default: 1000)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR', help='learning rate (default: 0.1)')
parser.add_argument('--lr-decay', default=0, type=float, metavar='LRD',
                    help='learning rate decay ratio (default: 0)')
parser.add_argument('--weight-decay', default=5e-4, type=float,
                    metavar='WEI', help='weight decay (default: 5e-4)')
parser.add_argument('--momentum', default=0.9, type=float,
metavar='MOM', help='momentum for sgd (default: 0.9)')
parser.add_argument('--dampening', default=0, type=float,
metavar='DAM', help='dampening for sgd (default: 0.0)')
parser.add_argument('--optimizer', default='sgd', type=str,
                    metavar='OPT', help='The optimizer to use (default: sgd)')
# Device options
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--gpu-id', default='1', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--seed', type=int, default=123456, metavar='S',
help='random seed (default: 0)')
parser.add_argument('--log-interval', type=int, default=1, metavar='LI',
help='how many batches to wait before logging training status')
parser.add_argument('--acoustic-feature', choices=['fbank', 'spectrogram', 'mfcc'], default='fbank',
help='choose the acoustic features type.')
parser.add_argument('--makemfb', action='store_true', default=False,
help='need to make mfb file')
parser.add_argument('--makespec', action='store_true', default=False,
help='need to make spectrograms file')
args = parser.parse_args()
# Set the device to use by setting CUDA_VISIBLE_DEVICES env variable in
# order to prevent any memory allocation on unused GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
args.cuda = not args.no_cuda and torch.cuda.is_available()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# torch.multiprocessing.set_sharing_strategy('file_system')
if args.cuda:
torch.cuda.manual_seed_all(args.seed)
cudnn.benchmark = True
# create logger Define visulaize SummaryWriter instance
writer = SummaryWriter(logdir=args.check_path, filename_suffix='_first')
sys.stdout = NewLogger(osp.join(args.check_path, 'log.txt'))
kwargs = {'num_workers': args.nj, 'pin_memory': False} if args.cuda else {}
if not os.path.exists(args.check_path):
os.makedirs(args.check_path)
opt_kwargs = {'lr': args.lr,
'lr_decay': args.lr_decay,
'weight_decay': args.weight_decay,
'dampening': args.dampening,
'momentum': args.momentum}
l2_dist = nn.CosineSimilarity(dim=1, eps=1e-6) if args.cos_sim else PairwiseDistance(2)
if args.acoustic_feature == 'fbank':
transform = transforms.Compose([
concateinputfromMFB(num_frames=c.NUM_FRAMES_SPECT, remove_vad=args.remove_vad),
# varLengthFeat(),
to2tensor()
])
transform_T = transforms.Compose([
ConcateVarInput(num_frames=c.NUM_FRAMES_SPECT, remove_vad=args.remove_vad),
# to2tensor()
])
transform_V = transforms.Compose([
varLengthFeat(remove_vad=args.remove_vad),
to2tensor()
])
else:
transform = transforms.Compose([
truncatedinput(),
toMFB(),
totensor(),
# tonormal()
])
file_loader = read_audio
# pdb.set_trace()
torch.multiprocessing.set_sharing_strategy('file_system')
if args.feat_format == 'kaldi':
file_loader = read_mat
elif args.feat_format == 'npy':
file_loader = np.load
train_dir = EgsDataset(dir=args.train_dir, feat_dim=args.feat_dim, loader=file_loader, transform=transform,
domain=args.domain)
test_dir = ScriptTestDataset(dir=args.test_dir, loader=np.load, transform=transform_T)
if len(test_dir) < args.veri_pairs:
args.veri_pairs = len(test_dir)
print('There are %d verification pairs.' % len(test_dir))
else:
test_dir.partition(args.veri_pairs)
valid_dir = EgsDataset(dir=args.valid_dir, feat_dim=args.feat_dim, loader=file_loader, transform=transform,
domain=args.domain)
def main():
# Views the training images and displays the distance on anchor-negative and anchor-positive
# test_display_triplet_distance = False
# print the experiment configuration
print('\nCurrent time is \33[91m{}\33[0m.'.format(str(time.asctime())))
print('Parsed options: {}'.format(vars(args)))
print('Number of Speakers: {}.\n'.format(train_dir.num_spks))
# instantiate model and initialize weights
kernel_size = args.kernel_size.split(',')
kernel_size = [int(x) for x in kernel_size]
padding = [int((x - 1) / 2) for x in kernel_size]
kernel_size = tuple(kernel_size)
padding = tuple(padding)
channels = args.channels.split(',')
channels = [int(x) for x in channels]
model_kwargs = {'embedding_size_a': args.embedding_size_a,
'embedding_size_b': args.embedding_size_b,
'embedding_size_o': args.embedding_size_o,
'inst_norm': args.inst_norm,
'resnet_size': args.resnet_size,
'num_classes_a': train_dir.num_spks,
'num_classes_b': train_dir.num_doms,
'channels': channels,
'avg_size': args.avg_size,
'alpha': args.alpha,
'kernel_size': kernel_size,
'padding': padding,
'dropout_p': args.dropout_p}
print('Model options: {}'.format(model_kwargs))
model = create_model(args.model, **model_kwargs)
start_epoch = 0
if args.save_init and not args.finetune:
check_path = '{}/checkpoint_{}.pth'.format(args.check_path, start_epoch)
torch.save(model, check_path)
if args.resume:
if os.path.isfile(args.resume):
print('=> loading checkpoint {}'.format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
filtered = {k: v for k, v in checkpoint['state_dict'].items() if 'num_batches_tracked' not in k}
model_dict = model.state_dict()
model_dict.update(filtered)
model.load_state_dict(model_dict)
#
# model.dropout.p = args.dropout_p
else:
print('=> no checkpoint found at {}'.format(args.resume))
ce_criterion = nn.CrossEntropyLoss()
if args.loss_type == 'soft':
xe_criterion = None
    elif args.loss_type == 'asoft':
        ce_criterion = None
        model.classifier_spk = AngleLinear(in_features=args.embedding_size_a, out_features=train_dir.num_spks, m=args.m)
        xe_criterion = AngleSoftmaxLoss(lambda_min=args.lambda_min, lambda_max=args.lambda_max)
    elif args.loss_type == 'center':
        xe_criterion = CenterLoss(num_classes=train_dir.num_spks, feat_dim=args.embedding_size_a)
    elif args.loss_type == 'amsoft':
        ce_criterion = None
        model.classifier_spk = AdditiveMarginLinear(feat_dim=args.embedding_size_a, n_classes=train_dir.num_spks)
        xe_criterion = AMSoftmaxLoss(margin=args.margin, s=args.s)
optimizer = create_optimizer(model.parameters(), args.optimizer, **opt_kwargs)
if args.loss_type == 'center':
optimizer = torch.optim.SGD([{'params': xe_criterion.parameters(), 'lr': args.lr * 5},
{'params': model.parameters()}],
lr=args.lr, weight_decay=args.weight_decay,
momentum=args.momentum)
if args.finetune:
if args.loss_type == 'asoft' or args.loss_type == 'amsoft':
classifier_params = list(map(id, model.classifier.parameters()))
rest_params = filter(lambda p: id(p) not in classifier_params, model.parameters())
optimizer = torch.optim.SGD([{'params': model.classifier.parameters(), 'lr': args.lr * 5},
{'params': rest_params}],
lr=args.lr, weight_decay=args.weight_decay,
momentum=args.momentum)
if args.scheduler == 'exp':
scheduler = ExponentialLR(optimizer, gamma=args.gamma)
else:
milestones = args.milestones.split(',')
milestones = [int(x) for x in milestones]
milestones.sort()
scheduler = MultiStepLR(optimizer, milestones=milestones, gamma=0.1)
ce = [ce_criterion, xe_criterion]
start = args.start_epoch + start_epoch
print('Start epoch is : ' + str(start))
# start = 0
end = start + args.epochs
train_loader = torch.utils.data.DataLoader(train_dir, batch_size=args.batch_size, shuffle=False, **kwargs)
valid_loader = torch.utils.data.DataLoader(valid_dir, batch_size=int(args.batch_size / 2), shuffle=False, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dir, batch_size=args.test_batch_size, shuffle=False, **kwargs)
# sitw_test_loader = torch.utils.data.DataLoader(sitw_test_dir, batch_size=args.test_batch_size,
# shuffle=False, **kwargs)
# sitw_dev_loader = torch.utils.data.DataLoader(sitw_dev_part, batch_size=args.test_batch_size, shuffle=False,
# **kwargs)
if args.cuda:
model = model.cuda()
for i in range(len(ce)):
if ce[i] != None:
ce[i] = ce[i].cuda()
print('Dropout is {}.'.format(model.dropout_p))
for epoch in range(start, end):
# pdb.set_trace()
print('\n\33[1;34m Current \'{}\' learning rate is '.format(args.optimizer), end='')
for param_group in optimizer.param_groups:
print('{:.5f} '.format(param_group['lr']), end='')
print(' \33[0m')
if epoch % 2 == 1 and epoch != (end - 1):
test(test_loader, valid_loader, model, epoch)
train(train_loader, model, ce, optimizer, epoch)
if epoch % 4 == 1 or epoch == (end - 1):
check_path = '{}/checkpoint_{}.pth'.format(args.check_path, epoch)
torch.save({'epoch': epoch,
'state_dict': model.state_dict(),
'criterion': ce},
check_path)
scheduler.step()
# exit(1)
extract_dir = KaldiExtractDataset(dir=args.test_dir, transform=transform_V, filer_loader=np.load)
extract_loader = torch.utils.data.DataLoader(extract_dir, batch_size=1, shuffle=False, **kwargs)
xvector_dir = args.check_path
xvector_dir = xvector_dir.replace('checkpoint', 'xvector')
verification_extract(extract_loader, model, xvector_dir)
verify_dir = ScriptVerifyDataset(dir=args.test_dir, trials_file=args.trials, xvectors_dir=xvector_dir,
loader=read_vec_flt)
verify_loader = torch.utils.data.DataLoader(verify_dir, batch_size=64, shuffle=False, **kwargs)
verification_test(test_loader=verify_loader, dist_type=('cos' if args.cos_sim else 'l2'),
log_interval=args.log_interval)
writer.close()
def train(train_loader, model, ce, optimizer, epoch):
# switch to evaluate mode
model.train()
    # DANN-style schedule for the gradient reversal layer: lambda ramps smoothly from 0 towards 1 over training
    lambda_ = 2. / (1 + np.exp(-10. * epoch / args.epochs)) - 1.
    model.grl.set_lambda(lambda_)
correct_a = 0.
correct_b = 0.
total_datasize = 0.
total_loss_a = 0.
total_loss_b = 0.
total_loss_c = 0.
total_loss = 0.
# for param_group in optimizer.param_groups:
# print('\33[1;34m Optimizer \'{}\' learning rate is {}.\33[0m'.format(args.optimizer, param_group['lr']))
ce_criterion, xe_criterion = ce
pbar = tqdm(enumerate(train_loader))
output_softmax = nn.Softmax(dim=1)
for batch_idx, (data, label_a, label_b) in pbar:
if args.cuda:
data = data.cuda()
data, label_a = Variable(data), Variable(label_a)
label_b = Variable(label_b)
logits_spk, feat_spk, logits_dom, feat_dom = model(data)
true_labels_a = label_a.cuda()
true_labels_b = label_b.cuda()
# pdb.set_trace()
        # cos_theta, phi_theta = classifier
        spk_label = logits_spk
        dom_label = logits_dom
if args.loss_type == 'soft':
spk_loss = ce_criterion(logits_spk, true_labels_a)
elif args.loss_type == 'asoft':
spk_label, _ = spk_label
spk_loss = xe_criterion(logits_spk, true_labels_a)
elif args.loss_type == 'center':
loss_cent = ce_criterion(logits_spk, true_labels_a)
loss_xent = xe_criterion(feat_spk, true_labels_a)
spk_loss = args.loss_ratio * loss_xent + loss_cent
elif args.loss_type == 'amsoft':
spk_loss = xe_criterion(logits_spk, true_labels_a)
        dom_loss = (args.dom_ratio * ce_criterion(dom_label, true_labels_b))
loss = spk_loss + dom_loss
if args.sim_ratio:
spk_dom_sim_loss = torch.cosine_similarity(feat_spk, feat_dom, dim=1).pow(2).mean()
spk_dom_sim_loss = args.sim_ratio * spk_dom_sim_loss
loss += spk_dom_sim_loss
predicted_labels_a = output_softmax(spk_label)
predicted_one_labels_a = torch.max(predicted_labels_a, dim=1)[1]
minibatch_correct_a = float((predicted_one_labels_a.cuda() == true_labels_a.cuda()).sum().item())
minibatch_acc_a = minibatch_correct_a / len(predicted_one_labels_a)
correct_a += minibatch_correct_a
        predicted_labels_b = output_softmax(dom_label)
predicted_one_labels_b = torch.max(predicted_labels_b, dim=1)[1]
minibatch_correct_b = float((predicted_one_labels_b.cuda() == true_labels_b.cuda()).sum().item())
minibatch_acc_b = minibatch_correct_b / len(predicted_one_labels_b)
correct_b += minibatch_correct_b
total_datasize += len(predicted_one_labels_a)
total_loss_a += float(spk_loss.item())
total_loss_b += float(dom_loss.item())
total_loss_c += float(spk_dom_sim_loss.item()) if args.sim_ratio else 0.
total_loss += float(loss.item())
# compute gradient and update weights
optimizer.zero_grad()
loss.backward()
if args.loss_type == 'center' and args.loss_ratio != 0:
for param in xe_criterion.parameters():
param.grad.data *= (1. / args.loss_ratio)
optimizer.step()
if batch_idx % args.log_interval == 0:
pbar.set_description(
'Train Epoch {:2d}: [{:4d}/{:4d}({:3.0f}%)] AvgLoss: {:.4f} SpkLoss: {:.4f} DomLoss: {:.4f} ' \
'SimLoss: {:.4f} Batch Accuracy: Spk: {:.4f}%, Dom: {:.4f}%'.format(
epoch,
batch_idx,
len(train_loader),
100. * batch_idx / len(train_loader),
total_loss / (batch_idx + 1),
total_loss_a / (batch_idx + 1),
total_loss_b / (batch_idx + 1),
total_loss_c / (batch_idx + 1),
100. * minibatch_acc_a,
100. * minibatch_acc_b))
    print('\n\33[91mTrain Epoch {}: Avg loss: {:.4f} Spk Loss: {:.4f} Dom Loss: {:.4f} .'.format(
        epoch, total_loss / len(train_loader), total_loss_a / len(train_loader),
        total_loss_b / len(train_loader)))
print('Spk Accuracy:{:.4f}%, Dom Accuracy:{:.4f}%.\33[0m'.format(100 * correct_a / total_datasize,
100 * correct_b / total_datasize, ))
writer.add_scalar('Train/Spk_Accuracy', correct_a / total_datasize, epoch)
writer.add_scalar('Train/Dom_Accuracy', correct_b / total_datasize, epoch)
writer.add_scalar('Train/Loss', total_loss / len(train_loader), epoch)
torch.cuda.empty_cache()
def test(test_loader, valid_loader, model, epoch):
# switch to evaluate mode
model.eval()
valid_pbar = tqdm(enumerate(valid_loader))
softmax = nn.Softmax(dim=1)
correct_a = 0.
correct_b = 0.
total_datasize = 0.
for batch_idx, (data, label_a, label_b) in valid_pbar:
data = Variable(data.cuda())
# compute output
out_a, _, out_b, _ = model(data)
if args.loss_type == 'asoft':
predicted_labels_a, _ = out_a
else:
predicted_labels_a = out_a
predicted_labels_b = out_b
true_labels_a = Variable(label_a.cuda())
true_labels_b = Variable(label_b.cuda())
# pdb.set_trace()
predicted_one_labels_a = softmax(predicted_labels_a)
predicted_one_labels_a = torch.max(predicted_one_labels_a, dim=1)[1]
batch_correct_a = (predicted_one_labels_a.cuda() == true_labels_a.cuda()).sum().item()
minibatch_acc_a = float(batch_correct_a / len(predicted_one_labels_a))
correct_a += batch_correct_a
predicted_one_labels_b = softmax(predicted_labels_b)
predicted_one_labels_b = torch.max(predicted_one_labels_b, dim=1)[1]
batch_correct_b = (predicted_one_labels_b.cuda() == true_labels_b.cuda()).sum().item()
minibatch_acc_b = float(batch_correct_b / len(predicted_one_labels_b))
correct_b += batch_correct_b
total_datasize += len(predicted_one_labels_a)
if batch_idx % args.log_interval == 0:
valid_pbar.set_description(
'Valid Epoch: {:2d} [{:8d}/{:8d} ({:3.0f}%)] Batch Spk Accuracy: {:.4f}% Dom Accuracy: {:.4f}%'.format(
epoch,
batch_idx * len(data),
len(valid_loader.dataset),
100. * batch_idx / len(valid_loader),
100. * minibatch_acc_a,
100. * minibatch_acc_b
))
spk_valid_accuracy = 100. * correct_a / total_datasize
dom_valid_accuracy = 100. * correct_b / total_datasize
writer.add_scalar('Test/Spk_Valid_Accuracy', spk_valid_accuracy, epoch)
writer.add_scalar('Test/Dom_Valid_Accuracy', dom_valid_accuracy, epoch)
torch.cuda.empty_cache()
labels, distances = [], []
pbar = tqdm(enumerate(test_loader))
for batch_idx, (data_a, data_p, label) in pbar:
vec_a_shape = data_a.shape
vec_p_shape = data_p.shape
# pdb.set_trace()
data_a = data_a.reshape(vec_a_shape[0] * vec_a_shape[1], 1, vec_a_shape[2], vec_a_shape[3])
data_p = data_p.reshape(vec_p_shape[0] * vec_p_shape[1], 1, vec_p_shape[2], vec_p_shape[3])
if args.cuda:
data_a, data_p = data_a.cuda(), data_p.cuda()
data_a, data_p, label = Variable(data_a), Variable(data_p), Variable(label)
# compute output
_, out_a_, _, _ = model(data_a)
_, out_p_, _, _ = model(data_p)
# out_a = out_a_
# out_p = out_p_
out_a = out_a_.reshape(vec_a_shape[0], vec_a_shape[1], args.embedding_size_a).mean(dim=1)
out_p = out_p_.reshape(vec_p_shape[0], vec_p_shape[1], args.embedding_size_a).mean(dim=1)
dists = l2_dist.forward(out_a, out_p) # torch.sqrt(torch.sum((out_a - out_p) ** 2, 1)) # euclidean distance
# dists = dists.reshape(vec_shape[0], vec_shape[1]).mean(dim=1)
dists = dists.data.cpu().numpy()
distances.append(dists)
labels.append(label.data.cpu().numpy())
if batch_idx % args.log_interval == 0:
pbar.set_description('Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
epoch, batch_idx * len(data_a), len(test_loader.dataset), 100. * batch_idx / len(test_loader)))
labels = np.array([sublabel for label in labels for sublabel in label])
distances = np.array([subdist for dist in distances for subdist in dist])
eer, eer_threshold, accuracy = evaluate_kaldi_eer(distances, labels, cos=args.cos_sim, re_thre=True)
writer.add_scalar('Test/EER', 100. * eer, epoch)
writer.add_scalar('Test/Threshold', eer_threshold, epoch)
mindcf_01, mindcf_001 = evaluate_kaldi_mindcf(distances, labels)
writer.add_scalar('Test/mindcf-0.01', mindcf_01, epoch)
writer.add_scalar('Test/mindcf-0.001', mindcf_001, epoch)
dist_type = 'cos' if args.cos_sim else 'l2'
print('\nFor %s_distance, ' % dist_type)
    print(' \33[91mTest Spk EER is {:.4f}%, Threshold is {}'.format(100. * eer, eer_threshold))
print(' mindcf-0.01 {:.4f}, mindcf-0.001 {:.4f},'.format(mindcf_01, mindcf_001))
print(' Valid Spk Accuracy is %.4f %%, Dom Accuracy is %.4f %% .\33[0m' % (spk_valid_accuracy, dom_valid_accuracy))
torch.cuda.empty_cache()
if __name__ == '__main__':
main()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
main.go | package main
import (
"context"
"fmt"
"io/ioutil"
"os"
"os/signal"
"path/filepath"
"runtime"
"sync"
"syscall"
"time"
"github.com/sirupsen/logrus"
)
var log *logrus.Entry
var Version = "2.0.0"
var mu = &sync.Mutex{}
var wg = &sync.WaitGroup{}
var loglevel = "info"
var ctx context.Context
var cancel func()
func main() {
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
loglevel = os.Getenv("SNOWMAN_LOGLEVEL")
if loglevel == "" {
loglevel = "info"
}
log = logrus.NewEntry(logrus.New())
switch loglevel {
case "debug":
log.Logger.SetLevel(logrus.DebugLevel)
case "warn":
log.Logger.SetLevel(logrus.WarnLevel)
default:
loglevel = "info"
log.Logger.SetLevel(logrus.InfoLevel)
}
name := os.Getenv("SNOWMAN_NAME")
if name == "" {
name = "snowman"
}
log.Infof("Ryanteck RTK-000-00A GPIO Snowman '%s' version %s starting", name, Version)
go interruptWatcher()
man := NewSnowMan(name, log)
wg.Add(1)
go man.StartBackplane(ctx, wg)
err := man.Open()
if err != nil {
fmt.Printf("Could not open rpi: %s", err)
os.Exit(1)
}
defer man.Close()
wg.Add(1)
go man.Run(ctx, wg)
wg.Wait()
}
func interruptWatcher() {
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
for {
select {
case sig := <-sigs:
switch sig {
case syscall.SIGINT, syscall.SIGTERM:
log.Warnf("Shutting down on %s", sig)
cancel()
case syscall.SIGQUIT:
dumpGoRoutines()
}
case <-ctx.Done():
return
}
}
}
func dumpGoRoutines() {
mu.Lock()
defer mu.Unlock()
outname := filepath.Join(os.TempDir(), fmt.Sprintf("snowman-threaddump-%d-%d.txt", os.Getpid(), time.Now().UnixNano()))
buf := make([]byte, 1<<20)
stacklen := runtime.Stack(buf, true)
err := ioutil.WriteFile(outname, buf[:stacklen], 0644)
if err != nil {
log.Errorf("Could not produce thread dump: %s", err)
return
}
log.Warnf("Produced thread dump to %s", outname)
}
| [
"\"SNOWMAN_LOGLEVEL\"",
"\"SNOWMAN_NAME\""
]
| []
| [
"SNOWMAN_NAME",
"SNOWMAN_LOGLEVEL"
]
| [] | ["SNOWMAN_NAME", "SNOWMAN_LOGLEVEL"] | go | 2 | 0 | |
tests/types.go | package tests
import "time"
//go:generate ../orm -type Person,Employee,All
//go:generate ../orm -type Person -out ./external
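// Running `go generate` in this package invokes the orm tool referenced above twice:
// once to generate code for Person, Employee and All into this package, and once to
// generate code for Person into ./external (output file names are chosen by the tool).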
// Person for testing
type Person struct {
Name string
Age int
unexported bool
}
// Employee is a person who works
// This is a test case for struct embedding
type Employee struct {
Person
Salary int
}
// All is to test generation of variant fields and types
type All struct {
// Annotated
Auto int `sql:"primary key;autoincrement"`
NotNil string `sql:"not null"`
// Standard
Int int
Int8 int8
Int16 int16
Int32 int32
Int64 int64
UInt uint
UInt8 uint8
UInt16 uint16
UInt32 uint32
UInt64 uint64
Time time.Time
VarCharString string `sql:"type:VARCHAR(100)"`
VarCharByte []byte `sql:"type:VARCHAR(100)"`
String string
Bytes []byte
Bool bool
// Pointers
PInt *int
PInt8 *int8
PInt16 *int16
PInt32 *int32
PInt64 *int64
PUInt *uint
PUInt8 *uint8
PUInt16 *uint16
PUInt32 *uint32
PUInt64 *uint64
PTime *time.Time
PVarCharString *string `sql:"type:VARCHAR(100)"`
PVarCharByte *[]byte `sql:"type:VARCHAR(100)"`
PString *string
PBytes *[]byte
PBool *bool
// Special cases
// test that unexported are not generating columns
unexported int
// test a case where field is a reserved name
Select int
}
// Ignore contains ignored fields.
type Ignore struct {
ID int64
Data map[string]interface{} `sql:"-"`
}
| []
| []
| []
| [] | [] | go | null | null | null |
app.py | """
Example application using Tornado and Curl
"""
import os
import sys
import tornado.httpclient
import tornado.ioloop
import tornado.web
tornado.httpclient.AsyncHTTPClient.configure(
'tornado.curl_httpclient.CurlAsyncHTTPClient')
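# Note: the curl-based client configured above depends on pycurl; if pycurl is not
# installed, importing tornado.curl_httpclient fails when this module is loaded.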
class MainHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
client = tornado.httpclient.AsyncHTTPClient()
client.fetch('http://www.google.com/', self.handle_request)
def handle_request(self, response):
if response.error:
self.write('Error: {}'.format(response.error))
else:
self.write(response.body)
self.finish()
def main(args):
app = tornado.web.Application([
(r'/', MainHandler),
])
    port = int(os.getenv('PORT', 8000))
print >> sys.stderr, 'listening on port {}'.format(port)
app.listen(port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
try:
main(sys.argv[1:])
except KeyboardInterrupt:
print >> sys.stderr, 'interrupted'
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
perl/perl.bzl | # Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perl rules for Bazel"""
_perl_file_types = [".pl", ".pm", ".t"]
_perl_srcs_attr = attr.label_list(allow_files = _perl_file_types)
_perl_deps_attr = attr.label_list(
allow_files = False,
providers = ["transitive_perl_sources"],
)
_perl_data_attr = attr.label_list(
allow_files = True,
)
_perl_main_attr = attr.label(
allow_single_file = _perl_file_types,
)
_perl_env_attr = attr.string_dict()
def _collect_transitive_sources(ctx):
return depset(
ctx.files.srcs,
transitive = [dep.transitive_perl_sources for dep in ctx.attr.deps],
order = "postorder",
)
def _get_main_from_sources(ctx):
sources = ctx.files.srcs
if len(sources) != 1:
fail("Cannot infer main from multiple 'srcs'. Please specify 'main' attribute.", "main")
return sources[0]
def _perl_library_implementation(ctx):
transitive_sources = _collect_transitive_sources(ctx)
return struct(
runfiles = ctx.runfiles(collect_data = True),
transitive_perl_sources = transitive_sources,
)
def _is_identifier(name):
# Must be non-empty.
if name == None or len(name) == 0:
return False
# Must start with alpha or '_'
if not (name[0].isalpha() or name[0] == "_"):
return False
# Must consist of alnum characters or '_'s.
for c in name.elems():
if not (c.isalnum() or c == "_"):
return False
return True
PERL_STUB_TEMPLATE = """#!/usr/bin/perl
use strict;
use warnings;
use Data::Dumper qw(Dumper);
use Cwd qw(abs_path realpath);
use File::Basename;
use File::Spec::Functions;
sub main {{
my @args = @ARGV;
# Follow symlinks, looking for my module space.
my $stub_filename = abs_path($0);
my $module_space = '';
  while (1) {{
# Found it?
$module_space = $stub_filename . '.runfiles';
if (-d $module_space) {{
last;
}}
if (-l $stub_filename) {{
# Absolutize
$stub_filename = catfile(dirname($stub_filename), readlink $stub_filename);
      next;
    }}
    if ($0 =~ /(.*\.runfiles)\/.*/) {{
$module_space = $1;
last;
}}
print "Cannot find .runfiles directory for $0" and exit 1;
}}
print "$module_space\n";
my $main_filename = catfile($module_space, '{workspace_name}', '{main_path}');
{environment}
chdir catfile($module_space, '{workspace_name}');
exec($^X, $main_filename, @args);
}}
main()
"""
def _create_stub(workspace_name, executable_name, main_path, env, env_files):
environment = ""
for name, value in env.items():
if not _is_identifier(name):
fail("%s is not a valid environment variable name." % str(name))
environment += (" $ENV{{{key}}} = '{value}' " +
"unless defined $ENV{{{key}}};\n").format(
key = name,
value = value.replace("'", "\\'"),
)
for name, value in env_files.items():
if not _is_identifier(name):
fail("%s is not a valid environment variable name." % str(name))
environment += (" $ENV{{{key}}} = realpath(catfile($module_space, " +
"'{workspace_name}', '{value}')) " +
"unless defined $ENV{{{key}}};\n").format(
key = name,
value = value.replace("'", "\\'"),
workspace_name = workspace_name,
)
return PERL_STUB_TEMPLATE.format(
workspace_name = workspace_name,
executable_name = executable_name,
environment = environment,
main_path = main_path,
)
def _perl_binary_implementation(ctx):
transitive_sources = _collect_transitive_sources(ctx)
main = ctx.file.main
if main == None:
main = _get_main_from_sources(ctx)
ctx.actions.write(
output = ctx.outputs.executable,
content = _create_stub(
ctx.workspace_name,
ctx.outputs.executable.basename,
main.path,
ctx.attr.env,
ctx.attr.env_files,
),
is_executable = True,
)
return DefaultInfo(
files = depset([ctx.outputs.executable]),
default_runfiles = ctx.runfiles(
collect_data = True,
collect_default = True,
transitive_files = depset([ctx.outputs.executable], transitive = [transitive_sources]),
),
)
def _perl_test_implementation(ctx):
return _perl_binary_implementation(ctx)
perl_library = rule(
attrs = {
"srcs": _perl_srcs_attr,
"deps": _perl_deps_attr,
"data": _perl_data_attr,
},
implementation = _perl_library_implementation,
)
perl_binary = rule(
attrs = {
"srcs": _perl_srcs_attr,
"deps": _perl_deps_attr,
"data": _perl_data_attr,
"main": _perl_main_attr,
"env": _perl_env_attr,
"env_files": _perl_env_attr,
},
executable = True,
implementation = _perl_binary_implementation,
)
perl_test = rule(
attrs = {
"srcs": _perl_srcs_attr,
"deps": _perl_deps_attr,
"data": _perl_data_attr,
"main": _perl_main_attr,
"env": _perl_env_attr,
"env_files": _perl_env_attr,
},
executable = True,
test = True,
implementation = _perl_test_implementation,
)
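# Example (illustrative only, not part of these rule definitions): a BUILD file is
# expected to consume the rules above roughly as follows. Target, source and env
# names here are hypothetical.
#
# load("//perl:perl.bzl", "perl_binary", "perl_library", "perl_test")
#
# perl_library(
#     name = "greeting",
#     srcs = ["Greeting.pm"],
# )
#
# perl_binary(
#     name = "hello",
#     srcs = ["hello.pl"],
#     main = "hello.pl",
#     env = {"GREETING_LANG": "en"},
#     deps = [":greeting"],
# )
#
# perl_test(
#     name = "hello_test",
#     srcs = ["hello.t"],
#     deps = [":greeting"],
# )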
| []
| []
| []
| [] | [] | python | null | null | null |
main.go | package main
import (
"flag"
"github.com/ca-gip/kubi-members/internal/controller"
"github.com/ca-gip/kubi-members/internal/ldap"
membersclientset "github.com/ca-gip/kubi-members/pkg/generated/clientset/versioned"
projectclientset "github.com/ca-gip/kubi/pkg/generated/clientset/versioned"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
"os"
"path/filepath"
)
var (
masterURL string
kubeconfig string
)
func main() {
flag.StringVar(&kubeconfig, "kubeconfig", defaultKubeconfig(), "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
klog.InitFlags(nil)
flag.Parse()
// Load kube config
cfg, err := rest.InClusterConfig()
if err != nil {
cfg, err = clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
if err != nil {
klog.Fatalf("Error building kubeconfig: %s", err.Error())
}
}
// Generate clientsets
configMapClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
klog.Fatalf("Error building kubernetes configMapClient: %s", err.Error())
}
projectClient, err := projectclientset.NewForConfig(cfg)
if err != nil {
klog.Fatalf("Error building kubernetes projectClient: %s", err.Error())
}
membersClient, err := membersclientset.NewForConfig(cfg)
if err != nil {
klog.Fatalf("Error building kubernetes membersClient: %s", err.Error())
}
klog.Info("Creating LDAP client")
ldapClient := ldap.NewLdap()
	ctrl := controller.NewController(configMapClient, projectClient, membersClient, ldapClient)
	if err := ctrl.Run(); err != nil {
		klog.Fatalf("Error running controller: %s", err.Error())
	}
}
func defaultKubeconfig() string {
fname := os.Getenv("KUBECONFIG")
if fname != "" {
return fname
}
home, err := os.UserHomeDir()
if err != nil {
klog.Warningf("failed to get home directory: %v", err)
return ""
}
return filepath.Join(home, ".kube", "config")
}
| [
"\"KUBECONFIG\""
]
| []
| [
"KUBECONFIG"
]
| [] | ["KUBECONFIG"] | go | 1 | 0 | |
data/utils/sample.py | '''
samples from all raw data;
by default samples in a non-iid manner; namely, randomly selects users from
raw data until their cumulative amount of data exceeds the given number of
datapoints to sample (specified by --fraction argument);
ordering of original data points is not preserved in sampled data
'''
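# Example invocations (illustrative; dataset names, fractions and seeds are up to the caller):
#
#   python3 utils/sample.py --name sent140 --niid --fraction 0.05 --seed 42
#   python3 utils/sample.py --name sent140 --iid --u 0.01 --fraction 0.1
#
# Sampled output is written to <dataset>/data/sampled_data/ as JSON files.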
import argparse
import json
import os
import random
import time
from collections import OrderedDict
from constants import DATASETS, SEED_FILES
from util import iid_divide
parser = argparse.ArgumentParser()
parser.add_argument('--name',
help='name of dataset to parse; default: sent140;',
type=str,
choices=DATASETS,
default='sent140')
parser.add_argument('--iid',
help='sample iid;',
action="store_true")
parser.add_argument('--niid',
help="sample niid;",
dest='iid', action='store_false')
parser.add_argument('--union',
help="sample from union lists;",
dest='union', action='store_true')
parser.add_argument('--fraction',
help='fraction of all data to sample; default: 0.1;',
type=float,
default=0.1)
parser.add_argument('--u',
help=('number of users in iid data set; ignored in niid case;'
'represented as fraction of original total number of users; '
'default: 0.01;'),
type=float,
default=0.01)
parser.add_argument('--seed',
help='seed for random sampling of data',
type=int,
default=None)
parser.set_defaults(iid=False)
args = parser.parse_args()
print('------------------------------')
print('sampling data')
parent_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
data_dir = os.path.join(parent_path, args.name, 'data')
subdir = os.path.join(data_dir, 'all_data')
files = os.listdir(subdir)
files = [f for f in files if f.endswith('.json')]
rng_seed = (args.seed if (args.seed is not None and args.seed >= 0) else int(time.time()))
print ("Using seed {}".format(rng_seed))
rng = random.Random(rng_seed)
print (os.environ.get('LEAF_DATA_META_DIR'))
if os.environ.get('LEAF_DATA_META_DIR') is not None:
seed_fname = os.path.join(os.environ.get('LEAF_DATA_META_DIR'), SEED_FILES['sampling'])
with open(seed_fname, 'w+') as f:
f.write("# sampling_seed used by sampling script - supply as "
"--smplseed to preprocess.sh or --seed to utils/sample.py\n")
f.write(str(rng_seed))
print ("- random seed written out to {file}".format(file=seed_fname))
else:
print ("- using random seed '{seed}' for sampling".format(seed=rng_seed))
if args.union:
print("=== Sampling users for each union")
union_dir = os.path.join(data_dir, 'union')
path_file = os.path.join(union_dir, "union_list_path")
with open(path_file, "r") as f:
union_list_file = f.read()
os.remove(path_file)
with open(union_list_file, "r") as f:
union_list = json.load(f)
total_samples = sum(map(lambda union: sum(map(lambda c: c[1], union)), union_list))
print("total_samples:", total_samples)
unions = list(filter(lambda l: len(l) > 1, union_list))
singles = list(filter(lambda l: len(l) == 1, union_list))
print("Number of unions:", len(unions))
print("Number of singles:", len(singles))
union_num_samples = []
union_sample = []
samples_so_far = 0
for union in unions:
print("-"*80)
print("\tusers:", len(union))
samples_in_this_union = sum(map(lambda c: c[1], union))
print("\tsamples_in_this_union", samples_in_this_union)
frac = args.fraction * samples_in_this_union
print("\tfrac", frac)
selected_users = []
sample_count = 0
for id, samples in union:
if sample_count + samples > frac:
break
selected_users.append(id)
sample_count += samples
print("\tusers in sample:", len(selected_users))
print("\tsamples in sample:", sample_count)
union_sample.append(selected_users)
union_num_samples.append(sample_count)
samples_so_far += sample_count
samples_remain = total_samples * args.fraction - samples_so_far
print("samples remain:", samples_remain)
num_singles = 0
for single in singles:
samples_in_this_user = single[0][1]
id = single[0][0]
if samples_remain - samples_in_this_user < 0:
break
union_sample.append([id])
union_num_samples.append(samples_in_this_user)
samples_remain -= samples_in_this_user
num_singles += 1
union_names = ["union_%d" % i for i in range(len(unions))]
singles_names = ["single_%d" % i for i in range(num_singles)]
names = union_names + singles_names
print("NAMES AND LISTS MATCH:", len(union_sample) == len(names))
print("number of selected singles:", num_singles, "- total singles: ", len(singles))
union_data = dict([(name, {"x": [], "y": []}) for name in names])
for f in files:
print("Looking for users in",f)
file_dir = os.path.join(subdir, f)
with open(file_dir, 'r') as inf:
data = json.load(inf, object_pairs_hook=OrderedDict)
for user, user_data in data['user_data'].items():
for name, union in zip(names,union_sample):
if user in union:
union_data[name]['x'] += user_data['x']
union_data[name]['y'] += user_data['y']
#print([(n,len(d["x"])) for n,d in union_data.items()])
# ------------
# create .json file
all_data = {}
all_data['users'] = names
all_data['num_samples'] = union_num_samples
all_data['unions'] = union_sample
all_data['user_data'] = union_data
slabel = 'union'
arg_frac = str(args.fraction)
arg_frac = arg_frac[2:]
arg_nu = str(args.u)
arg_nu = arg_nu[2:]
arg_label = arg_frac
file_name = '%s_%s.json' % (slabel, arg_label)
ouf_dir = os.path.join(data_dir, 'sampled_data', file_name)
# NOTE: For now, we just write everything to one big json.
# This will give us issues if we use a large sample.
print('writing %s' % file_name)
with open(ouf_dir, 'w') as outfile:
json.dump(all_data, outfile)
if not args.union:
new_user_count = 0 # for iid case
for f in files:
file_dir = os.path.join(subdir, f)
with open(file_dir, 'r') as inf:
# Load data into an OrderedDict, to prevent ordering changes
# and enable reproducibility
data = json.load(inf, object_pairs_hook=OrderedDict)
num_users = len(data['users'])
tot_num_samples = sum(data['num_samples'])
num_new_samples = int(args.fraction * tot_num_samples)
hierarchies = None
if(args.iid):
raw_list = list(data['user_data'].values())
raw_x = [elem['x'] for elem in raw_list]
raw_y = [elem['y'] for elem in raw_list]
x_list = [item for sublist in raw_x for item in sublist] # flatten raw_x
y_list = [item for sublist in raw_y for item in sublist] # flatten raw_y
num_new_users = int(round(args.u * num_users))
if num_new_users == 0:
num_new_users += 1
indices = [i for i in range(tot_num_samples)]
new_indices = rng.sample(indices, num_new_samples)
users = [str(i+new_user_count) for i in range(num_new_users)]
user_data = {}
for user in users:
user_data[user] = {'x': [], 'y': []}
all_x_samples = [x_list[i] for i in new_indices]
all_y_samples = [y_list[i] for i in new_indices]
x_groups = iid_divide(all_x_samples, num_new_users)
y_groups = iid_divide(all_y_samples, num_new_users)
for i in range(num_new_users):
user_data[users[i]]['x'] = x_groups[i]
user_data[users[i]]['y'] = y_groups[i]
num_samples = [len(user_data[u]['y']) for u in users]
new_user_count += num_new_users
else:
ctot_num_samples = 0
users = data['users']
users_and_hiers = None
if 'hierarchies' in data:
users_and_hiers = list(zip(users, data['hierarchies']))
rng.shuffle(users_and_hiers)
else:
rng.shuffle(users)
user_i = 0
num_samples = []
user_data = {}
if 'hierarchies' in data:
hierarchies = []
while(ctot_num_samples < num_new_samples):
hierarchy = None
if users_and_hiers is not None:
user, hier = users_and_hiers[user_i]
else:
user = users[user_i]
cdata = data['user_data'][user]
cnum_samples = len(data['user_data'][user]['y'])
if (ctot_num_samples + cnum_samples > num_new_samples):
cnum_samples = num_new_samples - ctot_num_samples
indices = [i for i in range(cnum_samples)]
new_indices = rng.sample(indices, cnum_samples)
x = []
y = []
for i in new_indices:
x.append(data['user_data'][user]['x'][i])
y.append(data['user_data'][user]['y'][i])
cdata = {'x': x, 'y': y}
if 'hierarchies' in data:
hierarchies.append(hier)
num_samples.append(cnum_samples)
user_data[user] = cdata
ctot_num_samples += cnum_samples
user_i += 1
if 'hierarchies' in data:
users = [u for u, h in users_and_hiers][:user_i]
else:
users = users[:user_i]
# ------------
# create .json file
all_data = {}
all_data['users'] = users
if hierarchies is not None:
all_data['hierarchies'] = hierarchies
all_data['num_samples'] = num_samples
all_data['user_data'] = user_data
slabel = ''
if(args.iid):
slabel = 'iid'
else:
slabel = 'niid'
arg_frac = str(args.fraction)
arg_frac = arg_frac[2:]
arg_nu = str(args.u)
arg_nu = arg_nu[2:]
arg_label = arg_frac
if(args.iid):
arg_label = '%s_%s' % (arg_nu, arg_label)
file_name = '%s_%s_%s.json' % ((f[:-5]), slabel, arg_label)
ouf_dir = os.path.join(data_dir, 'sampled_data', file_name)
print('writing %s' % file_name)
with open(ouf_dir, 'w') as outfile:
json.dump(all_data, outfile)
| []
| []
| [
"LEAF_DATA_META_DIR"
]
| [] | ["LEAF_DATA_META_DIR"] | python | 1 | 0 | |
tests/test_helpers.go | /*
* Based on https://github.com/cosmos/gaia/blob/v2.0.12/cli_test/cli_test.go
*/
package tests
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto/secp256k1"
"github.com/tendermint/tendermint/types"
tmtypes "github.com/tendermint/tendermint/types"
app "github.com/sharering/shareledger"
apptypes "github.com/sharering/shareledger/types"
clientkeys "github.com/cosmos/cosmos-sdk/client/keys"
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/crypto/keys"
"github.com/cosmos/cosmos-sdk/server"
"github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/tests"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/x/auth"
"github.com/cosmos/cosmos-sdk/x/distribution"
"github.com/cosmos/cosmos-sdk/x/gov"
"github.com/cosmos/cosmos-sdk/x/slashing"
"github.com/cosmos/cosmos-sdk/x/staking"
idtypes "github.com/ShareRing/modules/id/types"
shareringUtils "github.com/ShareRing/modules/utils"
)
const (
denom = "shr"
keyAuthority = "authority"
keyTreasurer = "treasurer"
keyValidator = "validator"
keyIdSigner = "id-signer"
keyAccOp = "acc-op"
keyDocIssuer = "doc-issuer"
keyUser1 = "user1"
keyUser2 = "user2"
keyUser3 = "user3"
keyEmtyUser = "emptyUser"
fooDenom = "footoken"
feeDenom = "shr"
fee2Denom = "fee2token"
keyBaz = "baz"
keyVesting = "vesting"
keyFooBarBaz = "foobarbaz"
DefaultKeyPass = "12345678"
)
var (
totalCoins = sdk.NewCoins(
// sdk.NewCoin(fee2Denom, sdk.TokensFromConsensusPower(2000000)),
// sdk.NewCoin(feeDenom, sdk.TokensFromConsensusPower(2000000)),
// sdk.NewCoin(fooDenom, sdk.TokensFromConsensusPower(2000)),
sdk.NewCoin(denom, sdk.TokensFromConsensusPower(300).Add(sdk.NewInt(12))), // add coins from inflation
)
startCoins = sdk.NewCoins(
// sdk.NewCoin(fee2Denom, sdk.TokensFromConsensusPower(1000000)),
// sdk.NewCoin(feeDenom, sdk.TokensFromConsensusPower(1000000)),
// sdk.NewCoin(fooDenom, sdk.TokensFromConsensusPower(1000)),
sdk.NewCoin(denom, shareringUtils.SHRDecimal.Mul(sdk.NewInt(int64(1000)))),
)
vestingCoins = sdk.NewCoins(
sdk.NewCoin(feeDenom, sdk.TokensFromConsensusPower(500000)),
)
)
//___________________________________________________________________________________
// Fixtures
// Fixtures is used to setup the testing environment
type Fixtures struct {
BuildDir string
RootDir string
GaiadBinary string
GaiacliBinary string
ChainID string
RPCAddr string
Port string
GaiadHome string
GaiacliHome string
P2PAddr string
T *testing.T
}
// NewFixtures creates a new instance of Fixtures with many vars set
func NewFixtures(t *testing.T) *Fixtures {
tmpDir, err := ioutil.TempDir("", "shareledger_integration_"+t.Name()+"_")
require.NoError(t, err)
servAddr, port, err := server.FreeTCPAddr()
require.NoError(t, err)
p2pAddr, _, err := server.FreeTCPAddr()
require.NoError(t, err)
buildDir := os.Getenv("BUILDDIR")
if buildDir == "" {
buildDir, err = filepath.Abs("../build/")
require.NoError(t, err)
}
// apptypes.ConfigureSDK()
config := sdk.GetConfig()
config.SetBech32PrefixForAccount(apptypes.Bech32PrefixAccAddr, apptypes.Bech32PrefixAccPub)
config.SetBech32PrefixForValidator(apptypes.Bech32PrefixValAddr, apptypes.Bech32PrefixValPub)
config.SetBech32PrefixForConsensusNode(apptypes.Bech32PrefixConsAddr, apptypes.Bech32PrefixConsPub)
return &Fixtures{
T: t,
BuildDir: buildDir,
RootDir: tmpDir,
GaiadBinary: filepath.Join(buildDir, "shareledger"),
GaiacliBinary: filepath.Join(buildDir, "slcli"),
GaiadHome: filepath.Join(tmpDir, ".shareledger"),
GaiacliHome: filepath.Join(tmpDir, ".slcli"),
RPCAddr: servAddr,
P2PAddr: p2pAddr,
Port: port,
}
}
// GenesisFile returns the path of the genesis file
func (f Fixtures) GenesisFile() string {
return filepath.Join(f.GaiadHome, "config", "genesis.json")
}
// GenesisState returns the application's genesis state
func (f Fixtures) GenesisState() simapp.GenesisState {
cdc := codec.New()
genDoc, err := tmtypes.GenesisDocFromFile(f.GenesisFile())
require.NoError(f.T, err)
var appState simapp.GenesisState
require.NoError(f.T, cdc.UnmarshalJSON(genDoc.AppState, &appState))
return appState
}
// InitFixtures is called at the beginning of a test and initializes a chain
// with 1 validator.
func InitFixtures(t *testing.T) (f *Fixtures) {
f = NewFixtures(t)
// reset test state
f.UnsafeResetAll()
f.CLIConfig("keyring-backend", "test")
// ensure keystore has foo and bar keys
f.KeysDelete(keyAuthority)
f.KeysDelete(keyTreasurer)
f.KeysDelete(keyValidator)
f.KeysDelete(keyIdSigner)
f.KeysDelete(keyAccOp)
f.KeysDelete(keyDocIssuer)
f.KeysDelete(keyUser1)
f.KeysDelete(keyUser2)
f.KeysDelete(keyUser3)
f.KeysDelete(keyEmtyUser)
f.KeysAdd(keyAuthority)
f.KeysAdd(keyTreasurer)
f.KeysAdd(keyValidator)
f.KeysAdd(keyIdSigner)
f.KeysAdd(keyUser1)
f.KeysAdd(keyUser2)
f.KeysAdd(keyUser3)
f.KeysAdd(keyEmtyUser)
f.KeysAdd(keyAccOp)
f.KeysAdd(keyDocIssuer)
// ensure that CLI output is in JSON format
f.CLIConfig("output", "json")
// NOTE: GDInit sets the ChainID
f.GDInit(keyValidator)
f.CLIConfig("chain-id", f.ChainID)
f.CLIConfig("broadcast-mode", "block")
f.CLIConfig("trust-node", "true")
f.KeysShow(keyValidator)
// start an account with tokens
f.AddGenesisAccount(f.KeyAddress(keyAuthority), startCoins)
f.AddGenesisAccount(f.KeyAddress(keyTreasurer), startCoins)
f.AddGenesisAccount(f.KeyAddress(keyValidator), startCoins)
f.AddGenesisAccount(f.KeyAddress(keyIdSigner), startCoins)
f.AddGenesisAccount(f.KeyAddress(keyDocIssuer), startCoins)
f.AddGenesisAccount(f.KeyAddress(keyUser1), startCoins)
f.AddGenesisAccount(f.KeyAddress(keyUser2), startCoins)
f.AddGenesisAccount(f.KeyAddress(keyUser3), startCoins)
f.AddGenesisAccount(f.KeyAddress(keyAccOp), startCoins)
f.AddGenesisAuthorityAccount(f.KeyAddress(keyAuthority))
f.AddGenesisTreasurerAccount(f.KeyAddress(keyTreasurer))
f.AddGenesisValidatorAccount(f.KeyAddress(keyValidator))
f.AddGenesisOperatorAccount(f.KeyAddress(keyAccOp))
f.GenTx(keyValidator, "--keyring-backend test --amount=1000000shr")
f.CollectGenTxs()
return
}
// Cleanup is meant to be run at the end of a test to clean up any remaining test state
func (f *Fixtures) Cleanup(dirs ...string) {
clean := append(dirs, f.RootDir)
for _, d := range clean {
require.NoError(f.T, os.RemoveAll(d))
}
}
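// exampleFixtureLifecycle is an illustrative sketch, not used by the real tests: it
// shows how these helpers are intended to be combined. It assumes the cosmos-sdk
// tests.Process returned by GDStart exposes Stop(kill bool), as in the gaia CLI tests.
func exampleFixtureLifecycle(t *testing.T) {
	f := InitFixtures(t)
	defer f.Cleanup()
	// Start shareledger and wait for the first blocks.
	proc := f.GDStart()
	defer proc.Stop(false)
	// Transactions and queries against f.RPCAddr via the f.* helpers would go here.
}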
// Flags returns the flags necessary for making most CLI calls
func (f *Fixtures) Flags() string {
return fmt.Sprintf("--home=%s --node=%s", f.GaiacliHome, f.RPCAddr)
}
//___________________________________________________________________________________
// id
func (f *Fixtures) CreateId(id string, backup, owner sdk.AccAddress, extraData string, flags ...string) (bool, string, string) {
cmd := fmt.Sprintf("%s tx id create %s %s %s %s %v", f.GaiacliBinary, id, backup, owner, extraData, f.Flags())
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
func (f *Fixtures) CreateIdInBatch(id []string, backup, owner []sdk.AccAddress, extraData []string, flags ...string) (bool, string, string) {
sep := ","
backupArr := make([]string, 0, len(backup))
ownerArr := make([]string, 0, len(backup))
for i := 0; i < len(backup); i++ {
backupArr = append(backupArr, backup[i].String())
ownerArr = append(ownerArr, owner[i].String())
}
ids := strings.Join(id, sep)
owners := strings.Join(ownerArr, sep)
backups := strings.Join(backupArr, sep)
extras := strings.Join(extraData, sep)
cmd := fmt.Sprintf("%s tx id create-batch %s %s %s %s %v", f.GaiacliBinary, ids, backups, owners, extras, f.Flags())
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
func (f *Fixtures) ReplaceIdOwner(id string, newOwner sdk.AccAddress, flags ...string) (bool, string, string) {
cmd := fmt.Sprintf("%s tx id replace %s %s %v", f.GaiacliBinary, id, newOwner.String(), f.Flags())
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
func (f *Fixtures) UpdateId(id, extraData string, flags ...string) (bool, string, string) {
cmd := fmt.Sprintf("%s tx id update %s %s %v", f.GaiacliBinary, id, extraData, f.Flags())
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
func (f *Fixtures) QueryIdById(id string, flags ...string) idtypes.ID {
cmd := fmt.Sprintf("%s query id info id %s %v", f.GaiacliBinary, id, f.Flags())
out, _ := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
var idRs idtypes.ID
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &idRs)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return idRs
}
func (f *Fixtures) QueryIdByOwner(owner sdk.Address, flags ...string) idtypes.ID {
cmd := fmt.Sprintf("%s query id info address %s %v", f.GaiacliBinary, owner.String(), f.Flags())
out, _ := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
var idRs idtypes.ID
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &idRs)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return idRs
}
//___________________________________________________________________________________
// gaiad
// UnsafeResetAll is gaiad unsafe-reset-all
func (f *Fixtures) UnsafeResetAll(flags ...string) {
cmd := fmt.Sprintf("%s --home=%s unsafe-reset-all", f.GaiadBinary, f.GaiadHome)
executeWrite(f.T, addFlags(cmd, flags))
err := os.RemoveAll(filepath.Join(f.GaiadHome, "config", "gentx"))
require.NoError(f.T, err)
}
// GDInit is gaiad init
// NOTE: GDInit sets the ChainID for the Fixtures instance
func (f *Fixtures) GDInit(moniker string, flags ...string) {
cmd := fmt.Sprintf("%s init -o --home=%s %s", f.GaiadBinary, f.GaiadHome, moniker)
_, stderr := tests.ExecuteT(f.T, addFlags(cmd, flags), DefaultKeyPass)
var chainID string
var initRes map[string]json.RawMessage
err := json.Unmarshal([]byte(stderr), &initRes)
require.NoError(f.T, err)
err = json.Unmarshal(initRes["chain_id"], &chainID)
require.NoError(f.T, err)
f.ChainID = chainID
}
// AddGenesisAccount is gaiad add-genesis-account
func (f *Fixtures) AddGenesisAccount(address sdk.AccAddress, coins sdk.Coins, flags ...string) {
cmd := fmt.Sprintf("%s add-genesis-account %s %s --home=%s", f.GaiadBinary, address, coins, f.GaiadHome)
executeWriteCheckErr(f.T, addFlags(cmd, flags))
}
// GenTx is gaiad gentx
func (f *Fixtures) GenTx(name string, flags ...string) {
cmd := fmt.Sprintf("%s gentx --name=%s --home=%s --home-client=%s", f.GaiadBinary, name, f.GaiadHome, f.GaiacliHome)
executeWriteCheckErr(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
// CollectGenTxs is gaiad collect-gentxs
func (f *Fixtures) CollectGenTxs(flags ...string) {
cmd := fmt.Sprintf("%s collect-gentxs --home=%s", f.GaiadBinary, f.GaiadHome)
executeWriteCheckErr(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
// GDStart runs gaiad start with the appropriate flags and returns a process
func (f *Fixtures) GDStart(flags ...string) *tests.Process {
cmd := fmt.Sprintf("%s start --home=%s --rpc.laddr=%v --p2p.laddr=%v", f.GaiadBinary, f.GaiadHome, f.RPCAddr, f.P2PAddr)
proc := tests.GoExecuteTWithStdout(f.T, addFlags(cmd, flags))
tests.WaitForTMStart(f.Port)
tests.WaitForNextNBlocksTM(1, f.Port)
return proc
}
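// Typical usage in a test (a sketch; InitFixtures and Process.Stop come from
// the surrounding cli test harness and are assumed here):
//
//	f := InitFixtures(t)
//	proc := f.GDStart()
//	defer proc.Stop(false)
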
// GDTendermint returns the results of gaiad tendermint [query]
func (f *Fixtures) GDTendermint(query string) string {
cmd := fmt.Sprintf("%s tendermint %s --home=%s", f.GaiadBinary, query, f.GaiadHome)
success, stdout, stderr := executeWriteRetStdStreams(f.T, cmd)
require.Empty(f.T, stderr)
require.True(f.T, success)
return strings.TrimSpace(stdout)
}
// ValidateGenesis runs gaiad validate-genesis
func (f *Fixtures) ValidateGenesis() {
cmd := fmt.Sprintf("%s validate-genesis --home=%s", f.GaiadBinary, f.GaiadHome)
executeWriteCheckErr(f.T, cmd)
}
// gentlemint: Add default system accounts to genesis
func (f *Fixtures) AddGenesisAuthorityAccount(address sdk.AccAddress, flags ...string) {
cmd := fmt.Sprintf("%s add-genesis-authority %s --home=%s", f.GaiadBinary, address, f.GaiadHome)
executeWriteCheckErr(f.T, addFlags(cmd, flags))
}
func (f *Fixtures) AddGenesisTreasurerAccount(address sdk.AccAddress, flags ...string) {
cmd := fmt.Sprintf("%s add-genesis-treasurer %s --home=%s", f.GaiadBinary, address, f.GaiadHome)
executeWriteCheckErr(f.T, addFlags(cmd, flags))
}
func (f *Fixtures) AddGenesisValidatorAccount(address sdk.AccAddress, flags ...string) {
cmd := fmt.Sprintf("%s add-genesis-validator %s --home=%s", f.GaiadBinary, address, f.GaiadHome)
executeWriteCheckErr(f.T, addFlags(cmd, flags))
}
func (f *Fixtures) AddGenesisOperatorAccount(address sdk.AccAddress, flags ...string) {
cmd := fmt.Sprintf("%s add-genesis-account-operator %s --home=%s", f.GaiadBinary, address, f.GaiadHome)
executeWriteCheckErr(f.T, addFlags(cmd, flags))
}
//___________________________________________________________________________________
// gaiacli keys
// KeysDelete is gaiacli keys delete
func (f *Fixtures) KeysDelete(name string, flags ...string) {
cmd := fmt.Sprintf("%s keys delete --home=%s %s", f.GaiacliBinary, f.GaiacliHome, name)
executeWrite(f.T, addFlags(cmd, append(append(flags, "-y"), "-f")))
}
// KeysAdd is gaiacli keys add
func (f *Fixtures) KeysAdd(name string, flags ...string) {
cmd := fmt.Sprintf("%s keys add --home=%s %s", f.GaiacliBinary, f.GaiacliHome, name)
executeWriteCheckErr(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
// KeysAddRecover prepares gaiacli keys add --recover
func (f *Fixtures) KeysAddRecover(name, mnemonic string, flags ...string) (exitSuccess bool, stdout, stderr string) {
cmd := fmt.Sprintf("%s keys add --home=%s --recover %s", f.GaiacliBinary, f.GaiacliHome, name)
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass, mnemonic)
}
// KeysAddRecoverHDPath prepares gaiacli keys add --recover --account --index
func (f *Fixtures) KeysAddRecoverHDPath(name, mnemonic string, account uint32, index uint32, flags ...string) {
cmd := fmt.Sprintf("%s keys add --home=%s --recover %s --account %d --index %d", f.GaiacliBinary, f.GaiacliHome, name, account, index)
executeWriteCheckErr(f.T, addFlags(cmd, flags), DefaultKeyPass, mnemonic)
}
// KeysShow is gaiacli keys show
func (f *Fixtures) KeysShow(name string, flags ...string) keys.KeyOutput {
cmd := fmt.Sprintf("%s keys show --home=%s %s", f.GaiacliBinary, f.GaiacliHome, name)
out, _ := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
var ko keys.KeyOutput
err := clientkeys.UnmarshalJSON([]byte(out), &ko)
require.NoError(f.T, err)
return ko
}
// KeyAddress returns the SDK account address from the key
func (f *Fixtures) KeyAddress(name string) sdk.AccAddress {
ko := f.KeysShow(name)
accAddr, err := sdk.AccAddressFromBech32(ko.Address)
require.NoError(f.T, err)
return accAddr
}
//___________________________________________________________________________________
// gaiacli config
// CLIConfig is gaiacli config
func (f *Fixtures) CLIConfig(key, value string, flags ...string) {
cmd := fmt.Sprintf("%s config --home=%s %s %s", f.GaiacliBinary, f.GaiacliHome, key, value)
executeWriteCheckErr(f.T, addFlags(cmd, flags))
}
//___________________________________________________________________________________
// gaiacli tx send/sign/broadcast
// TxSend is gaiacli tx send
func (f *Fixtures) TxSend(from string, to sdk.AccAddress, amount sdk.Coin, flags ...string) (bool, string, string) {
cmd := fmt.Sprintf("%s tx send %s %s %s %v", f.GaiacliBinary, from, to, amount, f.Flags())
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
// TxSign is gaiacli tx sign
func (f *Fixtures) TxSign(signer, fileName string, flags ...string) (bool, string, string) {
cmd := fmt.Sprintf("%s tx sign %v --from=%s %v", f.GaiacliBinary, f.Flags(), signer, fileName)
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
// TxBroadcast is gaiacli tx broadcast
func (f *Fixtures) TxBroadcast(fileName string, flags ...string) (bool, string, string) {
cmd := fmt.Sprintf("%s tx broadcast %v %v", f.GaiacliBinary, f.Flags(), fileName)
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
// TxEncode is gaiacli tx encode
func (f *Fixtures) TxEncode(fileName string, flags ...string) (bool, string, string) {
cmd := fmt.Sprintf("%s tx encode %v %v", f.GaiacliBinary, f.Flags(), fileName)
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
// TxMultisign is gaiacli tx multisign
func (f *Fixtures) TxMultisign(fileName, name string, signaturesFiles []string,
flags ...string) (bool, string, string) {
cmd := fmt.Sprintf("%s tx multisign %v %s %s %s", f.GaiacliBinary, f.Flags(),
fileName, name, strings.Join(signaturesFiles, " "),
)
return executeWriteRetStdStreams(f.T, cmd)
}
//___________________________________________________________________________________
// gaiacli tx staking
// TxStakingCreateValidator is gaiacli tx staking create-validator
func (f *Fixtures) TxStakingCreateValidator(from, consPubKey string, amount sdk.Coin, flags ...string) (bool, string, string) {
cmd := fmt.Sprintf("%s tx staking create-validator %v --from=%s --pubkey=%s", f.GaiacliBinary, f.Flags(), from, consPubKey)
cmd += fmt.Sprintf(" --amount=%v --moniker=%v --commission-rate=%v", amount, from, "0.05")
cmd += fmt.Sprintf(" --commission-max-rate=%v --commission-max-change-rate=%v", "0.20", "0.10")
cmd += fmt.Sprintf(" --min-self-delegation=%v", "1")
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
// TxStakingUnbond is gaiacli tx staking unbond
func (f *Fixtures) TxStakingUnbond(from, shares string, validator sdk.ValAddress, flags ...string) bool {
cmd := fmt.Sprintf("%s tx staking unbond %s %v --from=%s %v", f.GaiacliBinary, validator, shares, from, f.Flags())
return executeWrite(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
//___________________________________________________________________________________
// gaiacli tx gov
// TxGovSubmitProposal is gaiacli tx gov submit-proposal
func (f *Fixtures) TxGovSubmitProposal(from, typ, title, description string, deposit sdk.Coin, flags ...string) (bool, string, string) {
cmd := fmt.Sprintf("%s tx gov submit-proposal %v --from=%s --type=%s", f.GaiacliBinary, f.Flags(), from, typ)
cmd += fmt.Sprintf(" --title=%s --description=%s --deposit=%s", title, description, deposit)
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
// TxGovDeposit is gaiacli tx gov deposit
func (f *Fixtures) TxGovDeposit(proposalID int, from string, amount sdk.Coin, flags ...string) (bool, string, string) {
cmd := fmt.Sprintf("%s tx gov deposit %d %s --from=%s %v", f.GaiacliBinary, proposalID, amount, from, f.Flags())
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
// TxGovVote is gaiacli tx gov vote
func (f *Fixtures) TxGovVote(proposalID int, option gov.VoteOption, from string, flags ...string) (bool, string, string) {
cmd := fmt.Sprintf("%s tx gov vote %d %s --from=%s %v", f.GaiacliBinary, proposalID, option, from, f.Flags())
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
// TxGovSubmitParamChangeProposal executes a CLI parameter change proposal
// submission.
func (f *Fixtures) TxGovSubmitParamChangeProposal(
from, proposalPath string, deposit sdk.Coin, flags ...string,
) (bool, string, string) {
cmd := fmt.Sprintf(
"%s tx gov submit-proposal param-change %s --from=%s %v",
f.GaiacliBinary, proposalPath, from, f.Flags(),
)
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
// TxGovSubmitCommunityPoolSpendProposal executes a CLI community pool spend proposal
// submission.
func (f *Fixtures) TxGovSubmitCommunityPoolSpendProposal(
from, proposalPath string, deposit sdk.Coin, flags ...string,
) (bool, string, string) {
cmd := fmt.Sprintf(
"%s tx gov submit-proposal community-pool-spend %s --from=%s %v",
f.GaiacliBinary, proposalPath, from, f.Flags(),
)
return executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
}
//___________________________________________________________________________________
// gaiacli query account
// QueryAccount is gaiacli query account
func (f *Fixtures) QueryAccount(address sdk.AccAddress, flags ...string) auth.BaseAccount {
cmd := fmt.Sprintf("%s query account %s %v", f.GaiacliBinary, address, f.Flags())
out, _ := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
var initRes map[string]json.RawMessage
err := json.Unmarshal([]byte(out), &initRes)
require.NoError(f.T, err, "out %v, err %v", out, err)
value := initRes["value"]
var acc auth.BaseAccount
cdc := codec.New()
codec.RegisterCrypto(cdc)
err = cdc.UnmarshalJSON(value, &acc)
require.NoError(f.T, err, "value %v, err %v", string(value), err)
return acc
}
//___________________________________________________________________________________
// gaiacli query txs
// QueryTxs is gaiacli query txs
func (f *Fixtures) QueryTxs(page, limit int, events ...string) *sdk.SearchTxsResult {
cmd := fmt.Sprintf("%s query txs --page=%d --limit=%d --events='%s' %v", f.GaiacliBinary, page, limit, queryEvents(events), f.Flags())
out, _ := tests.ExecuteT(f.T, cmd, "")
var result sdk.SearchTxsResult
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &result)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return &result
}
// QueryTxsInvalid queries txs with invalid parameters and compares against the expected error
func (f *Fixtures) QueryTxsInvalid(expectedErr error, page, limit int, tags ...string) {
cmd := fmt.Sprintf("%s query txs --page=%d --limit=%d --events='%s' %v", f.GaiacliBinary, page, limit, queryEvents(tags), f.Flags())
_, err := tests.ExecuteT(f.T, cmd, "")
require.EqualError(f.T, expectedErr, err)
}
//___________________________________________________________________________________
// gaiacli query staking
// QueryStakingValidator is gaiacli query staking validator
func (f *Fixtures) QueryStakingValidator(valAddr sdk.ValAddress, flags ...string) staking.Validator {
cmd := fmt.Sprintf("%s query staking validator %s %v", f.GaiacliBinary, valAddr, f.Flags())
out, _ := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
var validator staking.Validator
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &validator)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return validator
}
// QueryStakingUnbondingDelegationsFrom is gaiacli query staking unbonding-delegations-from
func (f *Fixtures) QueryStakingUnbondingDelegationsFrom(valAddr sdk.ValAddress, flags ...string) []staking.UnbondingDelegation {
cmd := fmt.Sprintf("%s query staking unbonding-delegations-from %s %v", f.GaiacliBinary, valAddr, f.Flags())
out, _ := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
var ubds []staking.UnbondingDelegation
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &ubds)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return ubds
}
// QueryStakingDelegationsTo is gaiacli query staking delegations-to
func (f *Fixtures) QueryStakingDelegationsTo(valAddr sdk.ValAddress, flags ...string) []staking.Delegation {
cmd := fmt.Sprintf("%s query staking delegations-to %s %v", f.GaiacliBinary, valAddr, f.Flags())
out, _ := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
var delegations []staking.Delegation
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &delegations)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return delegations
}
// QueryStakingPool is gaiacli query staking pool
func (f *Fixtures) QueryStakingPool(flags ...string) staking.Pool {
cmd := fmt.Sprintf("%s query staking pool %v", f.GaiacliBinary, f.Flags())
out, _ := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
var pool staking.Pool
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &pool)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return pool
}
// QueryStakingParameters is gaiacli query staking parameters
func (f *Fixtures) QueryStakingParameters(flags ...string) staking.Params {
cmd := fmt.Sprintf("%s query staking params %v", f.GaiacliBinary, f.Flags())
out, _ := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
var params staking.Params
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &params)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return params
}
//___________________________________________________________________________________
// gaiacli query gov
// QueryGovParamDeposit is gaiacli query gov param deposit
func (f *Fixtures) QueryGovParamDeposit() gov.DepositParams {
cmd := fmt.Sprintf("%s query gov param deposit %s", f.GaiacliBinary, f.Flags())
out, _ := tests.ExecuteT(f.T, cmd, "")
var depositParam gov.DepositParams
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &depositParam)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return depositParam
}
// QueryGovParamVoting is gaiacli query gov param voting
func (f *Fixtures) QueryGovParamVoting() gov.VotingParams {
cmd := fmt.Sprintf("%s query gov param voting %s", f.GaiacliBinary, f.Flags())
out, _ := tests.ExecuteT(f.T, cmd, "")
var votingParam gov.VotingParams
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &votingParam)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return votingParam
}
// QueryGovParamTallying is gaiacli query gov param tallying
func (f *Fixtures) QueryGovParamTallying() gov.TallyParams {
cmd := fmt.Sprintf("%s query gov param tallying %s", f.GaiacliBinary, f.Flags())
out, _ := tests.ExecuteT(f.T, cmd, "")
var tallyingParam gov.TallyParams
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &tallyingParam)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return tallyingParam
}
// QueryGovProposals is gaiacli query gov proposals
func (f *Fixtures) QueryGovProposals(flags ...string) gov.Proposals {
cmd := fmt.Sprintf("%s query gov proposals %v", f.GaiacliBinary, f.Flags())
stdout, stderr := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
if strings.Contains(stderr, "No matching proposals found") {
return gov.Proposals{}
}
require.Empty(f.T, stderr)
var out gov.Proposals
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(stdout), &out)
require.NoError(f.T, err)
return out
}
// QueryGovProposal is gaiacli query gov proposal
func (f *Fixtures) QueryGovProposal(proposalID int, flags ...string) gov.Proposal {
cmd := fmt.Sprintf("%s query gov proposal %d %v", f.GaiacliBinary, proposalID, f.Flags())
out, _ := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
var proposal gov.Proposal
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &proposal)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return proposal
}
// QueryGovVote is gaiacli query gov vote
func (f *Fixtures) QueryGovVote(proposalID int, voter sdk.AccAddress, flags ...string) gov.Vote {
cmd := fmt.Sprintf("%s query gov vote %d %s %v", f.GaiacliBinary, proposalID, voter, f.Flags())
out, _ := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
var vote gov.Vote
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &vote)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return vote
}
// QueryGovVotes is gaiacli query gov votes
func (f *Fixtures) QueryGovVotes(proposalID int, flags ...string) []gov.Vote {
cmd := fmt.Sprintf("%s query gov votes %d %v", f.GaiacliBinary, proposalID, f.Flags())
out, _ := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
var votes []gov.Vote
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &votes)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return votes
}
// QueryGovDeposit is gaiacli query gov deposit
func (f *Fixtures) QueryGovDeposit(proposalID int, depositor sdk.AccAddress, flags ...string) gov.Deposit {
cmd := fmt.Sprintf("%s query gov deposit %d %s %v", f.GaiacliBinary, proposalID, depositor, f.Flags())
out, _ := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
var deposit gov.Deposit
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &deposit)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return deposit
}
// QueryGovDeposits is gaiacli query gov deposits
func (f *Fixtures) QueryGovDeposits(proposalID int, flags ...string) []gov.Deposit {
cmd := fmt.Sprintf("%s query gov deposits %d %v", f.GaiacliBinary, proposalID, f.Flags())
out, _ := tests.ExecuteT(f.T, addFlags(cmd, flags), "")
var deposits []gov.Deposit
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(out), &deposits)
require.NoError(f.T, err, "out %v\n, err %v", out, err)
return deposits
}
//___________________________________________________________________________________
// query slashing
// QuerySigningInfo returns the signing info for a validator
func (f *Fixtures) QuerySigningInfo(val string) slashing.ValidatorSigningInfo {
cmd := fmt.Sprintf("%s query slashing signing-info %s %s", f.GaiacliBinary, val, f.Flags())
res, errStr := tests.ExecuteT(f.T, cmd, "")
require.Empty(f.T, errStr)
cdc := app.MakeCodec()
var sinfo slashing.ValidatorSigningInfo
err := cdc.UnmarshalJSON([]byte(res), &sinfo)
require.NoError(f.T, err)
return sinfo
}
// QuerySlashingParams is gaiacli query slashing params
func (f *Fixtures) QuerySlashingParams() slashing.Params {
cmd := fmt.Sprintf("%s query slashing params %s", f.GaiacliBinary, f.Flags())
res, errStr := tests.ExecuteT(f.T, cmd, "")
require.Empty(f.T, errStr)
cdc := app.MakeCodec()
var params slashing.Params
err := cdc.UnmarshalJSON([]byte(res), &params)
require.NoError(f.T, err)
return params
}
//___________________________________________________________________________________
// query distribution
// QueryRewards returns the rewards of a delegator
func (f *Fixtures) QueryRewards(delAddr sdk.AccAddress, flags ...string) distribution.QueryDelegatorTotalRewardsResponse {
cmd := fmt.Sprintf("%s query distribution rewards %s %s", f.GaiacliBinary, delAddr, f.Flags())
res, errStr := tests.ExecuteT(f.T, cmd, "")
require.Empty(f.T, errStr)
cdc := app.MakeCodec()
var rewards distribution.QueryDelegatorTotalRewardsResponse
err := cdc.UnmarshalJSON([]byte(res), &rewards)
require.NoError(f.T, err)
return rewards
}
//___________________________________________________________________________________
// query supply
// QueryTotalSupply returns the total supply of coins
func (f *Fixtures) QueryTotalSupply(flags ...string) (totalSupply sdk.Coins) {
cmd := fmt.Sprintf("%s query supply total %s", f.GaiacliBinary, f.Flags())
res, errStr := tests.ExecuteT(f.T, cmd, "")
require.Empty(f.T, errStr)
cdc := app.MakeCodec()
err := cdc.UnmarshalJSON([]byte(res), &totalSupply)
require.NoError(f.T, err)
return totalSupply
}
// QueryTotalSupplyOf returns the total supply of a given coin denom
func (f *Fixtures) QueryTotalSupplyOf(denom string, flags ...string) sdk.Int {
cmd := fmt.Sprintf("%s query supply total %s %s", f.GaiacliBinary, denom, f.Flags())
res, errStr := tests.ExecuteT(f.T, cmd, "")
require.Empty(f.T, errStr)
cdc := app.MakeCodec()
var supplyOf sdk.Int
err := cdc.UnmarshalJSON([]byte(res), &supplyOf)
require.NoError(f.T, err)
return supplyOf
}
//___________________________________________________________________________________
//
func (f *Fixtures) ExportGenesis(flags ...string) (*tmtypes.GenesisDoc, error) {
cmd := fmt.Sprintf("%s export --home=%s", f.GaiadBinary, f.GaiadHome)
_, res, _ := executeWriteRetStdStreams(f.T, addFlags(cmd, flags), DefaultKeyPass)
return types.GenesisDocFromJSON([]byte(res))
}
//___________________________________________________________________________________
// executors
func executeWriteCheckErr(t *testing.T, cmdStr string, writes ...string) {
require.True(t, executeWrite(t, cmdStr, writes...))
}
func executeWrite(t *testing.T, cmdStr string, writes ...string) (exitSuccess bool) {
exitSuccess, _, _ = executeWriteRetStdStreams(t, cmdStr, writes...)
return
}
func executeWriteRetStdStreams(t *testing.T, cmdStr string, writes ...string) (bool, string, string) {
proc := tests.GoExecuteT(t, cmdStr)
// Enables use of interactive commands
for _, write := range writes {
_, err := proc.StdinPipe.Write([]byte(write + "\n"))
require.NoError(t, err)
}
// Read both stdout and stderr from the process
stdout, stderr, err := proc.ReadAll()
if err != nil {
fmt.Println("Err on proc.ReadAll()", err, cmdStr)
}
// Log output.
if len(stdout) > 0 {
t.Log("Stdout:", string(stdout))
}
if len(stderr) > 0 {
t.Log("Stderr:", string(stderr))
}
// Wait for process to exit
proc.Wait()
// Return success, stdout, stderr
return proc.ExitState.Success(), string(stdout), string(stderr)
}
//___________________________________________________________________________________
// utils
func addFlags(cmd string, flags []string) string {
for _, f := range flags {
cmd += " " + f
}
return strings.TrimSpace(cmd)
}
func queryEvents(events []string) (out string) {
for _, event := range events {
out += event + "&"
}
return strings.TrimSuffix(out, "&")
}
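// Illustrative only (hypothetical filters): queryEvents joins individual event
// filters with '&' so they can be passed as a single --events value, e.g.
//
//	queryEvents([]string{"message.sender=<addr>", "tx.height=5"})
//
// yields "message.sender=<addr>&tx.height=5".
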
// Write the given string to a new temporary file
func WriteToNewTempFile(t *testing.T, s string) *os.File {
fp, err := ioutil.TempFile(os.TempDir(), "cosmos_cli_test_")
require.Nil(t, err)
_, err = fp.WriteString(s)
require.Nil(t, err)
return fp
}
func marshalStdTx(t *testing.T, stdTx auth.StdTx) []byte {
cdc := app.MakeCodec()
bz, err := cdc.MarshalBinaryBare(stdTx)
require.NoError(t, err)
return bz
}
func unmarshalStdTx(t *testing.T, s string) (stdTx auth.StdTx) {
cdc := app.MakeCodec()
require.Nil(t, cdc.UnmarshalJSON([]byte(s), &stdTx))
return
}
func createRandomAddr(amount int) (prvs []secp256k1.PrivKeySecp256k1, addrs []sdk.AccAddress) {
for i := 0; i < amount; i++ {
prv := secp256k1.GenPrivKey()
addr := sdk.AccAddress(prv.PubKey().Address())
addrs = append(addrs, addr)
prvs = append(prvs, prv)
}
return
}
| [
"\"BUILDDIR\""
]
| []
| [
"BUILDDIR"
]
| [] | ["BUILDDIR"] | go | 1 | 0 | |
RecommendMovie/asgi.py | """
ASGI config for RecommendMovie project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'RecommendMovie.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
build/rust/build_rustc_target.py | #!/usr/bin/env python2.7
# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import subprocess
import sys
TERM_COLOR_RED = '\033[91m'
TERM_COLOR_END = '\033[0m'
# Updates the path of the main target in the depfile to the path of
# build_output_path expressed relative to base_path
def fix_depfile(depfile_path, base_path, build_output_path):
with open(depfile_path, "r") as depfile:
content = depfile.read()
content_split = content.split(': ', 1)
target_path = os.path.relpath(build_output_path, start=base_path)
new_content = "%s: %s" % (target_path, content_split[1])
with open(depfile_path, "w") as depfile:
depfile.write(new_content)
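# Illustrative sketch of the rewrite above (paths are hypothetical):
#   before: <path recorded by rustc for the target>: ../../src/main.rs
#   after:  exe.unstripped/foo: ../../src/main.rs
# i.e. the first target is replaced by args.output_file expressed relative to
# the current build directory, so the build system can match the depfile to the
# output it declared.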
# Creates the directory containing the given file.
def create_base_directory(file):
path = os.path.dirname(file)
try:
os.makedirs(path)
except os.error:
# Already existed.
pass
# Starts the given command and returns the newly created job.
def start_command(args, env):
return subprocess.Popen(args, env=env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def main():
parser = argparse.ArgumentParser("Compiles a Rust crate")
parser.add_argument("--rustc",
help="Path to rustc",
required=True)
# This forces a recompile when the CIPD version changes. The value is unused.
parser.add_argument("--cipd-version",
help="CIPD version of Rust toolchain",
required=False)
parser.add_argument("--crate-root",
help="Path to source directory",
required=True)
parser.add_argument("--crate-type",
help="Type of crate to build",
required=True,
choices=["bin", "rlib", "staticlib", "proc-macro"])
parser.add_argument("--crate-name",
help="Name of crate to build",
required=True)
parser.add_argument("--edition",
help="Edition of rust to use when compiling the crate",
required=True,
choices=["2015", "2018"])
parser.add_argument("--opt-level",
help="Optimization level",
required=True,
choices=["0", "1", "2", "3", "s", "z"])
parser.add_argument("--lto",
help="Use LTO",
required=False,
choices=["thin", "fat"])
parser.add_argument("--output-file",
help="Path at which the output file should be stored",
required=True)
parser.add_argument("--depfile",
help="Path at which the output depfile should be stored",
required=True)
parser.add_argument("--test",
action="store_true",
help="Whether to build the target in test configuration",
default=False)
parser.add_argument("--root-out-dir",
help="Root output dir on which depfile paths should be rebased",
required=True)
parser.add_argument("--target",
help="Target for which this crate is being compiled",
required=True)
parser.add_argument("--cmake-dir",
help="Path to the directory containing cmake",
required=True)
parser.add_argument("--clang_prefix",
help="Path to the clang prefix",
required=True)
parser.add_argument("--clang-resource-dir",
help="Path to the clang resource dir",
required=True)
parser.add_argument("--sysroot",
help="Path to the sysroot",
required=True)
parser.add_argument("--lib-dir",
help="Link path for binary libraries",
action='append', default=[])
parser.add_argument("--lib-dir-file",
help="File of --lib-dir directory names, one per line")
parser.add_argument("--first-party-crate-root",
help="Path to directory containing the libs for first-party dependencies",
required=True)
parser.add_argument("--third-party-crate-root",
help="Path to directory containing the libs for third-party dependencies",
required=True)
parser.add_argument("--dep-data",
action="append",
help="Path to metadata from a crate dependency",
required=False)
parser.add_argument("--mmacosx-version-min",
help="Select macosx framework version",
required=False)
parser.add_argument("--symbol-level",
help="Symbols to include (0=none, 1=minimal, 2=full)",
choices=["0", "1", "2"],
required=True)
parser.add_argument("--cap-lints",
help="Maximum error promotion for lints",
choices=["deny", "allow", "warn"],
required=True)
parser.add_argument("--unstable-rust-feature",
help="Unstable Rust feature to allow",
action="append",
dest="unstable_rust_features",
required=False)
parser.add_argument("--feature",
help="Feature to enable",
action="append",
dest="features",
required=False)
parser.add_argument("--remap-path-prefix",
help="Remap source names in output",
action="append",
required=False)
parser.add_argument("--mac-host",
help="Whether or not the host is a Mac",
default=False,
action="store_true",
required=False)
args = parser.parse_args()
env = os.environ.copy()
env["CC"] = os.path.join(args.clang_prefix, "clang")
env["CXX"] = os.path.join(args.clang_prefix, "clang++")
env["AR"] = os.path.join(args.clang_prefix, "llvm-ar")
env["RANLIB"] = os.path.join(args.clang_prefix, "llvm-ranlib")
if args.cmake_dir:
env["PATH"] = "%s:%s" % (env["PATH"], args.cmake_dir)
env["RUST_BACKTRACE"] = "1"
create_base_directory(args.output_file)
if args.lib_dir_file:
with open(args.lib_dir_file) as f:
args.lib_dir += [line.strip() for line in f.readlines()]
call_args = [
args.rustc,
args.crate_root,
"-Dwarnings",
"--cap-lints",
args.cap_lints,
"--edition=%s" % args.edition,
"--crate-type=%s" % args.crate_type,
"--crate-name=%s" % args.crate_name,
"--target=%s" % args.target,
"-Copt-level=%s" % args.opt_level,
"-Cdebuginfo=%s" % args.symbol_level,
"--color=always",
"-Zallow-features=%s" % ",".join(args.unstable_rust_features or [])
]
call_args += ["-Lnative=%s" % dir for dir in args.lib_dir]
if args.test:
call_args += ["--test"]
if args.features:
for feature in args.features:
call_args += ["--cfg", "feature=\"%s\"" % feature]
if args.remap_path_prefix:
for path_prefix in args.remap_path_prefix:
call_args += ["--remap-path-prefix", path_prefix]
if args.target.endswith("fuchsia"):
call_args += [
"-L", os.path.join(args.sysroot, "lib"),
"-Clinker=%s" % os.path.join(args.clang_prefix, "lld"),
"-Clink-arg=--pack-dyn-relocs=relr",
"-Clink-arg=--sysroot=%s" % args.sysroot,
"-Clink-arg=-L%s" % os.path.join(args.sysroot, "lib"),
"-Clink-arg=-L%s" % os.path.join(args.clang_resource_dir, args.target, "lib"),
"-Clink-arg=--threads",
"-Clink-arg=-dynamic-linker=ld.so.1",
"-Clink-arg=--icf=all",
]
if args.target.startswith("aarch64"):
call_args += ["-Clink-arg=--fix-cortex-a53-843419"]
else:
call_args += [
"-Clinker=%s" % os.path.join(args.clang_prefix, "clang"),
]
if args.target.startswith("aarch64"):
call_args += ["-Clink-arg=-Wl,--fix-cortex-a53-843419"]
if args.target.endswith("linux-gnu"):
call_args += ["-Clink-arg=-Wl,--build-id"]
if not args.target.endswith("darwin"):
call_args += ["-Clink-arg=-Wl,--threads", "-Clink-arg=-Wl,--icf=all"]
if args.mmacosx_version_min:
call_args += [
"-Clink-arg=-mmacosx-version-min=%s" % args.mmacosx_version_min,
]
if args.lto:
call_args += ["-Clto=%s" % args.lto]
# calculate all the search paths we should look in for deps in cargo's output
search_path_suffixes = [
os.path.join("debug", "deps"),
os.path.join("release", "deps"),
]
targets = [
"x86_64-fuchsia",
"aarch64-fuchsia",
"x86_64-unknown-linux-gnu",
"x86_64-apple-darwin",
]
# add in e.g. x86_64-unknown-linux/release/deps
for target in targets:
search_path_suffixes += [os.path.join(target, suffix) for suffix in search_path_suffixes]
search_paths = [
args.first_party_crate_root,
args.third_party_crate_root,
]
search_paths += [os.path.join(args.third_party_crate_root, suffix) for suffix in search_path_suffixes]
for path in search_paths:
call_args += ["-L", "dependency=%s" % path]
externs = []
# Collect externs
if args.dep_data:
for data_path in args.dep_data:
if not os.path.isfile(data_path):
print TERM_COLOR_RED
print "Missing Rust target data for dependency " + data_path
print "Did you accidentally depend on a non-Rust target?"
print TERM_COLOR_END
return -1
dep_data = json.load(open(data_path))
if dep_data["third_party"]:
package_name = dep_data["package_name"]
crate = dep_data["crate_name"]
crate_type = dep_data["crate_type"]
if crate_type == "lib":
ext = ".rlib"
elif crate_type == "staticlib":
ext = ".a"
elif crate_type == "proc-macro":
if args.mac_host:
ext = ".dylib"
else:
ext = ".so"
else:
print "Unrecognized crate type: " + crate_type
return -1
filename = "lib" + crate + "-" + package_name + ext
lib_path = os.path.join(args.third_party_crate_root, filename)
if not os.path.exists(lib_path):
print TERM_COLOR_RED
print "lib not found at path: " + lib_path
print "This is a bug. Please report this to the Fuchsia Toolchain team."
print TERM_COLOR_END
return -1
else:
crate = dep_data["crate_name"]
lib_path = dep_data["lib_path"]
crate_underscore = crate.replace("-", "_")
externs.append("%s=%s" % (crate_underscore, lib_path))
# add externs to arguments
for extern in externs:
call_args += ["--extern", extern]
# Build the depfile
depfile_args = call_args + [
"-o%s" % args.depfile,
"--emit=dep-info",
]
depfile_job = start_command(depfile_args, env)
# Build the desired output
build_args = call_args + ["-o%s" % args.output_file]
build_job = start_command(build_args, env)
# Wait for build jobs to complete
stdout, stderr = depfile_job.communicate()
if stdout or stderr:
print(stdout + stderr)
if depfile_job.returncode != 0:
return depfile_job.returncode
fix_depfile(args.depfile, os.getcwd(), args.output_file)
stdout, stderr = build_job.communicate()
if stdout or stderr:
print(stdout + stderr)
if build_job.returncode != 0:
return build_job.returncode
if __name__ == '__main__':
sys.exit(main())
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'NAP.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
sdk-java/kar-runtime-liberty/src/main/java/com/ibm/research/kar/Kar.java | /*
* Copyright IBM Corporation 2020,2021
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.research.kar;
import java.net.URI;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
import javax.json.Json;
import javax.json.JsonArray;
import javax.json.JsonArrayBuilder;
import javax.json.JsonBuilderFactory;
import javax.json.JsonNumber;
import javax.json.JsonObject;
import javax.json.JsonObjectBuilder;
import javax.json.JsonValue;
import javax.ws.rs.ProcessingException;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
import com.ibm.research.kar.actor.ActorInstance;
import com.ibm.research.kar.actor.ActorRef;
import com.ibm.research.kar.actor.Reminder;
import com.ibm.research.kar.actor.Subscription;
import com.ibm.research.kar.actor.exceptions.ActorMethodInvocationException;
import com.ibm.research.kar.actor.exceptions.ActorMethodNotFoundException;
import com.ibm.research.kar.actor.exceptions.ActorMethodTimeoutException;
import com.ibm.research.kar.liberty.KarSidecar;
import com.ibm.research.kar.runtime.KarConfig;
import org.eclipse.microprofile.rest.client.RestClientBuilder;
public class Kar {
public static final String KAR_ACTOR_JSON = "application/kar+json";
public static final MediaType KAR_ACTOR_JSON_TYPE = new MediaType("application", "kar+json");
private static final Logger logger = Logger.getLogger(Kar.class.getName());
private static final KarSidecar sidecar = instantiateSidecar();
private static final JsonBuilderFactory factory = Json.createBuilderFactory(Map.of());
private Kar() {
}
private static KarSidecar instantiateSidecar() {
String port = System.getenv("KAR_RUNTIME_PORT");
if (port == null || port.trim().isEmpty()) {
logger.severe("KAR_RUNTIME_PORT is not set. Fatal misconfiguration. Forcing immediate hard exit of JVM.");
Runtime.getRuntime().halt(1);
}
String baseURIStr = "http://localhost:" + port + "/";
logger.fine("KAR Sidecar base URI is " + baseURIStr);
URI sidecarURI = URI.create(baseURIStr);
RestClientBuilder builder = RestClientBuilder.newBuilder().baseUri(sidecarURI);
KarSidecar sidecar = builder.readTimeout(KarConfig.SIDECAR_CONNECTION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)
.connectTimeout(KarConfig.SIDECAR_CONNECTION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS).build(KarSidecar.class);
return sidecar;
}
private static JsonArray packArgs(JsonValue[] args) {
JsonArrayBuilder ja = factory.createArrayBuilder();
for (JsonValue a : args) {
ja.add(a);
}
return ja.build();
}
private static Object toValue(Response response) {
if (response.hasEntity()) {
MediaType type = response.getMediaType();
MediaType basicType = new MediaType(type.getType(), type.getSubtype());
if (basicType.equals(MediaType.APPLICATION_JSON_TYPE) || basicType.equals(KAR_ACTOR_JSON_TYPE)) {
return response.readEntity(JsonValue.class);
} else if (basicType.equals(MediaType.TEXT_PLAIN_TYPE)) {
return response.readEntity(String.class);
} else {
return JsonValue.NULL;
}
} else {
return JsonValue.NULL;
}
}
private static int toInt(Response response) {
if (response.hasEntity()) {
return response.readEntity(java.lang.Integer.TYPE);
} else {
return 0;
}
}
private static String responseToString(Response response) {
if (response.hasEntity()) {
MediaType type = response.getMediaType();
MediaType basicType = new MediaType(type.getType(), type.getSubtype());
if (basicType.equals(MediaType.APPLICATION_JSON_TYPE) || basicType.equals(KAR_ACTOR_JSON_TYPE)) {
return response.readEntity(JsonValue.class).toString();
} else if (basicType.equals(MediaType.TEXT_PLAIN_TYPE)) {
return response.readEntity(String.class);
}
}
return null;
}
private static Reminder[] toReminderArray(Response response) {
try {
ArrayList<Reminder> res = new ArrayList<Reminder>();
JsonArray ja = ((JsonValue) toValue(response)).asJsonArray();
for (JsonValue jv : ja) {
try {
JsonObject jo = jv.asJsonObject();
String actorType = jo.getJsonObject("Actor").getString("Type");
String actorId = jo.getJsonObject("Actor").getString("ID");
String id = jo.getString("id");
String path = jo.getString("path");
String targetTimeString = jo.getString("targetTime");
Instant targetTime = Instant.parse(targetTimeString);
Duration period = null;
if (jo.get("period") != null) {
long nanos = ((JsonNumber) jo.get("period")).longValueExact();
period = Duration.ofNanos(nanos);
}
String encodedData = jo.getString("encodedData");
Reminder r = new Reminder(Actors.ref(actorType, actorId), id, path, targetTime, period, encodedData);
res.add(r);
} catch (ClassCastException e) {
logger.warning("toReminderArray: Dropping unexpected element " + jv);
}
}
return res.toArray(new Reminder[res.size()]);
} catch (ClassCastException e) {
return new Reminder[0];
}
}
private static Subscription[] toSubscriptionArray(Response response) {
try {
ArrayList<Subscription> res = new ArrayList<Subscription>();
JsonArray ja = ((JsonValue) toValue(response)).asJsonArray();
for (JsonValue jv : ja) {
try {
JsonObject jo = jv.asJsonObject();
String actorType = jo.getJsonObject("Actor").getString("Type");
String actorId = jo.getJsonObject("Actor").getString("ID");
String id = jo.getString("id");
String path = jo.getString("path");
String topic = jo.getString("topic");
Subscription s = new Subscription(Actors.ref(actorType, actorId), id, path, topic);
res.add(s);
} catch (ClassCastException e) {
logger.warning("toReminderArray: Dropping unexpected element " + jv);
}
}
return res.toArray(new Subscription[res.size()]);
} catch (ClassCastException e) {
return new Subscription[0];
}
}
private static final class ActorRefImpl implements ActorRef {
final String type;
final String id;
ActorRefImpl(String type, String id) {
this.type = type;
this.id = id;
}
@Override
public String getType() {
return type;
}
@Override
public String getId() {
return id;
}
}
/******************
* KAR API
******************/
/**
* KAR API methods for Services
*/
public static class Services {
/*
* Lower-level REST operations on a KAR Service
*/
/**
* Synchronous REST DELETE
*
* @param service The name of the service.
* @param path The service endpoint.
* @return The response returned by the target service.
*/
public static Response delete(String service, String path) {
return sidecar.callDelete(service, path);
}
/**
* Asynchronous REST DELETE
*
* @param service The name of the service.
* @param path The service endpoint.
* @return The response returned by the target service.
*/
public static CompletionStage<Response> deleteAsync(String service, String path) {
return sidecar.callAsyncDelete(service, path);
}
/**
* Synchronous REST GET
*
* @param service The name of the service.
* @param path The service endpoint.
* @return The response returned by the target service.
*/
public static Response get(String service, String path) {
return sidecar.callGet(service, path);
}
/**
* Asynchronous REST GET
*
* @param service The name of the service.
* @param path The service endpoint.
* @return The response returned by the target service.
*/
public static CompletionStage<Response> getAsync(String service, String path) {
return sidecar.callAsyncGet(service, path);
}
/**
* Synchronous REST HEAD
*
* @param service The name of the service.
* @param path The service endpoint.
* @return The response returned by the target service.
*/
public static Response head(String service, String path) {
return sidecar.callHead(service, path);
}
/**
* Asynchronous REST HEAD
*
* @param service The name of the service.
* @param path The service endpoint.
* @return The response returned by the target service.
*/
public static CompletionStage<Response> headAsync(String service, String path) {
return sidecar.callAsyncHead(service, path);
}
/**
* Synchronous REST OPTIONS
*
* @param service The name of the service.
* @param path The service endpoint.
* @return The response returned by the target service.
*/
public static Response options(String service, String path) {
return sidecar.callOptions(service, path, JsonValue.NULL);
}
/**
* Synchronous REST OPTIONS
*
* @param service The name of the service.
* @param path The service endpoint.
* @param body The request body.
* @return The response returned by the target service.
*/
public static Response options(String service, String path, JsonValue body) {
return sidecar.callOptions(service, path, body);
}
/**
* Asynchronous REST OPTIONS
*
* @param service The name of the service.
* @param path The service endpoint.
* @return The response returned by the target service.
*/
public static CompletionStage<Response> optionsAsync(String service, String path) {
return sidecar.callAsyncOptions(service, path, JsonValue.NULL);
}
/**
* Asynchronous REST OPTIONS
*
* @param service The name of the service.
* @param path The service endpoint.
* @param body The request body.
* @return The response returned by the target service.
*/
public static CompletionStage<Response> optionsAsync(String service, String path, JsonValue body) {
return sidecar.callAsyncOptions(service, path, body);
}
/**
* Synchronous REST PATCH
*
* @param service The name of the service.
* @param path The service endpoint.
* @param body The request body.
* @return The response returned by the target service.
*/
public static Response patch(String service, String path, JsonValue body) {
return sidecar.callPatch(service, path, body);
}
/**
* Asynchronous REST PATCH
*
* @param service The name of the service.
* @param path The service endpoint.
* @param body The request body.
* @return The response returned by the target service.
*/
public static CompletionStage<Response> patchAsync(String service, String path, JsonValue body) {
return sidecar.callAsyncPatch(service, path, body);
}
/**
* Synchronous REST POST
*
* @param service The name of the service.
* @param path The service endpoint.
* @param body The request body.
* @return The response returned by the target service.
*/
public static Response post(String service, String path, JsonValue body) {
return sidecar.callPost(service, path, body);
}
/**
* Asynchronous REST POST
*
* @param service The name of the service.
* @param path The service endpoint.
* @param body The request body.
* @return The response returned by the target service.
*/
public static CompletionStage<Response> postAsync(String service, String path, JsonValue body) {
return sidecar.callAsyncPost(service, path, body);
}
/**
* Synchronous REST PUT
*
* @param service The name of the service.
* @param path The service endpoint.
* @param body The request body.
* @return The response returned by the target service.
*/
public static Response put(String service, String path, JsonValue body) {
return sidecar.callPut(service, path, body);
}
/**
* Asynchronous REST PUT
*
* @param service The name of the service.
* @param path The service endpoint.
* @param body The request body.
* @return The response returned by the target service.
*/
public static CompletionStage<Response> putAsync(String service, String path, JsonValue body) {
return sidecar.callAsyncPut(service, path, body);
}
/*
* Higher-level Service call/tell operations that hide the REST layer
*/
/**
* Asynchronous service invocation; returns as soon as the invocation has been
* initiated.
*
* @param service The name of the service to invoke.
* @param path The service endpoint to invoke.
* @param body The request body with which to invoke the service endpoint.
*/
public static void tell(String service, String path, JsonValue body) {
sidecar.tellPost(service, path, body);
}
/**
* Synchronous service invocation
*
* @param service The name of the service to invoke.
* @param path The service endpoint to invoke.
* @param body The request body with which to invoke the service endpoint.
* @return The result returned by the target service.
*/
public static Object call(String service, String path, JsonValue body) {
Response resp = sidecar.callPost(service, path, body);
return toValue(resp);
}
/**
* Asynchronous service invocation with eventual access to the result of the
* invocation
*
* @param service The name of the service to invoke.
* @param path The service endpoint to invoke.
* @param body The request body with which to invoke the service endpoint.
* @return A CompletionStage containing the result of invoking the target
* service.
*/
public static CompletionStage<Object> callAsync(String service, String path, JsonValue body) {
return sidecar.callAsyncPost(service, path, body).thenApply(response -> toValue(response));
}
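// Illustrative usage only (service name, route, and payload are hypothetical):
//   JsonObject body = Json.createObjectBuilder().add("number", 42).build();
//   Object reply = Kar.Services.call("greeter", "hello", body);
//   Kar.Services.callAsync("greeter", "hello", body)
//       .thenAccept(result -> System.out.println(result));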
}
/**
* KAR API methods for Actors
*/
public static class Actors {
/**
* Construct an ActorRef that represents a specific Actor instance.
*
* @param type The type of the Actor instance
* @param id The instance id of the Actor instance
* @return An ActorRef representing the Actor instance.
*/
public static ActorRef ref(String type, String id) {
return new ActorRefImpl(type, id);
}
/**
* Asynchronously remove all user-level and runtime state of an Actor.
*
* @param actor The Actor instance.
*/
public static void remove(ActorRef actor) {
sidecar.actorDelete(actor.getType(), actor.getId());
}
/**
* Asynchronous actor invocation; returns as soon as the invocation has been
* initiated.
*
* @param actor The target actor.
* @param path The actor method to invoke.
* @param args The arguments with which to invoke the actor method.
*/
public static void tell(ActorRef actor, String path, JsonValue... args) {
sidecar.actorTell(actor.getType(), actor.getId(), path, packArgs(args));
}
/**
* Synchronous actor invocation where the invoked method will execute as part of
* the current session.
*
* @param caller The calling actor.
* @param actor The target actor.
* @param path The actor method to invoke.
* @param args The arguments with which to invoke the actor method.
* @return The result of the invoked actor method.
*/
public static JsonValue call(ActorInstance caller, ActorRef actor, String path, JsonValue... args)
throws ActorMethodNotFoundException, ActorMethodInvocationException {
try {
Response response = sidecar.actorCall(actor.getType(), actor.getId(), path, caller.getSession(),
packArgs(args));
return callProcessResponse(response);
} catch (WebApplicationException e) {
if (e.getResponse() != null && e.getResponse().getStatus() == 404) {
String msg = responseToString(e.getResponse());
throw new ActorMethodNotFoundException(
msg != null ? msg : "Not found: " + actor.getType() + "[" + actor.getId() + "]." + path, e);
} else if (e.getResponse() != null && e.getResponse().getStatus() == 408) {
throw new ActorMethodTimeoutException(
"Method timeout: " + actor.getType() + "[" + actor.getId() + "]." + path);
} else {
throw e;
}
}
}
/**
* Synchronous actor invocation where the invoked method will execute as part of
* the specified session.
*
* @param session The session in which to execute the actor method
* @param actor The target actor.
* @param path The actor method to invoke.
* @param args The arguments with which to invoke the actor method.
* @return The result of the invoked actor method.
*/
public static JsonValue call(String session, ActorRef actor, String path, JsonValue... args)
throws ActorMethodNotFoundException, ActorMethodInvocationException, ActorMethodTimeoutException {
try {
Response response = sidecar.actorCall(actor.getType(), actor.getId(), path, session, packArgs(args));
return callProcessResponse(response);
} catch (WebApplicationException e) {
if (e.getResponse() != null && e.getResponse().getStatus() == 404) {
String msg = responseToString(e.getResponse());
throw new ActorMethodNotFoundException(
msg != null ? msg : "Not found: " + actor.getType() + "[" + actor.getId() + "]." + path, e);
} else if (e.getResponse() != null && e.getResponse().getStatus() == 408) {
throw new ActorMethodTimeoutException(
"Method timeout: " + actor.getType() + "[" + actor.getId() + "]." + path);
} else {
throw e;
}
}
}
/**
* Synchronous actor invocation where the invoked method will execute in a new
* session.
*
* @param actor The target Actor.
* @param path The actor method to invoke.
* @param args The arguments with which to invoke the actor method.
* @return The result of the invoked actor method.
*/
public static JsonValue call(ActorRef actor, String path, JsonValue... args)
throws ActorMethodNotFoundException, ActorMethodInvocationException {
try {
Response response = sidecar.actorCall(actor.getType(), actor.getId(), path, null, packArgs(args));
return callProcessResponse(response);
} catch (WebApplicationException e) {
if (e.getResponse() != null && e.getResponse().getStatus() == 404) {
String msg = responseToString(e.getResponse());
throw new ActorMethodNotFoundException(
msg != null ? msg : "Not found: " + actor.getType() + "[" + actor.getId() + "]." + path, e);
} else if (e.getResponse() != null && e.getResponse().getStatus() == 408) {
throw new ActorMethodTimeoutException(
"Method timeout: " + actor.getType() + "[" + actor.getId() + "]." + path);
} else {
throw e;
}
}
}
/**
* Asynchronous actor invocation with eventual access to the result of the
* invocation.
*
* @param actor The target Actor.
* @param path The actor method to invoke.
* @param args The arguments with which to invoke the actor method.
* @return A CompletionStage containing the response returned from the actor
* method invocation.
*/
public static CompletionStage<JsonValue> callAsync(ActorRef actor, String path, JsonValue... args) {
CompletionStage<Response> cr = sidecar.actorCallAsync(actor.getType(), actor.getId(), path, null, packArgs(args));
return cr.thenApply(r -> callProcessResponse(r));
}
// Internal helper to go from a Response to the JsonValue representing the
// result of the method (or an exception)
private static JsonValue callProcessResponse(Response response)
throws ActorMethodNotFoundException, ActorMethodInvocationException {
if (response.getStatus() == Status.OK.getStatusCode()) {
JsonObject o = ((JsonValue) toValue(response)).asJsonObject();
if (o.containsKey("error")) {
String message = o.containsKey("message") ? o.getString("message") : "Unknown error";
Throwable cause = o.containsKey("stack") ? new Throwable(o.getString("stack")) : null;
if (cause != null) {
// Avoid duplicating the stack trace where we create this dummy exception; the real stack is in the message.
cause.setStackTrace(new StackTraceElement[0]);
}
throw new ActorMethodInvocationException(message, cause);
} else {
return o.containsKey("value") ? o.get("value") : JsonValue.NULL;
}
} else if (response.getStatus() == Status.NOT_FOUND.getStatusCode()) {
if (response.hasEntity()) {
throw new ActorMethodNotFoundException(toValue(response).toString());
} else {
throw new ActorMethodNotFoundException();
}
} else if (response.getStatus() == Status.NO_CONTENT.getStatusCode()) {
return null;
} else {
throw new ProcessingException(response.getStatus() + ": " + toValue(response));
}
}
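// For reference, a sketch of the envelope unwrapped above (field values are
// illustrative, not normative): a successful invocation yields
//   { "value": 42 }
// while a failed one yields
//   { "error": true, "message": "...", "stack": "..." }
// and is surfaced to the caller as an ActorMethodInvocationException.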
/**
* KAR API methods for Actor Reminders
*/
public static class Reminders {
/**
* Cancel all reminders for an Actor instance.
*
* @param actor The Actor instance.
* @return The number of reminders that were cancelled.
*/
public static int cancelAll(ActorRef actor) {
Response response = sidecar.actorCancelReminders(actor.getType(), actor.getId());
return toInt(response);
}
/**
* Cancel a specific reminder for an Actor instance.
*
* @param actor The Actor instance.
* @param reminderId The id of a specific reminder to cancel
* @return The number of reminders that were cancelled.
*/
public static int cancel(ActorRef actor, String reminderId) {
Response response = sidecar.actorCancelReminder(actor.getType(), actor.getId(), reminderId, true);
return toInt(response);
}
/**
* Get all reminders for an Actor instance.
*
* @param actor The Actor instance.
* @return An array of matching reminders
*/
public static Reminder[] getAll(ActorRef actor) {
Response response = sidecar.actorGetReminders(actor.getType(), actor.getId());
return toReminderArray(response);
}
/**
* Get a specific reminder for an Actor instance.
*
* @param actor The Actor instance.
* @param reminderId The id of the specific reminder to retrieve
* @return An array of matching reminders
*/
public static Reminder[] get(ActorRef actor, String reminderId) {
Response response = sidecar.actorGetReminder(actor.getType(), actor.getId(), reminderId, true);
return toReminderArray(response);
}
/**
* Schedule a reminder for an Actor instance.
*
* @param actor The Actor instance.
* @param path The actor method to invoke when the reminder fires.
* @param reminderId The id of the reminder being scheduled
* @param targetTime The earliest time at which the reminder should be delivered
* @param period For periodic reminders, a String that is compatible with
* GoLang's Duration
* @param args The arguments with which to invoke the actor method.
*/
public static void schedule(ActorRef actor, String path, String reminderId, Instant targetTime, Duration period,
JsonValue... args) {
JsonObjectBuilder builder = factory.createObjectBuilder();
builder.add("path", "/" + path);
builder.add("targetTime", targetTime.toString());
if (period != null) {
// Sigh. Encode in a way that GoLang will understand since it sadly doesn't
// actually implement ISO-8601
String goPeriod = "";
if (period.toHours() > 0) {
goPeriod += period.toHours() + "h";
period = period.minusHours(period.toHours()); // Duration is immutable; keep only the remainder
}
if (period.toMinutes() > 0) {
goPeriod += period.toMinutes() + "m";
period = period.minusMinutes(period.toMinutes());
}
if (period.toSeconds() > 0) {
goPeriod += period.toSeconds() + "s";
period = period.minusSeconds(period.toSeconds());
}
if (period.toMillis() > 0) {
goPeriod += period.toMillis() + "ms";
period = period.minusMillis(period.toMillis());
}
builder.add("period", goPeriod);
}
builder.add("data", packArgs(args));
JsonObject requestBody = builder.build();
sidecar.actorScheduleReminder(actor.getType(), actor.getId(), reminderId, requestBody);
}
}
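// Illustrative usage sketch (not part of the SDK source): how the Reminders API above
// might be called from application code. The way the ActorRef is obtained is an
// assumption; only methods defined above are exercised.
//
// ActorRef fridge = ...; // obtained elsewhere via the SDK's actor-reference helper
// // fire "checkTemp" on the actor in 60 seconds, then every 10 minutes
// Reminders.schedule(fridge, "checkTemp", "temp-check",
// Instant.now().plusSeconds(60), Duration.ofMinutes(10));
// int cancelled = Reminders.cancel(fridge, "temp-check"); // 1 if it was still pending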
/**
* KAR API methods for Actor State
*/
public static class State {
public static class ActorUpdateResult {
public final int added;
public final int removed;
ActorUpdateResult(int added, int removed) {
this.added = added;
this.removed = removed;
}
};
/**
* Get one value from an Actor's state
*
* @param actor The Actor instance.
* @param key The key to use to access the instance's state
* @return The value associated with `key`
*/
public static JsonValue get(ActorRef actor, String key) {
JsonValue value;
try {
Response resp = sidecar.actorGetState(actor.getType(), actor.getId(), key, true);
return (JsonValue) toValue(resp);
} catch (WebApplicationException e) {
value = JsonValue.NULL;
}
return value;
}
/**
* Get all of an Actor's state.
*
* @param actor The Actor instance.
* @return A map representing the Actor's state
*/
public static Map<String, JsonValue> getAll(ActorRef actor) {
Response response = sidecar.actorGetAllState(actor.getType(), actor.getId());
try {
return ((JsonValue) toValue(response)).asJsonObject();
} catch (ClassCastException e) {
return Collections.emptyMap();
}
}
/**
* Check to see if an entry exists in an Actor's state
*
* @param actor The Actor instance.
* @param key The key to check against the instance's state
* @return `true` if the actor instance has a value defined for `key`, `false`
* otherwise.
*/
public static boolean contains(ActorRef actor, String key) {
Response resp;
try {
resp = sidecar.actorHeadState(actor.getType(), actor.getId(), key);
} catch (WebApplicationException e) {
resp = e.getResponse();
}
return resp != null && resp.getStatus() == Status.OK.getStatusCode();
}
/**
* Store one value to an Actor's state
*
* @param actor The Actor instance.
* @param key The key to use to access the instance's state
* @param value The value to store
* @return The number of new state entries created by this store (0 or 1)
*/
public static int set(ActorRef actor, String key, JsonValue value) {
Response response = sidecar.actorSetState(actor.getType(), actor.getId(), key, value);
return response.getStatus() == Status.CREATED.getStatusCode() ? 1 : 0;
}
/**
* Store multiple values to an Actor's state
*
* @param actor The Actor instance.
* @param updates A map containing the state updates to perform
* @return The number of new state entries created by this operation
*/
public static int set(ActorRef actor, Map<String, JsonValue> updates) {
if (updates.isEmpty())
return 0;
ActorUpdateResult result = update(actor, Collections.emptyList(), Collections.emptyMap(), updates,
Collections.emptyMap());
return result.added;
}
/**
* Remove one value from an Actor's state
*
* @param actor The Actor instance.
* @param key The key to delete
* @return `1` if an entry was actually removed and `0` if there was no entry
* for `key`.
*/
public static int remove(ActorRef actor, String key) {
Response response = sidecar.actorDeleteState(actor.getType(), actor.getId(), key, true);
return toInt(response);
}
/**
* Remove multiple values from an Actor's state
*
* @param actor The Actor instance.
* @param keys The keys to delete
* @return the number of entries actually removed
*/
public static int removeAll(ActorRef actor, List<String> keys) {
if (keys.isEmpty())
return 0;
ActorUpdateResult res = update(actor, keys, Collections.emptyMap(), Collections.emptyMap(),
Collections.emptyMap());
return res.removed;
}
/**
* Remove all elements of an Actor's user level state. Unlike
* {@link Actors#remove} this method is synchronous and does not remove the
* KAR-level mapping of the instance to a specific runtime Process.
*
* @param actor The Actor instance.
* @return The number of removed key/value pairs
*/
public static int removeAll(ActorRef actor) {
Response response = sidecar.actorDeleteAllState(actor.getType(), actor.getId());
return toInt(response);
}
/**
* Perform a multi-element update operation on an Actor's state. This method is
* the most general form of Actor state update and enables both top-level keys
* and submap keys to be removed and updated in a single KAR operation.
*
* @param actor The Actor instance.
* @param removals The keys to remove from the actor state
* @param submapRemovals A mapping from submap names to the keys to remove from
* each submap
* @param updates The updates to perform to the actor's state
* @param submapUpdates A mapping from submap names to the updates to perform
* on each submap
* @return An object containing the number of state entries removed and added by
* the update.
*/
public static ActorUpdateResult update(ActorRef actor, List<String> removals,
Map<String, List<String>> submapRemovals, Map<String, JsonValue> updates,
Map<String, Map<String, JsonValue>> submapUpdates) {
JsonObjectBuilder requestBuilder = factory.createObjectBuilder();
if (!removals.isEmpty()) {
JsonArrayBuilder jb = factory.createArrayBuilder();
for (String k : removals) {
jb.add(k);
}
requestBuilder.add("removals", jb.build());
}
if (!submapRemovals.isEmpty()) {
JsonObjectBuilder smr = factory.createObjectBuilder();
for (Entry<String, List<String>> e : submapRemovals.entrySet()) {
JsonArrayBuilder jb = factory.createArrayBuilder();
for (String k : e.getValue()) {
jb.add(k);
}
smr.add(e.getKey(), jb.build());
}
requestBuilder.add("submapremovals", smr.build());
}
if (!updates.isEmpty()) {
JsonObjectBuilder u = factory.createObjectBuilder();
for (Entry<String, JsonValue> e : updates.entrySet()) {
u.add(e.getKey(), e.getValue());
}
requestBuilder.add("updates", u.build());
}
if (!submapUpdates.isEmpty()) {
JsonObjectBuilder smu = factory.createObjectBuilder();
for (Entry<String, Map<String, JsonValue>> e : submapUpdates.entrySet()) {
JsonObjectBuilder u = factory.createObjectBuilder();
for (Entry<String, JsonValue> e2 : e.getValue().entrySet()) {
u.add(e2.getKey(), e2.getValue());
}
smu.add(e.getKey(), u.build());
}
requestBuilder.add("submapupdates", smu.build());
}
JsonObject params = requestBuilder.build();
Response response = sidecar.actorUpdate(actor.getType(), actor.getId(), params);
JsonObject responseObject = ((JsonValue) toValue(response)).asJsonObject();
int added = responseObject.getInt("added");
int removed = responseObject.getInt("removed");
return new ActorUpdateResult(added, removed);
}
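// Illustrative usage sketch (assumption: called from application code with an ActorRef
// obtained elsewhere). A single update() call that removes one key and stores one entry:
//
// Map<String, JsonValue> newEntries = new HashMap<>();
// newEntries.put("count", Json.createValue(42));
// ActorUpdateResult r = update(actor, Collections.singletonList("obsolete"),
// Collections.emptyMap(), newEntries, Collections.emptyMap());
// // r.added is 1 if "count" was not present before; r.removed is 1 if "obsolete" existed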
/**
* KAR API methods for optimized operations for storing a map as a nested
* element of an Actor's state.
*/
public static class Submap {
/**
* Get one value from a submap of an Actor's state
*
* @param actor The Actor instance.
* @param submap The name of the submap
* @param key The subkey to use to access the instance's state
* @return The value associated with `submap/key`
*/
public static JsonValue get(ActorRef actor, String submap, String key) {
JsonValue value;
try {
Response resp = sidecar.actorGetWithSubkeyState(actor.getType(), actor.getId(), submap, key, true);
return (JsonValue) toValue(resp);
} catch (WebApplicationException e) {
value = JsonValue.NULL;
}
return value;
}
/**
* Get all key/value pairs of the given submap
*
* @param actor The Actor instance
* @param submap The name of the submap
* @return A map containing the subkey/value pairs currently defined in the submap
*/
public static Map<String, JsonValue> getAll(ActorRef actor, String submap) {
JsonObjectBuilder jb = factory.createObjectBuilder();
jb.add("op", Json.createValue("get"));
JsonObject params = jb.build();
Response response = sidecar.actorSubmapOp(actor.getType(), actor.getId(), submap, params);
try {
return ((JsonValue) toValue(response)).asJsonObject();
} catch (ClassCastException e) {
return Collections.emptyMap();
}
}
/**
* Check to see if an entry exists in a submap in an Actor's state
*
* @param actor The Actor instance.
* @param submap The name of the submap
* @param key The key to check for in the given submap
* @return `true` if the actor instance has a value defined for `submap/key`,
* `false` otherwise.
*/
public static boolean contains(ActorRef actor, String submap, String key) {
Response resp;
try {
resp = sidecar.actorHeadWithSubkeyState(actor.getType(), actor.getId(), submap, key);
} catch (WebApplicationException e) {
resp = e.getResponse();
}
return resp != null && resp.getStatus() == Status.OK.getStatusCode();
}
/**
* Store one value to a submap in an Actor's state
*
* @param actor The Actor instance.
* @param submap The name of the submap to update
* @param key The key in the submap to update
* @param value The value to store at `submap/key`
* @return The number of new state entries created by this store (0 or 1)
*/
public static int set(ActorRef actor, String submap, String key, JsonValue value) {
Response response = sidecar.actorSetWithSubkeyState(actor.getType(), actor.getId(), submap, key, value);
return response.getStatus() == Status.CREATED.getStatusCode() ? 1 : 0;
}
/**
* Store multiple values to an Actor sub-map with name `submap`
*
* @param actor The Actor instance.
* @param submap The name of the submap to which the updates should be
* performed
* @param updates A map containing the (subkey, value) pairs to store
* @return The number of new map entries created by this operation
*/
public static int set(ActorRef actor, String submap, Map<String, JsonValue> updates) {
if (updates.isEmpty())
return 0;
Map<String, Map<String, JsonValue>> tmp = new HashMap<String, Map<String, JsonValue>>();
tmp.put(submap, updates);
ActorUpdateResult res = update(actor, Collections.emptyList(), Collections.emptyMap(), Collections.emptyMap(),
tmp);
return res.added;
}
/**
* Remove one value from a submap in the Actor's state
*
* @param actor The Actor instance.
* @param submap The name of the submap from which to delete the key
* @param key The key of the entry to delete from the submap
* @return `1` if an entry was actually removed and `0` if there was no entry
* for `key`.
*/
public static int remove(ActorRef actor, String submap, String key) {
Response response = sidecar.actorDeleteWithSubkeyState(actor.getType(), actor.getId(), submap, key, true);
return toInt(response);
}
/**
* Remove multiple values from one submap of an Actor's state
*
* @param actor The Actor instance.
* @param submap The name of the submap from which to delete the keys
* @param keys The keys to delete
* @return the number of entries actually removed
*/
public static int removeAll(ActorRef actor, String submap, List<String> keys) {
if (keys.isEmpty())
return 0;
Map<String, List<String>> tmp = new HashMap<String, List<String>>();
tmp.put(submap, keys);
ActorUpdateResult res = update(actor, Collections.emptyList(), tmp, Collections.emptyMap(),
Collections.emptyMap());
return res.removed;
}
/**
* Remove all values from a submap in the Actor's state.
*
* @param actor The Actor instance
* @param submap The name of the submap
* @return The number of removed subkey entries
*/
public static int removeAll(ActorRef actor, String submap) {
JsonObjectBuilder jb = factory.createObjectBuilder();
jb.add("op", Json.createValue("clear"));
JsonObject params = jb.build();
Response response = sidecar.actorSubmapOp(actor.getType(), actor.getId(), submap, params);
return toInt(response);
}
/**
* Get the keys of the given submap
*
* @param actor The Actor instance
* @param submap The name of the submap
* @return An array containing the currently defined subkeys
*/
public static String[] keys(ActorRef actor, String submap) {
JsonObjectBuilder jb = factory.createObjectBuilder();
jb.add("op", Json.createValue("keys"));
JsonObject params = jb.build();
Response response = sidecar.actorSubmapOp(actor.getType(), actor.getId(), submap, params);
Object[] jstrings = ((JsonValue) toValue(response)).asJsonArray().toArray();
String[] ans = new String[jstrings.length];
for (int i = 0; i < jstrings.length; i++) {
ans[i] = ((JsonValue) jstrings[i]).toString();
}
return ans;
}
/**
* Get the number of keys in the given submap
*
* @param actor The Actor instance
* @param submap The name of the submap
* @return The number of currently defined keys in the submap
*/
public static int size(ActorRef actor, String submap) {
JsonObjectBuilder jb = Json.createObjectBuilder();
jb.add("op", Json.createValue("size"));
JsonObject params = jb.build();
Response response = sidecar.actorSubmapOp(actor.getType(), actor.getId(), submap, params);
return toInt(response);
}
}
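// Illustrative usage sketch for the Submap helpers above (ActorRef assumed to come
// from elsewhere in the SDK; names are made up for illustration):
//
// Submap.set(actor, "preferences", "theme", Json.createValue("dark"));
// Map<String, JsonValue> prefs = Submap.getAll(actor, "preferences");
// String[] definedKeys = Submap.keys(actor, "preferences");
// int cleared = Submap.removeAll(actor, "preferences"); // number of entries removed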
}
}
/**
* KAR API methods for Eventing.
*/
public static class Events {
/**
* Cancel all subscriptions for an Actor instance.
*
* @param actor The Actor instance.
* @return The number of subscriptions that were cancelled.
*/
public static int cancelAllSubscriptions(ActorRef actor) {
Response response = sidecar.actorCancelAllSubscriptions(actor.getType(), actor.getId());
return toInt(response);
}
/**
* Cancel a specific subscription for an Actor instance.
*
* @param actor The Actor instance.
* @param subscriptionId The id of a specific subscription to cancel
* @return The number of subscriptions that were cancelled.
*/
public static int cancelSubscription(ActorRef actor, String subscriptionId) {
Response response = sidecar.actorCancelSubscription(actor.getType(), actor.getId(), subscriptionId);
return toInt(response);
}
/**
* Get all subscriptions for an Actor instance.
*
* @param actor The Actor instance.
* @return An array of subscriptions
*/
public static Subscription[] getSubscriptions(ActorRef actor) {
Response response = sidecar.actorGetAllSubscriptions(actor.getType(), actor.getId());
return toSubscriptionArray(response);
}
/**
* Get a specific subscription for an Actor instance.
*
* @param actor The Actor instance.
* @param subscriptionId The id of a specific subscription to get
* @return An array of zero or one subscription
*/
public static Subscription[] getSubscription(ActorRef actor, String subscriptionId) {
Response response = sidecar.actorGetSubscription(actor.getType(), actor.getId(), subscriptionId);
return toSubscriptionArray(response);
}
/**
* Subscribe an Actor instance method to a topic.
*
* @param actor The Actor instance to subscribe
* @param path The actor method to invoke on each event received on the topic
* @param topic The topic to which to subscribe
*/
public static void subscribe(ActorRef actor, String path, String topic) {
subscribe(actor, path, topic, topic);
}
/**
* Subscribe an Actor instance method to a topic.
*
* @param actor The Actor instance to subscribe
* @param path The actor method to invoke on each event received on
* the topic
* @param topic The topic to which to subscribe
* @param subscriptionId The subscriptionId to use for this subscription
*/
public static void subscribe(ActorRef actor, String path, String topic, String subscriptionId) {
JsonObjectBuilder builder = factory.createObjectBuilder();
builder.add("path", "/" + path);
builder.add("topic", topic);
JsonObject data = builder.build();
sidecar.actorSubscribe(actor.getType(), actor.getId(), subscriptionId, data);
}
/**
* Create a topic using the default Kafka configuration options.
*
* @param topic The name of the topic to create
*/
public static void createTopic(String topic) {
sidecar.eventCreateTopic(topic, JsonValue.EMPTY_JSON_OBJECT);
}
/**
* Delete a topic.
*
* @param topic the name of the topic to delete
*/
public static void deleteTopic(String topic) {
sidecar.eventDeleteTopic(topic);
}
/**
* Publish an event on a topic.
*
* @param topic the name of the topic on which to publish
* @param event the event to publish
*/
public static void publish(String topic, JsonValue event) {
sidecar.eventPublish(topic, event);
}
}
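// Illustrative usage sketch for the Events API above (the actor reference and topic
// name are made up for illustration):
//
// Events.createTopic("orders");
// Events.subscribe(actor, "handleOrder", "orders"); // invoke actor method "handleOrder" per event
// Events.publish("orders", Json.createValue("order-123"));
// Subscription[] subs = Events.getSubscriptions(actor);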
/**
* KAR API methods for directly interacting with the KAR service mesh
*/
public static class Sys {
/**
* Shutdown this sidecar. Does not return.
*/
public static void shutdown() {
sidecar.shutdown();
}
/**
* Get information about a system component.
*
* @param component The component whose information is being requested
* @return information about the given component
*/
public static Object information(String component) {
Response response = sidecar.systemInformation(component);
return toValue(response);
}
}
}
| ["\"KAR_RUNTIME_PORT\""] | [] | ["KAR_RUNTIME_PORT"] | [] | ["KAR_RUNTIME_PORT"] | java | 1 | 0 |
apogee/tools/path.py | ##################################################################################
#
# apogee.tools.path: return the path of various APOGEE data files
#
# This file depends on various environment variables that should be set:
#
# - SDSS_LOCAL_SAS_MIRROR: top-level directory with data
# - RESULTS_VERS: APOGEE reduction version (e.g., v304 for DR10)
# - APOGEE_APOKASC_REDUX: APOKASC catalog version
#
# contains:
#
# - allStarPath: the path of the allStar file
# - allVisitPath: the path of the allVisit file
# - apogeeDesignPath: path of the apogeeDesign file
# - apogeeFieldPath: path of the apogeeField file
# - apogeeObjectPath: path of an apogeeObject file
# - apogeePlatePath: path of the apogeePlate file
# - apokascPath: path of the APOKASC catalog
# - distPath: path of the file that has APOGEE distances
# - obslogPath: path of the observation log
# - rcsamplePath: path of the red clump sample file
# - apStarPath: path of a apStar file
# - aspcapStarPath: path of a aspcapStar file
# - apallPath: the path of the apall file (an early version of
# allStar by JB, now deprecated)
#
##################################################################################
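# Example (illustrative only; the mirror path and star ID below are made up): with the
# environment variables above set, e.g.
#
# export SDSS_LOCAL_SAS_MIRROR=/data/sdss
# export RESULTS_VERS=v603
#
# the helpers in this module resolve SAS-style paths such as
#
# from apogee.tools import path
# path.allStarPath()
# # -> /data/sdss/dr12/apogee/spectro/redux/r5/stars/l25_6d/v603/allStar-v603.fits
# path.apStarPath(4240, '2M00000000+0000000')
# # -> /data/sdss/dr12/apogee/spectro/redux/r5/stars/apo25m/4240/apStar-r5-2M00000000+0000000.fits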
import os, os.path
import numpy
import warnings
_APOGEE_DATA= os.getenv('SDSS_LOCAL_SAS_MIRROR')
if _APOGEE_DATA is None:
# Try old method
_APOGEE_DATA= os.getenv('APOGEE_DATA')
if _APOGEE_DATA is None:
raise RuntimeError("SDSS_LOCAL_SAS_MIRROR environment variable needs to be set to use the 'apogee' module")
else:
warnings.warn("APOGEE_DATA environment variable is deprecated in favor of SDSS_LOCAL_SAS_MIRROR; please update your environment",DeprecationWarning)
_APOGEE_REDUX= os.getenv('RESULTS_VERS')
if _APOGEE_REDUX is None:
_APOGEE_REDUX= os.getenv('APOGEE_REDUX')
if _APOGEE_REDUX is None:
raise RuntimeError("RESULTS_VERS environment variable needs to be set to use the 'apogee' module")
else:
warnings.warn("APOGEE_REDUX environment variable is deprecated in favor of RESULTS_VERS; please update your environment",DeprecationWarning)
_APOGEE_ASPCAP_REDUX= os.getenv('APOGEE_ASPCAP_REDUX')
_APOGEE_APOKASC_REDUX= os.getenv('APOGEE_APOKASC_REDUX')
# Reductions
_DR10REDUX='v304'
_DR11REDUX='v402'
_DR12REDUX='v603'
_DR13REDUX='l30e.2'
_CURRENTREDUX='current'
if _APOGEE_REDUX is None:
_APOGEE_REDUX= _DR12REDUX
if _APOGEE_APOKASC_REDUX is None:
_APOGEE_APOKASC_REDUX= 'v7.3'
if _APOGEE_ASPCAP_REDUX is None: #deprecated
_APOGEE_ASPCAP_REDUX= 'v0.4'
_ASPCAP= True
_CODEV= '1'
def apallPath(visit=False):
"""
NAME:
apallPath
PURPOSE:
returns the path of the relevant file
INPUT:
visit= if True, return the allVisit file, rather than the allStar file
OUTPUT:
path string
REQUIREMENTS:
environment variables APOGEE_DATA pointing to the data directory
APOGEE_REDUX with the current reduction version (e.g., v0.91)
HISTORY:
2012-01-02 - Written - Bovy (IAS)
2012-05-30 - Edited for ASPCAP - Bovy (IAS)
"""
if _CODEV == '1':
if _ASPCAP:
return os.path.join(_APOGEE_DATA,
'apall-1d-'+_APOGEE_REDUX
+'-aspcap-'+_APOGEE_ASPCAP_REDUX+'.fits')
else:
return os.path.join(_APOGEE_DATA,
'apall-'+_APOGEE_REDUX+'.fits')
elif _CODEV == '2':
if visit:
pass
else:
return os.path.join(_APOGEE_DATA,
'allStar-'+_APOGEE_ASPCAP_REDUX+'.fits')
def allStarPath(dr=None,_old=False):
"""
NAME:
allStarPath
PURPOSE:
returns the path of the relevant file
INPUT:
dr= return the path corresponding to this data release
OUTPUT:
path string
REQUIREMENTS:
environment variables APOGEE_DATA pointing to the data directory
APOGEE_REDUX with the current reduction version (e.g., v0.91)
HISTORY:
2012-01-02 - Written - Bovy (IAS)
2012-05-30 - Edited for ASPCAP - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
redux= _redux_dr(dr=dr)
if _old:
return os.path.join(_APOGEE_DATA,
'allStar-%s.fits' % redux)
else:
specReduxPath= apogeeSpectroReduxDirPath(dr=dr)
if dr == '10':
return os.path.join(specReduxPath,'r3','s3','a3',
_redux_dr(dr=dr),'allStar-%s.fits' % redux)
elif dr == '12':
return os.path.join(specReduxPath,'r5','stars','l25_6d',
_redux_dr(dr=dr),'allStar-%s.fits' % redux)
elif dr == '13':
return os.path.join(specReduxPath,'r6','stars','l30e',
_redux_dr(dr=dr),'allStar-%s.fits' % redux)
elif dr == 'current':
return os.path.join(specReduxPath,'current','stars','l25_6d',
_redux_dr(dr=dr),'allStar-%s.fits' % redux)
def allVisitPath(dr=None,_old=False):
"""
NAME:
allVisitPath
PURPOSE:
returns the path of the relevant file
INPUT:
dr= return the path corresponding to this data release
OUTPUT:
path string
REQUIREMENTS:
environment variables APOGEE_DATA pointing to the data directory
APOGEE_REDUX with the current reduction version (e.g., v0.91)
HISTORY:
2012-01-02 - Written - Bovy (IAS)
2012-05-30 - Edited for ASPCAP - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
redux= _redux_dr(dr=dr)
if _old:
return os.path.join(_APOGEE_DATA,
'allVisit-%s.fits' % redux)
else:
return allStarPath(dr=dr,_old=_old).replace('allStar','allVisit')
def apokascPath():
"""
NAME:
apokascPath
PURPOSE:
returns the path of the relevant file
INPUT:
(none)
OUTPUT:
path string
REQUIREMENTS:
environment variables APOGEE_DATA pointing to the data directory
APOKASC_REDUX with the current reduction version (e.g., v6.2)
HISTORY:
2012-01-02 - Written - Bovy (IAS)
2012-09-10 - Edited for APOKASC - Bovy (IAS)
"""
if _APOGEE_APOKASC_REDUX[1] == '7':
return os.path.join(_APOGEE_DATA,
'APOKASC_Catalog.'+_APOGEE_APOKASC_REDUX+'.fits')
else:
return os.path.join(_APOGEE_DATA,
'APOKASC_cat_'+_APOGEE_APOKASC_REDUX+'.fits')
def distPath(dr=None):
"""
NAME:
distPath
PURPOSE:
returns the path of the relevant file
INPUT:
(none)
OUTPUT:
path string
REQUIREMENTS:
environment variables APOGEE_DATA pointing to the data directory
APOGEE_REDUX with the current reduction version (e.g., v0.91)
HISTORY:
2012-01-02 - Written - Bovy (IAS)
2012-05-30 - Edited for ASPCAP - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
redux= _redux_dr(dr=dr)
if redux.lower() == _DR12REDUX:
return os.path.join(_APOGEE_DATA,
'apogee-distances_DR12_v1.fits')
elif redux.lower() == _DR11REDUX:
return os.path.join(_APOGEE_DATA,
'allStar+-v402.130103.fits')
elif redux.lower() == 'v302' or redux.lower() == _DR10REDUX:
return os.path.join(_APOGEE_DATA,
'distmagall-'+redux+'.fits')
def rcsamplePath(dr=None,_old=False):
"""
NAME:
rcsamplePath
PURPOSE:
returns the path of the relevant file
INPUT:
dr= data reduction to load the catalog for (automatically set based on APOGEE_REDUX if not given explicitly)
OUTPUT:
path string
REQUIREMENTS:
environment variables APOGEE_DATA pointing to the data directory
APOGEE_REDUX with the current reduction version (e.g., v0.91)
HISTORY:
2012-01-02 - Written - Bovy (IAS)
2012-10-08 - Edited for rcsample - Bovy (IAS)
"""
if dr is None:
if _APOGEE_REDUX == 'v402': dr= '11'
elif _APOGEE_REDUX == 'v603': dr= '12'
elif _APOGEE_REDUX == 'l30e.2': dr= '13'
elif _APOGEE_REDUX == 'current':
return os.path.join(_APOGEE_DATA,'apogee-rc-current.fits')
else: raise IOError('No RC catalog available for the %s reduction' % _APOGEE_REDUX)
if _old:
return os.path.join(_APOGEE_DATA,
'apogee-rc-DR%s.fits' % dr)
else:
if dr == '11' or dr == '12':
return os.path.join(_APOGEE_DATA,'dr12','apogee','vac','apogee-rc',
'cat','apogee-rc-DR%s.fits' % dr)
elif dr == '13':
return os.path.join(_APOGEE_DATA,'dr13','apogee','vac','apogee-rc',
'cat','apogee-rc-DR%s.fits' % dr)
def obslogPath(year=None):
"""
NAME:
obslogPath
PURPOSE:
returns the path of the relevant file
INPUT:
year= read up to this year (None)
OUTPUT:
path string
REQUIREMENTS:
environment variables APOGEE_DATA pointing to the data directory
APOGEE_REDUX with the current reduction version (e.g., v0.91)
HISTORY:
2012-01-02 - Written - Bovy (IAS)
2012-11-04 - Edited for obslog - Bovy (IAS)
"""
if year is None:
if _APOGEE_REDUX == 'v402': year= 2
elif _APOGEE_REDUX == 'v603': year= 3
else: raise IOError('No default year available for APOGEE_REDUX %s, need to set it by hand' % _APOGEE_REDUX)
if year == 1 or year == 2:
return os.path.join(_APOGEE_DATA,
'obs-summary-year12.csv')
elif year == 3:
return os.path.join(_APOGEE_DATA,
'obs-summary-year123.csv')
def apogeeTargetDirPath(dr=None):
"""
NAME:
apogeeTargetDirPath
PURPOSE:
returns the path of the relevant directory
INPUT:
dr= return the path corresponding to this data release
OUTPUT:
path string
REQUIREMENTS:
environment variables APOGEE_DATA pointing to the data directory
APOGEE_REDUX with the current reduction version (e.g., v0.91)
HISTORY:
2012-01-02 - Written - Bovy (IAS)
2012-11-04 - Edited for apogeeTargetDir - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
return os.path.join(_APOGEE_DATA,'dr%s' % dr,
'apogee','target','apogee_DR'+dr)
def apogeePlatePath(dr=None):
"""
NAME:
apogeePlatePath
PURPOSE:
returns the path of the relevant file
INPUT:
dr= return the path corresponding to this data release
OUTPUT:
path string
REQUIREMENTS:
environment variables APOGEE_DATA pointing to the data directory
APOGEE_REDUX with the current reduction version (e.g., v0.91)
HISTORY:
2012-01-02 - Written - Bovy (IAS)
2012-11-04 - Edited for apogeePlate - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
if dr == '11' or dr == '12':
platename= 'apogeePlate.fits'
else:
platename= 'apogeePlate_DR%s.fits' % dr
return os.path.join(apogeeTargetDirPath(dr=dr),
platename)
def apogeeDesignPath(dr=None):
"""
NAME:
apogeeDesignPath
PURPOSE:
returns the path of the relevant file
INPUT:
dr= return the path corresponding to this data release
OUTPUT:
path string
REQUIREMENTS:
environment variables APOGEE_DATA pointing to the data directory
APOGEE_REDUX with the current reduction version (e.g., v0.91)
HISTORY:
2012-01-02 - Written - Bovy (IAS)
2012-11-04 - Edited for apogeePlate - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
if dr == '11' or dr == '12':
platename= 'apogeeDesign.fits'
else:
platename= 'apogeeDesign_DR%s.fits' % dr
return os.path.join(apogeeTargetDirPath(dr=dr),
platename)
def apogeeFieldPath(dr=None):
"""
NAME:
apogeeFieldPath
PURPOSE:
returns the path of the relevant file
INPUT:
dr= return the path corresponding to this data release
OUTPUT:
path string
REQUIREMENTS:
environment variables APOGEE_DATA pointing to the data directory
APOGEE_REDUX with the current reduction version (e.g., v0.91)
HISTORY:
2012-01-02 - Written - Bovy (IAS)
2012-11-04 - Edited for apogeePlate - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
if dr == '11' or dr == '12':
platename= 'apogeeField.fits'
else:
platename= 'apogeeField_DR%s.fits' % dr
return os.path.join(apogeeTargetDirPath(dr=dr),
platename)
def apogeeObjectPath(field_name,dr=None):
"""
NAME:
apogeeObjectPath
PURPOSE:
returns the path of the relevant file
INPUT:
field_name - name of the field
dr= return the path corresponding to this data release
OUTPUT:
path string
REQUIREMENTS:
environment variables APOGEE_DATA pointing to the data directory
APOGEE_REDUX with the current reduction version (e.g., v0.91)
HISTORY:
2012-01-02 - Written - Bovy (IAS)
2012-11-04 - Edited for apogeeObject - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
if dr == '11' or dr == '12':
filename= 'apogeeObject_%s.fits' % field_name.strip()
else:
filename= 'apogeeObject_DR%s_%s.fits' % (dr,field_name.strip())
return os.path.join(apogeeTargetDirPath(dr=dr),
filename)
def aspcapStarPath(loc_id,apogee_id,dr=None):
"""
NAME:
aspcapStarPath
PURPOSE:
returns the path of the aspcapStar file
INPUT:
loc_id - location ID (field for 1m targets)
apogee_id - APOGEE ID of the star
dr= return the path corresponding to this data release
OUTPUT:
path string
HISTORY:
2014-11-25 - Written - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
specReduxPath= apogeeSpectroReduxDirPath(dr=dr)
if dr == '10':
return os.path.join(specReduxPath,'r3','s3','a3',
_redux_dr(dr=dr),'%i' % loc_id,
'aspcapStar-%s-%s.fits' % (_redux_dr(dr=dr),
apogee_id))
elif dr == '12':
if isinstance(loc_id,str): #1m
return os.path.join(specReduxPath,'r5','stars','l25_6d',
_redux_dr(dr=dr),loc_id.strip(),
'aspcapStar-r5-%s-%s.fits' % (_redux_dr(dr=dr),
apogee_id.strip()))
elif loc_id ==1:
raise IOError('For 1m targets, give the FIELD instead of the location ID')
else:
return os.path.join(specReduxPath,'r5','stars','l25_6d',
_redux_dr(dr=dr),'%i' % loc_id,
'aspcapStar-r5-%s-%s.fits' % (_redux_dr(dr=dr),
apogee_id))
elif dr == '13':
if isinstance(loc_id,str): #1m
return os.path.join(specReduxPath,'r6','stars','l30e',
_redux_dr(dr=dr),loc_id.strip(),
'aspcapStar-r6-%s-%s.fits' % (_redux_dr(dr=dr),
apogee_id.strip()))
elif loc_id ==1:
raise IOError('For 1m targets, give the FIELD instead of the location ID')
else:
return os.path.join(specReduxPath,'r6','stars','l30e',
_redux_dr(dr=dr),'%i' % loc_id,
'aspcapStar-r6-%s-%s.fits' % (_redux_dr(dr=dr),
apogee_id))
elif dr == 'current':
if isinstance(loc_id,str): #1m
return os.path.join(specReduxPath,'current','stars','l25_6d',
_redux_dr(dr=dr),loc_id.strip(),
'aspcapStar-current-%s-%s.fits' \
% (_redux_dr(dr=dr),
apogee_id.strip()))
elif loc_id ==1:
raise IOError('For 1m targets, give the FIELD instead of the location ID')
else:
return os.path.join(specReduxPath,'current','stars','l25_6d',
_redux_dr(dr=dr),'%i' % loc_id,
'aspcapStar-current-%s-%s.fits' \
% (_redux_dr(dr=dr),
apogee_id))
def apStarPath(loc_id,apogee_id,dr=None):
"""
NAME:
apStarPath
PURPOSE:
returns the path of the apStar file
INPUT:
loc_id - location ID (field for 1m targets)
apogee_id - APOGEE ID of the star
dr= return the path corresponding to this data release
OUTPUT:
path string
HISTORY:
2015-01-13 - Written - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
specReduxPath= apogeeSpectroReduxDirPath(dr=dr)
if dr == '10':
return os.path.join(specReduxPath,'r3','s3',
'%i' % loc_id,
'apStar-s3-%s.fits' % apogee_id)
elif dr == '12':
if isinstance(loc_id,str): #1m
return os.path.join(specReduxPath,'r5','stars','apo1m',
loc_id.strip(),
'apStar-r5-%s.fits' % apogee_id.strip())
elif loc_id ==1:
raise IOError('For 1m targets, give the FIELD instead of the location ID')
else:
return os.path.join(specReduxPath,'r5','stars','apo25m',
'%i' % loc_id,
'apStar-r5-%s.fits' % apogee_id)
elif dr == '13':
if isinstance(loc_id,str): #1m
return os.path.join(specReduxPath,'r6','stars','apo1m',
loc_id.strip(),
'apStar-r6-%s.fits' % apogee_id.strip())
elif loc_id ==1:
raise IOError('For 1m targets, give the FIELD instead of the location ID')
else:
return os.path.join(specReduxPath,'r6','stars','apo25m',
'%i' % loc_id,
'apStar-r6-%s.fits' % apogee_id)
elif dr == 'current':
if isinstance(loc_id,str): #1m
return os.path.join(specReduxPath,'current','stars','apo1m',
loc_id.strip(),
'apStar-current-%s.fits' % apogee_id.strip())
elif loc_id ==1:
raise IOError('For 1m targets, give the FIELD instead of the location ID')
else:
return os.path.join(specReduxPath,'current','stars','apo25m',
'%i' % loc_id,
'apStar-current-%s.fits' % apogee_id)
def apVisitPath(loc_id, mjd, fiberid, dr=None):
"""
NAME:
apVisitPath
PURPOSE:
returns the path of the apVisit file
INPUT:
loc_id = 4-digit location ID (field for 1m targets)
mjd = 5-digit MJD
fiberid = 3-digit fiber ID
dr = return the path corresponding to this data release (general default)
OUTPUT:
path string
HISTORY:
2016-11 - Meredith Rawls
2016-11-29 - Bovy (UofT) - Edited inputs
TODO:
automatically find all apVisit files for a given apogee ID and download them
"""
mjd = str(mjd).strip()
if not isinstance(fiberid,str):
fiberid= '%03i' % fiberid
if dr is None:
dr = _default_dr()
specReduxPath = apogeeSpectroReduxDirPath(dr=dr)
if dr == '10':
return os.path.join(specReduxPath, 'r3', 's3', loc_id, mjd,
'apVisit-s3-%s-%s-%s.fits' % (loc_id, mjd, fiberid))
elif dr == '12':
if isinstance(loc_id, str): #1m
return os.path.join(specReduxPath, 'r5', 'apo1m', loc_id, mjd,
'apVisit-r5-%s-%s-%s.fits' % (loc_id, mjd, fiberid))
elif loc_id == 1:
raise IOError('For 1m targets, give the FIELD instead of the location ID')
else:
loc_id = str(loc_id).strip()
return os.path.join(specReduxPath, 'r5', 'apo25m', loc_id, mjd,
'apVisit-r5-%s-%s-%s.fits' % (loc_id, mjd, fiberid))
elif dr == '13':
if isinstance(loc_id, str): #1m
return os.path.join(specReduxPath, 'r6', 'apo1m', loc_id, mjd,
'apVisit-r6-%s-%s-%s.fits' % (loc_id, mjd, fiberid))
elif loc_id == 1:
raise IOError('For 1m targets, give the FIELD instead of the location ID')
else:
loc_id = str(loc_id).strip()
return os.path.join(specReduxPath, 'r6', 'apo25m', loc_id, mjd,
'apVisit-r6-%s-%s-%s.fits' % (loc_id, mjd, fiberid))
elif dr == 'current':
if isinstance(loc_id, str): #1m
return os.path.join(specReduxPath, 'current', 'apo1m', loc_id, mjd,
'apVisit-current-%s-%s-%s.fits' % (loc_id, mjd, fiberid))
elif loc_id == 1:
raise IOError('For 1m targets, give the FIELD instead of the location ID')
else:
loc_id = str(loc_id).strip()
return os.path.join(specReduxPath, 'current', 'apo25m', loc_id, mjd,
'apVisit-current-%s-%s-%s.fits' % (loc_id, mjd, fiberid))
def modelSpecPath(lib='GK',teff=4500,logg=2.5,metals=0.,
cfe=0.,nfe=0.,afe=0.,vmicro=2.,
dr=None):
"""
NAME:
modelSpecPath
PURPOSE:
returns the path of a model spectrum file
INPUT:
lib= ('GK') spectral library
teff= (4500) grid-point Teff
logg= (2.5) grid-point logg
metals= (0.) grid-point metallicity
cfe= (0.) grid-point carbon-enhancement
nfe= (0.) grid-point nitrogen-enhancement
afe= (0.) grid-point alpha-enhancement
vmicro= (2.) grid-point microturbulence
dr= return the path corresponding to this data release
OUTPUT:
path string
HISTORY:
2015-01-20 - Written - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
specReduxPath= apogeeSpectroReduxDirPath(dr=dr)
modelSpecLibPath= apogeeModelSpectroLibraryDirPath(dr=dr,lib=lib)
if dr == '10':
raise IOError('Loading model spectra for DR10 is not supported at this time')
elif dr == '12':
# Find closest grid-points for cfe, nfe, afe, and vmicro
cfegrid= numpy.linspace(-1.,1.,9)
nfegrid= numpy.linspace(-1.,1.,5)
afegrid= numpy.linspace(-1.,1.,9)
vmicrogrid= numpy.array([0.5,1.,2.,4.,8.])
cfep= cfegrid[numpy.argmin(numpy.fabs(cfegrid-cfe))]
nfep= nfegrid[numpy.argmin(numpy.fabs(nfegrid-nfe))]
afep= afegrid[numpy.argmin(numpy.fabs(afegrid-afe))]
vmp= vmicrogrid[numpy.argmin(numpy.fabs(vmicrogrid-vmicro))]
# Create strings
if cfep >= 0.:
cfestr= 'cp%i%i' % (int(cfep),int(round((cfep % 1)*10.)))
else:
cfestr= 'cm%i%i' % (int(-cfep),int(round((-cfep % 1)*10.)))
if nfep >= 0.:
nfestr= 'np%i%i' % (int(nfep),int(round((nfep % 1)*10.)))
else:
nfestr= 'nm%i%i' % (int(-nfep),int(round((-nfep % 1)*10.)))
if afep >= 0.:
afestr= 'ap%i%i' % (int(afep),int(round((afep % 1)*10.)))
else:
afestr= 'am%i%i' % (int(-afep),int(round((-afep % 1)*10.)))
if vmp >= 0.:
vmstr= 'vp%i%i' % (int(vmp),int(round((vmp % 1)*10.)))
else:
vmstr= 'cm%i%i' % (int(-vmp),int(round((-vmp % 1)*10.)))
return os.path.join(specReduxPath,modelSpecLibPath,
afestr+cfestr+nfestr+vmstr+'.fits')
def ferreModelLibraryPath(lib='GK',pca=True,sixd=True,unf=False,dr=None,
header=False):
"""
NAME:
ferreModelLibraryPath
PURPOSE:
returns the path of a model library
INPUT:
lib= ('GK') spectral library
dr= return the path corresponding to this data release
pca= (True) if True, return path of the PCA compressed library
sixd= (True) if True, return path of the 6D library (w/o vmicro)
unf= (False) if True, return path of the binary library (otherwise ascii)
header= (False) if True, return the path of the header file
OUTPUT:
path string
HISTORY:
2015-01-21 - Written - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
specReduxPath= apogeeSpectroReduxDirPath(dr=dr)
modelSpecLibPath= apogeeModelSpectroLibraryDirPath(dr=dr,lib=lib)
if dr == '10':
raise IOError('Loading model libraries for DR10 is not supported at this time')
elif dr == '12':
if pca and sixd:
filename= 'p6_aps'
elif pca:
filename= 'p_aps'
else:
filename= 'f_'
filename+= 'as%s_131216_lsfcombo5v6_w123.' % lib.upper()
if header:
filename+= 'hdr'
elif unf:
filename+= 'unf'
else:
filename+= 'dat'
return os.path.join(specReduxPath,modelSpecLibPath,filename)
elif dr == 'current':
if pca and sixd:
filename= 'p6_aps'
elif pca:
filename= 'p_aps'
else:
filename= 'f_'
if 'ms' in lib:
filename+= '%s_140529_lsfcombo5v6_w123.' % lib
else:
filename+= 'as%s_131216_lsfcombo5v6_w123.' % lib.upper()
if header:
filename+= 'hdr'
elif unf:
filename+= 'unf'
else:
filename+= 'dat'
return os.path.join(specReduxPath,modelSpecLibPath,filename)
def modelAtmospherePath(lib='kurucz_filled',teff=4500,logg=2.5,metals=0.,
cfe=0.,afe=0.,vmicro=2.,dr=None):
"""
NAME:
modelAtmospherePath
PURPOSE:
returns the path of a model spectrum file
INPUT:
lib= ('kurucz_filled') atmosphere library
teff= (4500) grid-point Teff
logg= (2.5) grid-point logg
metals= (0.) grid-point metallicity
cfe= (0.) grid-point carbon-enhancement
afe= (0.) grid-point alpha-enhancement
vmicro= (2.) grid-point microturbulence
dr= return the path corresponding to this data release
OUTPUT:
path string
HISTORY:
2015-02-13 - Written - Bovy (IAS)
"""
if dr is None: dr= 'current'
specReduxPath= apogeeSpectroReduxDirPath(dr=dr)
modelAtmosphereLibPath= apogeeModelAtmosphereLibraryDirPath(dr=dr,lib=lib)
if dr == '10':
raise IOError('Loading model atmospheres for DR10 is not supported at this time')
elif dr == '12' or dr == 'current':
# Create directory + filename
if lib.lower() == 'kurucz_filled':
metalsstr= _modelAtmKurucz_metalsString(metals)
cfestr= _modelAtmKurucz_cfeString(cfe,metals)
afestr= _modelAtmKurucz_afeString(afe,metals)
dirname= os.path.join(specReduxPath,modelAtmosphereLibPath,
metalsstr+cfestr+afestr)
filename= 'a'+metalsstr+cfestr+afestr
teffstr= _modelAtmKurucz_teffString(teff)
loggstr= _modelAtmKurucz_loggString(logg,teff)
filename+= teffstr+loggstr+'v20.mod'
return os.path.join(dirname,filename)
def linelistPath(linelist,dr=None):
"""
NAME:
linelistPath
PURPOSE:
returns the path of a linelist
INPUT:
linelist - name of the linelist
OUTPUT:
path string
HISTORY:
2015-02-13 - Written - Bovy (IAS)
"""
if dr is None: dr= 'current'
specReduxPath= apogeeSpectroReduxDirPath(dr=dr)
return os.path.join(specReduxPath,'speclib','linelists',linelist)
def apWavePath(chip,dr=None):
"""
NAME:
apWavePath
PURPOSE:
returns the path of an apWave file
INPUT:
chip - chip 'a', 'b', or 'c'
dr= return the path corresponding to this data release
OUTPUT:
path string
HISTORY:
2015-02-27 - Written - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
specReduxPath= apogeeSpectroReduxDirPath(dr=dr)
if dr == '10':
return os.path.join(specReduxPath,'r3','cal','wave',
'apWave-%s-02420038.fits' % chip)
elif dr == '12':
return os.path.join(specReduxPath,'r5','cal','wave',
'apWave-%s-02420038.fits' % chip)
elif dr == '13' or dr == 'current':
return os.path.join(specReduxPath,'r6','cal','wave',
'apWave-%s-02420038.fits' % chip)
def apLSFPath(chip,dr=None):
"""
NAME:
apLSFPath
PURPOSE:
returns the path of an apLSF file
INPUT:
chip - chip 'a', 'b', or 'c'
dr= return the path corresponding to this data release
OUTPUT:
path string
HISTORY:
2015-03-12 - Written - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
specReduxPath= apogeeSpectroReduxDirPath(dr=dr)
if dr == '10':
return os.path.join(specReduxPath,'r3','cal','lsf',
'apLSF-%s-02490024.fits' % chip)
elif dr == '12':
return os.path.join(specReduxPath,'r5','cal','lsf',
'apLSF-%s-02490024.fits' % chip)
elif dr == '13' or dr == 'current':
return os.path.join(specReduxPath,'r6','cal','lsf',
'apLSF-%s-05440020.fits' % chip)
def apogeeSpectroReduxDirPath(dr=None):
"""
NAME:
apogeeSpectroReduxDirPath
PURPOSE:
returns the path of the spectro dir
INPUT:
dr= return the path corresponding to this data release
OUTPUT:
path string
HISTORY:
2014-11-25 - Written - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
if dr.lower() == 'current':
return os.path.join(_APOGEE_DATA,'apogeework',
'apogee','spectro','redux')
else:
return os.path.join(_APOGEE_DATA,'dr%s' % dr,
'apogee','spectro','redux')
def apogeeModelSpectroLibraryDirPath(dr=None,lib='GK'):
"""
NAME:
apogeeModelSpectroLibraryDirPath
PURPOSE:
returns the path of the model spectra within the spectral reduction directory
INPUT:
dr= return the path corresponding to this data release
lib= ('GK') spectral library
OUTPUT:
path string
HISTORY:
2015-01-20 - Written - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
if dr == '12':
if lib.lower() == 'gk':
return os.path.join('speclib','asset','kurucz_filled',
'solarisotopes','asGK_131216_lsfcombo5v6')
elif lib.lower() == 'f':
return os.path.join('speclib','asset','kurucz_filled',
'solarisotopes','asF_131216_lsfcombo5v6')
elif dr == 'current':
if lib.lower() == 'msgk':
return os.path.join('speclib','moog','kurucz_filled',
'solarisotopes','msGK_140529_lsfcombo5v6')
def apogeeModelAtmosphereLibraryDirPath(dr=None,lib='kurucz_filled'):
"""
NAME:
apogeeModelAtmosphereLibraryDirPath
PURPOSE:
returns the path of the model atmospheres within the spectral reduction directory
INPUT:
dr= return the path corresponding to this data release
lib= ('kurucz_filled') spectral library
OUTPUT:
path string
HISTORY:
2015-02-13 - Written - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
if dr == '12' or dr == 'current':
if lib.lower() == 'kurucz_filled':
return os.path.join('speclib','kurucz_filled')
elif 'marcs' in lib.lower():
return os.path.join('speclib','marcs',lib)
def _default_dr():
if _APOGEE_REDUX == _DR10REDUX: dr= '10'
elif _APOGEE_REDUX == _DR11REDUX: dr= '11'
elif _APOGEE_REDUX == _DR12REDUX: dr= '12'
elif _APOGEE_REDUX == _DR13REDUX: dr= '13'
elif _APOGEE_REDUX == _CURRENTREDUX: dr= 'current'
else: raise IOError('No default dr available for APOGEE_REDUX %s, need to set it by hand' % _APOGEE_REDUX)
return dr
def _redux_dr(dr=None):
if dr is None: dr= _default_dr()
if dr == '10': return _DR10REDUX
elif dr == '11': return _DR11REDUX
elif dr == '12': return _DR12REDUX
elif dr == '13': return _DR13REDUX
elif dr == 'current': return _CURRENTREDUX
else: raise IOError('No reduction available for DR%s, need to set it by hand' % dr)
# Functions that give the correct string values for model atmosphere files
# [M/H]
_modelAtmKurucz_fehgrid= numpy.array([-5.,-4.5,-4.,-3.5,-3.,-2.75,-2.5,
-2.25,-2.,-1.75,-1.5,-1.25,-1.,
-0.75,-0.5,-0.25,0.,0.25,0.5,
1.,1.5])
def _py2_round(fl):
# Bad ... always round 0.5 up, like in python 2 (not 3!)
if fl % 1 >= 0.5:
return numpy.ceil(fl)
else:
return numpy.floor(fl)
def _modelAtmKurucz_metalsString(metals):
metalsp= _modelAtmKurucz_fehgrid[numpy.argmin(numpy.fabs(_modelAtmKurucz_fehgrid-metals))]
if metalsp >= 0.:
metalsstr= 'mp%i%i' % (int(metalsp),int(_py2_round((metalsp % 1)*10.)))
else:
metalsstr= 'mm%i%i' % (int(-metalsp),int(_py2_round((-metalsp % 1)*10.)))
return metalsstr
# [C/Fe]
_modelAtmKurucz_cfegrid_lowm= numpy.linspace(-1.,1.,5)
_modelAtmKurucz_cfegrid_midm= numpy.linspace(-1.5,1.,11)
_modelAtmKurucz_cfegrid_him= numpy.linspace(-1.5,1.,6)
def _modelAtmKurucz_cfeString(cfe,metals):
if metals <= -3.5:
tgrid= _modelAtmKurucz_cfegrid_lowm
elif metals >= 1.:
tgrid= _modelAtmKurucz_cfegrid_him
else:
tgrid= _modelAtmKurucz_cfegrid_midm
cfep= tgrid[numpy.argmin(numpy.fabs(tgrid-cfe))]
if cfep >= 0.:
cfestr= 'cp%i%i' % (int(cfep),int(_py2_round((cfep % 1)*10.)))
else:
cfestr= 'cm%i%i' % (int(-cfep),int(_py2_round((-cfep % 1)*10.)))
return cfestr
# [alpha/Fe]
_modelAtmKurucz_afegrid_lowm= numpy.linspace(-1.,1.,5)
_modelAtmKurucz_afegrid_midm= numpy.linspace(-1.5,1.,11)
_modelAtmKurucz_afegrid_him= numpy.linspace(-1.5,1.,6)
def _modelAtmKurucz_afeString(afe,metals):
if metals <= -3.5:
tgrid= _modelAtmKurucz_afegrid_lowm
elif metals >= 1.:
tgrid= _modelAtmKurucz_afegrid_him
else:
tgrid= _modelAtmKurucz_afegrid_midm
afep= tgrid[numpy.argmin(numpy.fabs(tgrid-afe))]
if afep >= 0.:
afestr= 'op%i%i' % (int(afep),int(_py2_round((afep % 1)*10.)))
else:
afestr= 'om%i%i' % (int(-afep),int(_py2_round((-afep % 1)*10.)))
return afestr
# Teff
_modelAtmKurucz_teffgrid= numpy.array([3500,3750,4000,4250,4500,
4750,5000,5250,5500,5750,
6000,6250,6500,6750,7000,
7250,7500,7750,8000,8250,
8500,8750,9000,9250,9500,
9750,10000,10250,10500,
10750,11000,11250,11500,11750,
12000,12500,13000,13500,14000,
14500,15000,15500,16000,16500,
17000,17500,18000,18500,19000,
19500,20000,21000,22000,23000,
24000,25000,26000,27000,28000,
29000,30000],dtype='int')
def _modelAtmKurucz_teffString(teff):
teffp= _modelAtmKurucz_teffgrid[numpy.argmin(numpy.fabs(_modelAtmKurucz_teffgrid-teff))]
return 't%i' % teffp
# log g
_modelAtmKurucz_logggrid_G= numpy.linspace(0.,5.,11)
_modelAtmKurucz_logggrid_F= numpy.linspace(1.,5.,9)
_modelAtmKurucz_logggrid_A= numpy.linspace(2.,5.,7)
_modelAtmKurucz_logggrid_B= numpy.linspace(3.,5.,5)
_modelAtmKurucz_logggrid_O= numpy.linspace(4.,5.,3)
def _modelAtmKurucz_loggString(logg,teff):
if teff <= 6000.:
tgrid= _modelAtmKurucz_logggrid_G
elif teff <= 8000.:
tgrid= _modelAtmKurucz_logggrid_F
elif teff <= 12000.:
tgrid= _modelAtmKurucz_logggrid_A
elif teff <= 20000.:
tgrid= _modelAtmKurucz_logggrid_B
else:
tgrid= _modelAtmKurucz_logggrid_O
loggp= tgrid[numpy.argmin(numpy.fabs(tgrid-logg))]
return 'g%i%i' % (int(loggp),int(_py2_round((loggp % 1)*10.)))
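# Worked example (illustrative trace of the helpers above, not additional functionality):
# modelAtmospherePath(metals=-1.4, cfe=0., afe=0., teff=4600, logg=2.3) snaps each value
# to its grid point and builds the name pieces
# metals -> -1.5 -> 'mm15', cfe -> 0.0 -> 'cp00', afe -> 0.0 -> 'op00',
# teff -> 4500 -> 't4500', logg -> 2.5 -> 'g25'
# giving directory '.../speclib/kurucz_filled/mm15cp00op00' and file 'amm15cp00op00t4500g25v20.mod'.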
| [] | [] | ["APOGEE_APOKASC_REDUX", "APOGEE_REDUX", "SDSS_LOCAL_SAS_MIRROR", "APOGEE_DATA", "RESULTS_VERS", "APOGEE_ASPCAP_REDUX"] | [] | ["APOGEE_APOKASC_REDUX", "APOGEE_REDUX", "SDSS_LOCAL_SAS_MIRROR", "APOGEE_DATA", "RESULTS_VERS", "APOGEE_ASPCAP_REDUX"] | python | 6 | 0 |
pkg/socketservice/socketservice.go | package socketservice
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"sync"
"time"
"github.com/pkg/errors"
"github.com/replicatedhq/kots/kotskinds/multitype"
appstatustypes "github.com/replicatedhq/kots/pkg/api/appstatus/types"
downstreamtypes "github.com/replicatedhq/kots/pkg/api/downstream/types"
"github.com/replicatedhq/kots/pkg/app"
apptypes "github.com/replicatedhq/kots/pkg/app/types"
"github.com/replicatedhq/kots/pkg/appstatus"
identitydeploy "github.com/replicatedhq/kots/pkg/identity/deploy"
identitytypes "github.com/replicatedhq/kots/pkg/identity/types"
downstream "github.com/replicatedhq/kots/pkg/kotsadmdownstream"
snapshot "github.com/replicatedhq/kots/pkg/kotsadmsnapshot"
"github.com/replicatedhq/kots/pkg/kotsutil"
"github.com/replicatedhq/kots/pkg/logger"
"github.com/replicatedhq/kots/pkg/midstream"
"github.com/replicatedhq/kots/pkg/redact"
"github.com/replicatedhq/kots/pkg/render"
"github.com/replicatedhq/kots/pkg/socket"
"github.com/replicatedhq/kots/pkg/socket/transport"
"github.com/replicatedhq/kots/pkg/store"
"github.com/replicatedhq/kots/pkg/supportbundle"
supportbundletypes "github.com/replicatedhq/kots/pkg/supportbundle/types"
"github.com/replicatedhq/kots/pkg/version"
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)
type ClusterSocket struct {
ClusterID string
SocketID string
SentPreflightURLs map[string]bool
LastDeployedSequences map[string]int64
}
type DeployArgs struct {
AppID string `json:"app_id"`
AppSlug string `json:"app_slug"`
KubectlVersion string `json:"kubectl_version"`
AdditionalNamespaces []string `json:"additional_namespaces"`
ImagePullSecret string `json:"image_pull_secret"`
Namespace string `json:"namespace"`
PreviousManifests string `json:"previous_manifests"`
Manifests string `json:"manifests"`
Wait bool `json:"wait"`
ResultCallback string `json:"result_callback"`
ClearNamespaces []string `json:"clear_namespaces"`
ClearPVCs bool `json:"clear_pvcs"`
AnnotateSlug bool `json:"annotate_slug"`
}
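// Illustrative shape of the deploy payload sent to the operator over the socket
// (field values are made up; names follow the json tags above, and the namespace
// and callback values mirror how deployArgs is populated further below):
//
// {
//   "app_id": "abc123",
//   "app_slug": "my-app",
//   "namespace": ".",
//   "manifests": "<base64 of kustomize build output>",
//   "previous_manifests": "<base64 or empty>",
//   "result_callback": "/api/v1/deploy/result",
//   "wait": false
// }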
type AppInformersArgs struct {
AppID string `json:"app_id"`
Informers []string `json:"informers"`
}
type SupportBundleArgs struct {
URI string `json:"uri"`
RedactURI string `json:"redactURI"`
}
var server *socket.Server
var clusterSocketHistory = []*ClusterSocket{}
var socketMtx sync.Mutex
// SocketService uses special cluster authorization
func Start() *socket.Server {
logger.Debug("starting socket service")
server = socket.NewServer(transport.GetDefaultWebsocketTransport())
server.On(socket.OnConnection, func(c *socket.Channel, args interface{}) {
socketMtx.Lock()
defer socketMtx.Unlock()
clusterID, err := store.GetStore().GetClusterIDFromDeployToken(c.RequestURL().Query().Get("token"))
if err != nil {
logger.Error(errors.Wrap(err, "failed to get cluster id from deploy token"))
return
}
logger.Info(fmt.Sprintf("Cluster %s connected to the socket service", clusterID))
c.Join(clusterID)
clusterSocket := &ClusterSocket{
ClusterID: clusterID,
SocketID: c.Id(),
SentPreflightURLs: make(map[string]bool, 0),
LastDeployedSequences: make(map[string]int64, 0),
}
clusterSocketHistory = append(clusterSocketHistory, clusterSocket)
})
server.On(socket.OnDisconnection, func(c *socket.Channel) {
socketMtx.Lock()
defer socketMtx.Unlock()
updatedClusterSocketHistory := []*ClusterSocket{}
for _, clusterSocket := range clusterSocketHistory {
if clusterSocket.SocketID != c.Id() {
updatedClusterSocketHistory = append(updatedClusterSocketHistory, clusterSocket)
}
}
clusterSocketHistory = updatedClusterSocketHistory
})
startLoop(deployLoop, 1)
startLoop(supportBundleLoop, 1)
startLoop(restoreLoop, 1)
return server
}
func startLoop(fn func(), intervalInSeconds time.Duration) {
go func() {
for {
fn()
time.Sleep(time.Second * intervalInSeconds)
}
}()
}
func deployLoop() {
for _, clusterSocket := range clusterSocketHistory {
apps, err := store.GetStore().ListAppsForDownstream(clusterSocket.ClusterID)
if err != nil {
logger.Error(errors.Wrap(err, "failed to list installed apps for downstream"))
continue
}
for _, a := range apps {
if err := processDeploySocketForApp(clusterSocket, a); err != nil {
logger.Error(errors.Wrapf(err, "failed to run deploy loop for app %s in cluster %s", a.ID, clusterSocket.ClusterID))
continue
}
}
}
}
func processDeploySocketForApp(clusterSocket *ClusterSocket, a *apptypes.App) error {
if a.RestoreInProgressName != "" {
return nil
}
deployedVersion, err := downstream.GetCurrentVersion(a.ID, clusterSocket.ClusterID)
if err != nil {
return errors.Wrap(err, "failed to get current downstream version")
}
if deployedVersion == nil {
return nil
}
if value, ok := clusterSocket.LastDeployedSequences[a.ID]; ok && value == deployedVersion.ParentSequence {
// this version is already the currently deployed version
return nil
}
d, err := store.GetStore().GetDownstream(clusterSocket.ClusterID)
if err != nil {
return errors.Wrap(err, "failed to get downstream")
}
var deployError error
defer func() {
if deployError != nil {
err := downstream.UpdateDownstreamStatus(a.ID, deployedVersion.Sequence, "failed", deployError.Error())
if err != nil {
logger.Error(errors.Wrap(err, "failed to update downstream status"))
}
}
}()
deployedVersionArchive, err := ioutil.TempDir("", "kotsadm")
if err != nil {
deployError = errors.Wrap(err, "failed to create temp dir")
return deployError
}
defer os.RemoveAll(deployedVersionArchive)
err = store.GetStore().GetAppVersionArchive(a.ID, deployedVersion.ParentSequence, deployedVersionArchive)
if err != nil {
deployError = errors.Wrap(err, "failed to get app version archive")
return deployError
}
// ensure disaster recovery label transformer in midstream
additionalLabels := map[string]string{
"kots.io/app-slug": a.Slug,
}
if err := midstream.EnsureDisasterRecoveryLabelTransformer(deployedVersionArchive, additionalLabels); err != nil {
deployError = errors.Wrap(err, "failed to ensure disaster recovery label transformer")
return deployError
}
kotsKinds, err := kotsutil.LoadKotsKindsFromPath(deployedVersionArchive)
if err != nil {
deployError = errors.Wrap(err, "failed to load kotskinds")
return deployError
}
registrySettings, err := store.GetStore().GetRegistryDetailsForApp(a.ID)
if err != nil {
return errors.Wrap(err, "failed to get registry settings for app")
}
builder, err := render.NewBuilder(kotsKinds, registrySettings, a.Slug, deployedVersion.Sequence, a.IsAirgap)
if err != nil {
return errors.Wrap(err, "failed to get template builder")
}
requireIdentityProvider := false
if kotsKinds.Identity != nil {
if kotsKinds.Identity.Spec.RequireIdentityProvider.Type == multitype.String {
requireIdentityProvider, err = builder.Bool(kotsKinds.Identity.Spec.RequireIdentityProvider.StrVal, false)
if err != nil {
deployError = errors.Wrap(err, "failed to build kotsv1beta1.Identity.spec.requireIdentityProvider")
return deployError
}
} else {
requireIdentityProvider = kotsKinds.Identity.Spec.RequireIdentityProvider.BoolVal
}
}
if requireIdentityProvider && !identitydeploy.IsEnabled(kotsKinds.Identity, kotsKinds.IdentityConfig) {
deployError = errors.New("identity service is required but is not enabled")
return deployError
}
cmd := exec.Command(fmt.Sprintf("kustomize%s", kotsKinds.KustomizeVersion()), "build", filepath.Join(deployedVersionArchive, "overlays", "downstreams", d.Name))
renderedManifests, err := cmd.Output()
if err != nil {
if ee, ok := err.(*exec.ExitError); ok {
err = fmt.Errorf("kustomize stderr: %q", string(ee.Stderr))
}
deployError = errors.Wrap(err, "failed to run kustomize")
return deployError
}
base64EncodedManifests := base64.StdEncoding.EncodeToString(renderedManifests)
imagePullSecret := ""
secretFilename := filepath.Join(deployedVersionArchive, "overlays", "midstream", "secret.yaml")
_, err = os.Stat(secretFilename)
if err != nil && !os.IsNotExist(err) {
deployError = errors.Wrap(err, "failed to os stat image pull secret file")
return deployError
}
if err == nil {
b, err := ioutil.ReadFile(secretFilename)
if err != nil {
deployError = errors.Wrap(err, "failed to read image pull secret file")
return deployError
}
imagePullSecret = string(b)
}
// get previous manifests (if any)
base64EncodedPreviousManifests := ""
previouslyDeployedSequence, err := downstream.GetPreviouslyDeployedSequence(a.ID, clusterSocket.ClusterID)
if err != nil {
deployError = errors.Wrap(err, "failed to get previously deployed sequence")
return deployError
}
if previouslyDeployedSequence != -1 {
previouslyDeployedParentSequence, err := downstream.GetParentSequenceForSequence(a.ID, clusterSocket.ClusterID, previouslyDeployedSequence)
if err != nil {
deployError = errors.Wrap(err, "failed to get previously deployed parent sequence")
return deployError
}
if previouslyDeployedParentSequence != -1 {
previouslyDeployedVersionArchive, err := ioutil.TempDir("", "kotsadm")
if err != nil {
deployError = errors.Wrap(err, "failed to create temp dir")
return deployError
}
defer os.RemoveAll(previouslyDeployedVersionArchive)
err = store.GetStore().GetAppVersionArchive(a.ID, previouslyDeployedParentSequence, previouslyDeployedVersionArchive)
if err != nil {
deployError = errors.Wrap(err, "failed to get previously deployed app version archive")
return deployError
}
previousKotsKinds, err := kotsutil.LoadKotsKindsFromPath(previouslyDeployedVersionArchive)
if err != nil {
deployError = errors.Wrap(err, "failed to load kotskinds for previously deployed app version")
return deployError
}
cmd := exec.Command(fmt.Sprintf("kustomize%s", previousKotsKinds.KustomizeVersion()), "build", filepath.Join(previouslyDeployedVersionArchive, "overlays", "downstreams", d.Name))
previousRenderedManifests, err := cmd.Output()
if err != nil {
if ee, ok := err.(*exec.ExitError); ok {
err = fmt.Errorf("kustomize stderr: %q", string(ee.Stderr))
}
deployError = errors.Wrap(err, "failed to run kustomize for previously deployed app version")
return deployError
}
base64EncodedPreviousManifests = base64.StdEncoding.EncodeToString(previousRenderedManifests)
}
}
deployArgs := DeployArgs{
AppID: a.ID,
AppSlug: a.Slug,
KubectlVersion: kotsKinds.KotsApplication.Spec.KubectlVersion,
AdditionalNamespaces: kotsKinds.KotsApplication.Spec.AdditionalNamespaces,
ImagePullSecret: imagePullSecret,
Namespace: ".",
Manifests: base64EncodedManifests,
PreviousManifests: base64EncodedPreviousManifests,
ResultCallback: "/api/v1/deploy/result",
Wait: false,
AnnotateSlug: os.Getenv("ANNOTATE_SLUG") != "",
}
c, err := server.GetChannel(clusterSocket.SocketID)
if err != nil {
return errors.Wrap(err, "failed to get socket channel from server")
}
// Event is sent here
c.Emit("deploy", deployArgs)
socketMtx.Lock()
clusterSocket.LastDeployedSequences[a.ID] = deployedVersion.ParentSequence
socketMtx.Unlock()
renderedInformers := []string{}
// deploy status informers
if len(kotsKinds.KotsApplication.Spec.StatusInformers) > 0 {
// render status informers
for _, informer := range kotsKinds.KotsApplication.Spec.StatusInformers {
renderedInformer, err := builder.String(informer)
if err != nil {
logger.Error(errors.Wrap(err, "failed to render status informer"))
continue
}
if renderedInformer == "" {
continue
}
renderedInformers = append(renderedInformers, renderedInformer)
}
}
if identitydeploy.IsEnabled(kotsKinds.Identity, kotsKinds.IdentityConfig) {
renderedInformers = append(renderedInformers, fmt.Sprintf("deployment/%s", identitytypes.DeploymentName(a.Slug)))
}
if len(renderedInformers) > 0 {
// send to kots operator
appInformersArgs := AppInformersArgs{
AppID: a.ID,
Informers: renderedInformers,
}
c.Emit("appInformers", appInformersArgs)
} else {
// no informers, set state to ready
defaultReadyState := []appstatustypes.ResourceState{
{
Kind: "EMPTY",
Name: "EMPTY",
Namespace: "EMPTY",
State: appstatustypes.StateReady,
},
}
err := appstatus.Set(a.ID, defaultReadyState, time.Now())
if err != nil {
return errors.Wrap(err, "failed to set app status")
}
}
return nil
}
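// supportBundleLoop checks every connected cluster socket for pending support bundles and processes them.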
func supportBundleLoop() {
for _, clusterSocket := range clusterSocketHistory {
apps, err := store.GetStore().ListAppsForDownstream(clusterSocket.ClusterID)
if err != nil {
logger.Error(errors.Wrap(err, "failed to list apps for cluster"))
}
pendingSupportBundles := []*supportbundletypes.PendingSupportBundle{}
for _, app := range apps {
appPendingSupportBundles, err := store.GetStore().ListPendingSupportBundlesForApp(app.ID)
if err != nil {
logger.Error(errors.Wrap(err, "failed to list pending support bundles for app"))
continue
}
pendingSupportBundles = append(pendingSupportBundles, appPendingSupportBundles...)
}
for _, sb := range pendingSupportBundles {
if err := processSupportBundle(clusterSocket, *sb); err != nil {
logger.Error(errors.Wrapf(err, "failed to process support bundle %s for app %s", sb.ID, sb.AppID))
continue
}
}
}
}
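// processSupportBundle renders the support bundle and redact specs for the app's current version and asks the cluster's operator to collect a bundle.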
func processSupportBundle(clusterSocket *ClusterSocket, pendingSupportBundle supportbundletypes.PendingSupportBundle) error {
a, err := store.GetStore().GetApp(pendingSupportBundle.AppID)
if err != nil {
return errors.Wrapf(err, "failed to get app %s", pendingSupportBundle.AppID)
}
c, err := server.GetChannel(clusterSocket.SocketID)
if err != nil {
return errors.Wrap(err, "failed to get socket channel from server")
}
sequence := int64(0)
currentVersion, err := downstream.GetCurrentVersion(a.ID, clusterSocket.ClusterID)
if err != nil {
return errors.Wrap(err, "failed to get current downstream version")
}
if currentVersion != nil {
sequence = currentVersion.Sequence
}
archivePath, err := ioutil.TempDir("", "kotsadm")
if err != nil {
return errors.Wrap(err, "failed to create temp dir")
}
defer os.RemoveAll(archivePath)
err = store.GetStore().GetAppVersionArchive(a.ID, sequence, archivePath)
if err != nil {
return errors.Wrap(err, "failed to get current archive")
}
kotsKinds, err := kotsutil.LoadKotsKindsFromPath(archivePath)
if err != nil {
return errors.Wrap(err, "failed to load current kotskinds")
}
err = supportbundle.CreateRenderedSpec(a.ID, sequence, "", true, kotsKinds)
if err != nil {
return errors.Wrap(err, "failed to create rendered support bundle spec")
}
err = redact.WriteRedactSpecConfigMap()
if err != nil {
return errors.Wrap(err, "failed to write redact spec configmap")
}
supportBundleArgs := SupportBundleArgs{
URI: supportbundle.GetSpecURI(a.Slug),
RedactURI: redact.GetRedactSpecURI(),
}
c.Emit("supportbundle", supportBundleArgs)
if err := supportbundle.ClearPending(pendingSupportBundle.ID); err != nil {
return errors.Wrap(err, "failed to clear pending support bundle")
}
return nil
}
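// restoreLoop checks every connected cluster socket for apps with a restore in progress and advances their restore state.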
func restoreLoop() {
for _, clusterSocket := range clusterSocketHistory {
apps, err := store.GetStore().ListAppsForDownstream(clusterSocket.ClusterID)
if err != nil {
logger.Error(errors.Wrap(err, "failed to list installed apps for downstream"))
continue
}
for _, a := range apps {
if err := processRestoreForApp(clusterSocket, a); err != nil {
logger.Error(errors.Wrapf(err, "failed to handle restoe for app %s", a.ID))
continue
}
}
}
}
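// processRestoreForApp advances an in-progress restore for a single app based on its undeploy status.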
func processRestoreForApp(clusterSocket *ClusterSocket, a *apptypes.App) error {
if a.RestoreInProgressName == "" {
return nil
}
switch a.RestoreUndeployStatus {
case apptypes.UndeployInProcess:
// no-op
break
case apptypes.UndeployCompleted:
if err := handleUndeployCompleted(clusterSocket, a); err != nil {
return errors.Wrap(err, "failed to handle undeploy completed")
}
break
case apptypes.UndeployFailed:
// no-op
break
default:
d, err := store.GetStore().GetDownstream(clusterSocket.ClusterID)
if err != nil {
return errors.Wrap(err, "failed to get downstream")
}
if err := undeployApp(a, d, clusterSocket); err != nil {
return errors.Wrap(err, "failed to undeploy app")
}
break
}
return nil
}
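// handleUndeployCompleted starts the velero restore for the app's snapshot, or checks an existing restore for completion.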
func handleUndeployCompleted(clusterSocket *ClusterSocket, a *apptypes.App) error {
snapshotName := a.RestoreInProgressName
restoreName := a.RestoreInProgressName
backup, err := snapshot.GetBackup(context.Background(), os.Getenv("POD_NAMESPACE"), snapshotName)
if err != nil {
return errors.Wrap(err, "failed to get backup")
}
if backup.Annotations["kots.io/instance"] == "true" {
restoreName = fmt.Sprintf("%s.%s", snapshotName, a.Slug)
}
restore, err := snapshot.GetRestore(context.Background(), os.Getenv("POD_NAMESPACE"), restoreName)
if err != nil {
return errors.Wrap(err, "failed to get restore")
}
if restore == nil {
return errors.Wrap(startVeleroRestore(snapshotName, a.Slug), "failed to start velero restore")
}
return errors.Wrap(checkRestoreComplete(clusterSocket, a, restore), "failed to check restore complete")
}
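// startVeleroRestore creates the velero restore object for the given snapshot and app slug.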
func startVeleroRestore(snapshotName string, appSlug string) error {
logger.Info(fmt.Sprintf("creating velero restore object from snapshot %s", snapshotName))
if err := snapshot.CreateApplicationRestore(context.Background(), os.Getenv("POD_NAMESPACE"), snapshotName, appSlug); err != nil {
return errors.Wrap(err, "failed to create restore")
}
return nil
}
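// checkRestoreComplete inspects the velero restore phase and, on completion, redeploys the restored app version and resets the restore state.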
func checkRestoreComplete(clusterSocket *ClusterSocket, a *apptypes.App, restore *velerov1.Restore) error {
switch restore.Status.Phase {
case velerov1.RestorePhaseCompleted:
backup, err := snapshot.GetBackup(context.Background(), os.Getenv("POD_NAMESPACE"), restore.Spec.BackupName)
if err != nil {
return errors.Wrap(err, "failed to get backup")
}
backupAnnotations := backup.ObjectMeta.GetAnnotations()
if backupAnnotations == nil {
return errors.New("backup is missing required annotations")
}
var sequence int64 = 0
if backupAnnotations["kots.io/instance"] == "true" {
b, ok := backupAnnotations["kots.io/apps-sequences"]
if !ok || b == "" {
return errors.New("instance backup is missing apps sequences annotation")
}
var appsSequences map[string]int64
if err := json.Unmarshal([]byte(b), &appsSequences); err != nil {
return errors.Wrap(err, "failed to unmarshal apps sequences")
}
s, ok := appsSequences[a.Slug]
if !ok {
return errors.New("instance backup is missing sequence annotation")
}
sequence = s
} else {
sequenceStr, ok := backupAnnotations["kots.io/app-sequence"]
if !ok || sequenceStr == "" {
return errors.New("backup is missing sequence annotation")
}
s, err := strconv.ParseInt(sequenceStr, 10, 64)
if err != nil {
return errors.Wrap(err, "failed to parse sequence")
}
sequence = s
}
logger.Info(fmt.Sprintf("restore complete, re-deploying version %d", sequence))
if err := RedeployAppVersion(a.ID, sequence, clusterSocket); err != nil {
return errors.Wrap(err, "failed to redeploy app version")
}
if err := createSupportBundle(a.ID, sequence, "", true); err != nil {
// support bundle is not essential. keep processing restore status
logger.Error(errors.Wrapf(err, "failed to create support bundle for sequence %d post restore", sequence))
}
if err := app.ResetRestore(a.ID); err != nil {
return errors.Wrap(err, "failed to reset restore")
}
break
case velerov1.RestorePhaseFailed, velerov1.RestorePhasePartiallyFailed:
logger.Info("restore failed, resetting app restore")
if err := app.ResetRestore(a.ID); err != nil {
return errors.Wrap(err, "failed to reset restore")
}
break
default:
// restore is in progress
break
}
return nil
}
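// createSupportBundle renders the support bundle spec for the given app version from its archived kots kinds.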
func createSupportBundle(appID string, sequence int64, origin string, inCluster bool) error {
archivePath, err := ioutil.TempDir("", "kotsadm")
if err != nil {
return errors.Wrap(err, "failed to create temp dir")
}
defer os.RemoveAll(archivePath)
err = store.GetStore().GetAppVersionArchive(appID, sequence, archivePath)
if err != nil {
return errors.Wrap(err, "failed to get current archive")
}
kotsKinds, err := kotsutil.LoadKotsKindsFromPath(archivePath)
if err != nil {
return errors.Wrap(err, "failed to load current kotskinds")
}
err = supportbundle.CreateRenderedSpec(appID, sequence, origin, inCluster, kotsKinds)
if err != nil {
return errors.Wrap(err, "failed to create rendered support bundle spec")
}
return nil
}
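// undeployApp asks the operator to remove the currently deployed manifests (and clear the backup's namespaces and PVCs) ahead of a restore.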
func undeployApp(a *apptypes.App, d *downstreamtypes.Downstream, clusterSocket *ClusterSocket) error {
deployedVersion, err := downstream.GetCurrentVersion(a.ID, d.ClusterID)
if err != nil {
return errors.Wrap(err, "failed to get current downstream version")
}
deployedVersionArchive, err := ioutil.TempDir("", "kotsadm")
if err != nil {
return errors.Wrap(err, "failed to create temp dir")
}
defer os.RemoveAll(deployedVersionArchive)
err = store.GetStore().GetAppVersionArchive(a.ID, deployedVersion.ParentSequence, deployedVersionArchive)
if err != nil {
return errors.Wrap(err, "failed to get app version archive")
}
kotsKinds, err := kotsutil.LoadKotsKindsFromPath(deployedVersionArchive)
if err != nil {
return errors.Wrap(err, "failed to load kotskinds")
}
cmd := exec.Command(fmt.Sprintf("kustomize%s", kotsKinds.KustomizeVersion()), "build", filepath.Join(deployedVersionArchive, "overlays", "downstreams", d.Name))
renderedManifests, err := cmd.Output()
if err != nil {
if ee, ok := err.(*exec.ExitError); ok {
err = fmt.Errorf("kustomize stderr: %q", string(ee.Stderr))
}
return errors.Wrap(err, "failed to run kustomize")
}
base64EncodedManifests := base64.StdEncoding.EncodeToString(renderedManifests)
backup, err := snapshot.GetBackup(context.Background(), os.Getenv("POD_NAMESPACE"), a.RestoreInProgressName)
if err != nil {
return errors.Wrap(err, "failed to get backup")
}
args := DeployArgs{
AppID: a.ID,
AppSlug: a.Slug,
KubectlVersion: kotsKinds.KotsApplication.Spec.KubectlVersion,
Namespace: ".",
Manifests: "",
PreviousManifests: base64EncodedManifests,
ResultCallback: "/api/v1/undeploy/result",
Wait: true,
ClearNamespaces: backup.Spec.IncludedNamespaces,
ClearPVCs: true,
}
c, err := server.GetChannel(clusterSocket.SocketID)
if err != nil {
return errors.Wrap(err, "failed to get socket channel from server")
}
c.Emit("deploy", args)
if err := app.SetRestoreUndeployStatus(a.ID, apptypes.UndeployInProcess); err != nil {
return errors.Wrap(err, "failed to set restore undeploy status")
}
return nil
}
// RedeployAppVersion will force trigger a redeploy of the app version, even if it's currently deployed
// if clusterSocket is nil, a redeploy is triggered to all the cluster sockets (downstreams - which theoretically should always be 1)
func RedeployAppVersion(appID string, sequence int64, clusterSocket *ClusterSocket) error {
if err := version.DeployVersion(appID, sequence); err != nil {
return errors.Wrap(err, "failed to deploy version")
}
socketMtx.Lock()
defer socketMtx.Unlock()
if clusterSocket != nil {
delete(clusterSocket.LastDeployedSequences, appID)
} else {
for _, clusterSocket := range clusterSocketHistory {
delete(clusterSocket.LastDeployedSequences, appID)
}
}
return nil
}
| [
"\"ANNOTATE_SLUG\"",
"\"POD_NAMESPACE\"",
"\"POD_NAMESPACE\"",
"\"POD_NAMESPACE\"",
"\"POD_NAMESPACE\"",
"\"POD_NAMESPACE\""
]
| []
| [
"POD_NAMESPACE",
"ANNOTATE_SLUG"
]
| [] | ["POD_NAMESPACE", "ANNOTATE_SLUG"] | go | 2 | 0 | |
hw3/SimpleMachine/ui/AbstractUI.java | package ui;
import java.util.HashMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.EnumSet;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import isa.AbstractISA;
import machine.AbstractMainMemory;
import machine.AbstractCPU;
import isa.Memory;
import ui.Machine;
/**
* Base class for Simulator UI. Extended by UI implementations.
*/
public abstract class AbstractUI {
protected static final String APPLICATION_NAME = "Simple Machine";
protected static final String MANIFEST_VERSION = AbstractUI.class.getPackage().getImplementationVersion();
protected static final String APPLICATION_VERSION = String.format ("Version %s", MANIFEST_VERSION!=null? MANIFEST_VERSION: "not specified");
protected static final String[] APPLICATION_COPYRIGHT = new String[] {"University of British Columbia", "Copyright \u00A9 2010-2014 Mike Feeley.", "All rights reserved."};
protected static final String APPLICATION_USAGE = "SimpleMachine -i [gui|cli] -a <arch> -v [student|solution]";
protected static final String APPLICATION_ENV_PREFIX = "SIMPLE_MACHINE_";
protected static final int MAIN_MEMORY_SIZE = 1*1024*1024;
protected static final String SOLUTION_VARIANT = "solution";
private static enum AppEnv {ARCHITECTURE, VARIANT, UI, UI_OPTIONS};
private static Env <AppEnv> env = new Env <AppEnv> (AppEnv.class);
protected static List <String> usageList = new ArrayList <String> ();
protected static List <Env <?>> envList = new ArrayList <Env <?>> ();
{
usageList.add (APPLICATION_USAGE);
envList.add (env);
}
static {
System.setProperty ("com.apple.mrj.application.apple.menu.about.name", APPLICATION_NAME);
}
protected Machine machine;
protected String applicationFullName;
// Machine Configuration
/**
* Configuration Definitions
*/
static private HashMap <String, Config> configs = new HashMap <String, Config> ();
static {
new Config ("SM213-VM",
"arch.sm213.isa.ISA",
"arch.sm213.machine.solution.MainMemory",
"arch.sm213.machine.solution.VirtualMemoryCPU",
"arch.sm213.machine.<variant>.MainMemory",
"arch.sm213.machine.<variant>.VirtualMemoryCPU",
"[showMac][animation]");
new Config ("SM213",
"arch.sm213.isa.ISA",
"arch.sm213.machine.solution.MainMemory",
"arch.sm213.machine.solution.CPU",
"arch.sm213.machine.<variant>.MainMemory",
"arch.sm213.machine.<variant>.CPU",
"[showMac][animation]");
new Config ("Y86-Seq",
"arch.y86.isa.ISA",
"arch.y86.machine.MainMemory",
"arch.y86.machine.seq.solution.CPU",
"arch.y86.machine.MainMemory",
"arch.y86.machine.seq.<variant>.CPU",
"[twoProcStateCols][showDataAddr][animation]");
new Config ("Y86-Pipe-Minus",
"arch.y86.isa.ISA",
"arch.y86.machine.MainMemory",
"arch.y86.machine.pipeminus.solution.CPU",
"arch.y86.machine.MainMemory",
"arch.y86.machine.pipeminus.<variant>.CPU",
"[twoProcStateCols][smallCurInsDpy][showDataAddr]");
new Config ("Y86-Pipe",
"arch.y86.isa.ISA",
"arch.y86.machine.MainMemory",
"arch.y86.machine.pipe.solution.CPU",
"arch.y86.machine.MainMemory",
"arch.y86.machine.pipe.<variant>.CPU",
"[twoProcStateCols][smallCurInsDpy][showDataAddr]");
new Config ("Y86-Benchmark",
"arch.y86.isa.ISA",
"arch.y86.machine.MainMemory",
"arch.y86.machine.benchmark.solution.CPU",
"arch.y86.machine.MainMemory",
"arch.y86.machine.benchmark.solution.CPU",
"[twoProcStateCols][showDataAddr]");
}
/**
* Machine Configuration Definition
*/
protected static class Config {
String name;
String isa;
String mem;
String cpu;
String memVar;
String cpuVar;
String uiOptions;
private Config (String aName, String anIsa, String aMem, String aCpu, String aMemVar, String aCpuVar, String aUiOptions) {
name=aName; isa=anIsa; mem=aMem; cpu=aCpu; memVar=aMemVar; cpuVar=aCpuVar; uiOptions=aUiOptions;
configs.put (name.toLowerCase(), this);
}
private class ConfigException extends ArgException {
ConfigException (String variant, String message) {
super (String.format ("Missing %s for %s-%s", message, name, variant));
}
}
private AbstractISA newISA (String variant) throws ConfigException {
String errMsg = "ISA definition";
try {
return (AbstractISA) Class.forName(isa).getConstructor ().newInstance ();
} catch (ClassNotFoundException e) {
throw this.new ConfigException (variant, errMsg);
} catch (NoSuchMethodException e) {
throw this.new ConfigException (variant, errMsg);
} catch (InstantiationException e) {
throw this.new ConfigException (variant, errMsg);
} catch (IllegalAccessException e) {
throw this.new ConfigException (variant, errMsg);
} catch (InvocationTargetException e) {
throw this.new ConfigException (variant, errMsg);
}
}
private AbstractMainMemory newMainMemory (String variant, int byteCapacity) throws ConfigException {
String errMsg = "main-memory implementation";
try {
String c = variant.equals (SOLUTION_VARIANT)? mem: memVar.replaceAll ("<variant>", variant);
return (AbstractMainMemory) Class.forName(c).getConstructor (int.class).newInstance (byteCapacity);
} catch (ClassNotFoundException e) {
throw this.new ConfigException (variant, errMsg);
} catch (NoSuchMethodException e) {
throw this.new ConfigException (variant, errMsg);
} catch (InstantiationException e) {
throw this.new ConfigException (variant, errMsg);
} catch (IllegalAccessException e) {
throw this.new ConfigException (variant, errMsg);
} catch (InvocationTargetException e) {
throw this.new ConfigException (variant, errMsg);
}
}
private AbstractCPU newCPU (String variant, AbstractMainMemory mainMemory) throws ConfigException {
String errMsg = "cpu implementation";
try {
String fullname = name;
if (!variant.isEmpty()) {
String vc = variant.substring(0,1).toUpperCase().concat(variant.substring(1,variant.length()));
fullname = fullname.concat ("-".concat (vc));
}
String c = variant.equals (SOLUTION_VARIANT)? cpu: cpuVar.replaceAll ("<variant>", variant);
return (AbstractCPU) Class.forName(c).getConstructor (String.class, AbstractMainMemory.class).newInstance (fullname, mainMemory);
} catch (ClassNotFoundException e) {
throw this.new ConfigException (variant, errMsg);
} catch (NoSuchMethodException e) {
throw this.new ConfigException (variant, errMsg);
} catch (InstantiationException e) {
throw this.new ConfigException (variant, errMsg);
} catch (IllegalAccessException e) {
throw this.new ConfigException (variant, errMsg);
} catch (InvocationTargetException e) {
throw (RuntimeException) e.getTargetException ();
}
}
public static Machine newMachine (String archName, String variantName) throws ArgException {
Config config = configs.get (archName);
if (config==null)
throw new ArgException (String.format ("Unknown architecture %s\n", archName));
AbstractISA isa = config.newISA (variantName);
AbstractMainMemory mainMemory = config.newMainMemory (variantName, MAIN_MEMORY_SIZE);
AbstractCPU cpu = config.newCPU (variantName, mainMemory);
Memory memory = new Memory (isa, mainMemory, cpu.getPC ());
return new Machine (cpu, memory, config.uiOptions);
}
}
// Environment Variables
/**
* Environment Variable Handling
*/
public static class Env <E extends Enum <E>> {
private Class <E> enumClass;
private static String prefix = APPLICATION_ENV_PREFIX;
private static String nameOf (String var) {
return prefix.concat (var);
}
private static String valueOf (String var) {
return System.getenv (nameOf (var));
}
public Env (Class <E> anEnumClass) {
enumClass = anEnumClass;
}
public final String nameOf (E var) {
return nameOf (var.toString());
}
public final String valueOf (E var) {
return valueOf (var.toString());
}
public final String valueOf (E var, String ifNullValue) {
String value = valueOf (var);
return value!=null? value.toLowerCase(): (ifNullValue!=null? ifNullValue.toLowerCase(): null);
}
public final List <String> getNames () {
ArrayList <String> names = new ArrayList <String> ();
for (E var : EnumSet.allOf (enumClass))
names.add (nameOf (var));
return names;
}
}
static void showUsage () {
System.out.print ("Usage: ");
for (String s : usageList)
System.out.printf ("%s\n",s);
}
static void showEnv () {
System.out.print ("Environment Variables:\n");
for (Env <?> e : envList) {
for (String s : e.getNames ())
System.out.printf ("\t%s\n",s);
}
}
// Argument Parsing
/**
* Thrown to indicate syntax error in command line arguments.
*/
protected static class ArgException extends Exception {
public ArgException (String msg) {
super (msg);
}
}
/**
* Parse command-line arguments for specified switch, returning its value.
* @param args list of command-line arguments.
* @param argSwitch string (starting with "-" that starts argument.
* @param isRequired switch is required.
* @param isSwitchValueRequired true iff a value is required to follow switch if it is present.
* @param isSwitchValueListAllowed true iff a list of values is allowed to follow the switch.
* @param valueIfSwitchMissing value to return if switch is missing.
* @return list of values of argSwitch or "" if it is found, but with no value.
* @throws ArgException if switch is not found, isRequired is true, and valueIfSwitchMissing is null
* or if it is found without a value and isSwitchValueRequired is true.
*/
protected static List <String> getArgList (List <String> args,
String argSwitch,
boolean isRequired,
boolean isSwitchValueRequired,
boolean isSwitchValueListAllowed,
String valueIfSwitchMissing)
throws ArgException {
for (int i=0; i<args.size(); i++)
if (args.get(i).equals (argSwitch)) {
args.remove (i);
ArrayList <String> al = new ArrayList <String> ();
while (args.size()>i && !args.get(i).startsWith ("-")) {
al.add (args.remove(i).toLowerCase());
if (!isSwitchValueListAllowed)
break;
}
if (al.size()==0) {
if (isSwitchValueRequired)
throw new ArgException (String.format ("Missing argument value for %s", argSwitch));
else if (valueIfSwitchMissing!=null)
al.add (valueIfSwitchMissing.toLowerCase());
else
al.add ("");
}
return al;
}
if (!isRequired || valueIfSwitchMissing!=null) {
if (valueIfSwitchMissing!=null)
return Arrays.asList (valueIfSwitchMissing.toLowerCase());
else
return new ArrayList <String> ();
} else
throw new ArgException (String.format ("Missing argument %s", argSwitch));
}
protected static String getArg (ArrayList <String> args, String argSwitch, boolean isRequired, boolean isSwitchValueRequired, String valueIfSwitchMissing) throws ArgException {
List <String> argList = getArgList (args, argSwitch, isRequired, isSwitchValueRequired, false, valueIfSwitchMissing);
assert argList.size()<=1;
if (argList.size()>0)
return argList.get (0);
else
return null;
}
protected static Integer getArgInt (ArrayList <String> args, String argSwitch, boolean isRequired, boolean isSwitchValueRequired, String valueIfSwitchMissing) throws ArgException {
String arg = getArg (args, argSwitch, isRequired, isSwitchValueRequired, valueIfSwitchMissing);
if (arg!=null)
try {
int radix;
if (arg.substring(0,2).toLowerCase().equals ("0x")) {
arg = arg.substring (2, arg.length());
radix = 16;
} else
radix = 10;
return new Integer (Integer.parseInt (arg, radix));
} catch (NumberFormatException e) {
throw new ArgException ("Command argument must be a number.");
} else
return null;
}
/**
* Main entry point to Simple Machine (usually called by SimpleMachine.main() to give the execution
* instance the name "SimpleMachine" instead of "AbstractUI", but starting with this class works fine too).
*/
public static void main (String[] argsArray) {
ArrayList <String> args = new ArrayList <String> (Arrays.asList (argsArray));
String errMessage = "";
AbstractUI ui = null;
// Find constructor for UI class specified in args
try {
String uiName = getArg (args, "-i", true, true, env.valueOf (AppEnv.UI, "gui"));
try {
errMessage = String.format ("UI %s not supported.", uiName);
Class <?> uiClass = Class.forName ("ui.".concat (uiName).concat (".UI"));
Constructor <?> uiCtor = uiClass.getConstructor (ArrayList.class);
ui = (AbstractUI) uiCtor.newInstance (args);
ui.run ();
} catch (ClassNotFoundException cnfe) {
throw new ArgException (errMessage);
} catch (NoSuchMethodException nsme) {
throw new ArgException (errMessage);
} catch (InstantiationException e) {
throw new ArgException (errMessage);
} catch (IllegalAccessException e) {
throw new ArgException (errMessage);
} catch (InvocationTargetException e) {
throw e.getTargetException ();
}
} catch (Throwable e) {
if (e instanceof ArgException) {
System.out.printf ("%s\n", e.getMessage ());
showUsage ();
showEnv ();
} else
throw new AssertionError (e);
}
}
public AbstractUI (ArrayList <String> args) throws ArgException {
// Initialize the AbstractUI Part 1
// Parse args and configure machine
String archName = getArg (args, "-a", true, true, env.valueOf (AppEnv.ARCHITECTURE));
String variantName = getArg (args, "-v", true, true, env.valueOf (AppEnv.VARIANT, "student"));
String uiOptions = getArg (args, "-o", true, true, env.valueOf (AppEnv.UI_OPTIONS, ""));
machine = Config.newMachine (archName, variantName);
// Initialize the AbstractUI Part 2
applicationFullName = String.format ("%s (%s)", APPLICATION_NAME, machine.getName());
}
/**
* Run the UI
*/
public abstract void run ();
} | []
| []
| []
| [] | [] | java | 0 | 0 | |
interferogram/sentinel/sciflo_audit_rsp.py | #!/usr/bin/env python
import os, sys, json, re, shutil
from subprocess import check_call
WORK_RE = re.compile(r'\d{5}-.+')
def copy_sciflo_work(output_dir):
"""Move over sciflo work dirs."""
for root, dirs, files in os.walk(output_dir):
for d in dirs:
if not WORK_RE.search(d): continue
path = os.path.join(root, d)
if os.path.islink(path) and os.path.exists(path):
real_path = os.path.realpath(path)
base_name = os.path.basename(real_path)
new_path = os.path.join(root, base_name)
shutil.copytree(real_path, new_path)
os.unlink(path)
os.symlink(base_name, path)
def extract_error(sfl_json):
"""Extract SciFlo error and traceback for mozart."""
with open(sfl_json) as f: j = json.load(f)
exc_message = j.get('exceptionMessage', None)
if exc_message is not None:
try: exc_list = eval(exc_message)
except: exc_list = []
if len(exc_list) == 3:
proc = exc_list[0]
exc = exc_list[1]
tb = exc_list[2]
try: exc = eval(exc)
except: pass
if isinstance(exc, tuple) and len(exc) == 2:
err = exc[0]
job_json = exc[1]
if isinstance(job_json, dict):
if 'job_id' in job_json:
err_str = 'SciFlo step %s with job_id %s (task %s) failed: %s' % \
(proc, job_json['job_id'], job_json['uuid'], err)
with open('_alt_error.txt', 'w') as f:
f.write("%s\n" % err_str)
with open('_alt_traceback.txt', 'w') as f:
f.write("%s\n" % job_json['traceback'])
else:
err_str = 'SciFlo step %s failed: %s' % (proc, exc)
with open('_alt_error.txt', 'w') as f:
f.write("%s\n" % err_str)
with open('_alt_traceback.txt', 'w') as f:
f.write("%s\n" % tb)
def main():
"""Run S1 audit SLCP sciflo."""
# read in _context.json
context_file = os.path.abspath("_context.json")
if not os.path.exists(context_file):
raise RuntimeError("Context file %s doesn't exist." % context_file)
with open('_context.json') as f:
context = json.load(f)
# get workflow
SFL = os.path.join(os.environ['HOME'], 'ariamh', 'interferogram', 'sentinel', 'AuditSentinelRegSLCPair.sf.xml')
# build sciflo args
sfl_args = ["context_file=%s" % context_file]
# build paths to executables
SFLEXEC_CMD = os.path.join(os.environ['HOME'], 'verdi', 'bin', 'sflExec.py')
# execute sciflo
cmd = [SFLEXEC_CMD, "-d", "-s", "-f", "-o", "output", "--args", '"%s"' % ','.join(sfl_args), SFL]
print("Running sflExec.py command:\n%s" % ' '.join(cmd))
#check_call(cmd, shell)
status = os.system(' '.join(cmd))
print("Exit status is: %d" % status)
if status != 0:
extract_error('output/sciflo.json')
status = 1
# copy sciflo work and exec dir
try: copy_sciflo_work("output")
except: pass
return status
if __name__ == "__main__":
sys.exit(main())
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
build/tools/roomservice.py | #!/usr/bin/env python
# Copyright (C) 2012-2013, The CyanogenMod Project
# (C) 2017-2018,2020-2021, The LineageOS Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import base64
import json
import netrc
import os
import re
import sys
try:
# For python3
import urllib.error
import urllib.parse
import urllib.request
except ImportError:
# For python2
import imp
import urllib2
import urlparse
urllib = imp.new_module('urllib')
urllib.error = urllib2
urllib.parse = urlparse
urllib.request = urllib2
from xml.etree import ElementTree
product = sys.argv[1]
depsonly = sys.argv[2] if len(sys.argv) > 2 else None
try:
device = product[product.index("_") + 1:]
except:
device = product
if not depsonly:
print("Device %s not found. Attempting to retrieve device repository from PixelBlaster-OS Github (http://github.com/PixelBlaster-Devices)." % device)
repositories = []
try:
if authtuple := netrc.netrc().authenticators("api.github.com"):
auth_string = ('%s:%s' % (authtuple[0], authtuple[2])).encode()
githubauth = base64.encodebytes(auth_string).decode().replace('\n', '')
else:
githubauth = None
except:
githubauth = None
def add_auth(githubreq):
if githubauth:
githubreq.add_header("Authorization","Basic %s" % githubauth)
if not depsonly:
githubreq = urllib.request.Request("https://api.github.com/search/repositories?q=%s+user:PixelBlaster-Devices+in:name+fork:true" % device)
add_auth(githubreq)
try:
result = json.loads(urllib.request.urlopen(githubreq).read().decode())
except urllib.error.URLError:
print("Failed to search GitHub")
sys.exit(1)
except ValueError:
print("Failed to parse return data from GitHub")
sys.exit(1)
for res in result.get('items', []):
repositories.append(res)
local_manifests = r'.repo/local_manifests'
if not os.path.exists(local_manifests): os.makedirs(local_manifests)
def exists_in_tree(lm, path):
return any(child.attrib['path'] == path for child in lm.getchildren())
# in-place prettyprint formatter
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = f'{i} '
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
elif level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def get_manifest_path():
'''Find the current manifest path
In old versions of repo this is at .repo/manifest.xml
In new versions, .repo/manifest.xml includes an include
to some arbitrary file in .repo/manifests'''
m = ElementTree.parse(".repo/manifest.xml")
try:
m.findall('default')[0]
return '.repo/manifest.xml'
except IndexError:
return ".repo/manifests/{}".format(m.find("include").get("name"))
def get_default_revision():
m = ElementTree.parse(get_manifest_path())
d = m.findall('default')[0]
r = d.get('revision')
return r.replace('refs/heads/', '').replace('refs/tags/', '')
def get_from_manifest(devicename):
try:
lm = ElementTree.parse(".repo/local_manifests/roomservice.xml")
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
return next(
(localpath.get("path") for localpath in lm.findall("project")
if re.search("android_device_.*_%s$" % device, localpath.get("name"))),
None,
)
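# check the roomservice manifest, the main manifest and the lineage/blaster snippets for a project at this path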
def is_in_manifest(projectpath):
try:
lm = ElementTree.parse(".repo/local_manifests/roomservice.xml")
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
for localpath in lm.findall("project"):
if localpath.get("path") == projectpath:
return True
# Search in main manifest, too
try:
lm = ElementTree.parse(get_manifest_path())
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
for localpath in lm.findall("project"):
if localpath.get("path") == projectpath:
return True
# ... and don't forget the lineage snippet
try:
lm = ElementTree.parse(".repo/manifests/snippets/extras.xml")
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
for localpath in lm.findall("project"):
if localpath.get("path") == projectpath:
return True
# ... and don't forget the blaster snippet
try:
lm = ElementTree.parse(".repo/manifests/snippets/blaster.xml")
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
return any(
localpath.get("path") == projectpath
for localpath in lm.findall("project"))
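# add any repositories not already tracked to .repo/local_manifests/roomservice.xml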
def add_to_manifest(repositories, fallback_branch = None):
try:
lm = ElementTree.parse(".repo/local_manifests/roomservice.xml")
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
for repository in repositories:
repo_name = repository['repository']
repo_target = repository['target_path']
print('Checking if %s is fetched from %s' % (repo_target, repo_name))
if is_in_manifest(repo_target):
print('PixelBlaster-OS/%s already fetched to %s' % (repo_name, repo_target))
continue
print('Adding dependency: PixelBlaster-OS/%s -> %s' % (repo_name, repo_target))
project = ElementTree.Element("project", attrib = { "path": repo_target,
"remote": "github", "name": "PixelBlaster-OS/%s" % repo_name })
if 'branch' in repository:
project.set('revision',repository['branch'])
elif fallback_branch:
print("Using fallback branch %s for %s" % (fallback_branch, repo_name))
project.set('revision', fallback_branch)
else:
print("Using default branch for %s" % repo_name)
lm.append(project)
indent(lm, 0)
raw_xml = ElementTree.tostring(lm).decode()
raw_xml = '<?xml version="1.0" encoding="UTF-8"?>\n' + raw_xml
with open('.repo/local_manifests/roomservice.xml', 'w') as f:
f.write(raw_xml)
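# read <repo_path>/aosp.dependencies, add missing repos to the local manifest and sync them recursively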
def fetch_dependencies(repo_path, fallback_branch = None):
print('Looking for dependencies in %s' % repo_path)
dependencies_path = f'{repo_path}/aosp.dependencies'
syncable_repos = []
verify_repos = []
if os.path.exists(dependencies_path):
with open(dependencies_path, 'r') as dependencies_file:
dependencies = json.loads(dependencies_file.read())
fetch_list = []
for dependency in dependencies:
if not is_in_manifest(dependency['target_path']):
fetch_list.append(dependency)
syncable_repos.append(dependency['target_path'])
verify_repos.append(dependency['target_path'])
if not os.path.isdir(dependency['target_path']):
syncable_repos.append(dependency['target_path'])
if fetch_list:
print('Adding dependencies to manifest')
add_to_manifest(fetch_list, fallback_branch)
else:
print('%s has no additional dependencies.' % repo_path)
if syncable_repos:
print('Syncing dependencies')
os.system('repo sync --force-sync %s' % ' '.join(syncable_repos))
for deprepo in verify_repos:
fetch_dependencies(deprepo)
def has_branch(branches, revision):
return revision in [branch['name'] for branch in branches]
if depsonly:
if repo_path := get_from_manifest(device):
fetch_dependencies(repo_path)
else:
print("Trying dependencies-only mode on a non-existing device tree?")
sys.exit()
else:
for repository in repositories:
repo_name = repository['name']
if re.match(f"^android_device_[^_]*_{device}$", repo_name):
print("Found repository: %s" % repository['name'])
manufacturer = repo_name.replace("android_device_", "").replace("_" + device, "")
default_revision = get_default_revision()
print("Default revision: %s" % default_revision)
print("Checking branch info")
githubreq = urllib.request.Request(repository['branches_url'].replace('{/branch}', ''))
add_auth(githubreq)
result = json.loads(urllib.request.urlopen(githubreq).read().decode())
## Try tags, too, since that's what releases use
if not has_branch(result, default_revision):
githubreq = urllib.request.Request(repository['tags_url'].replace('{/tag}', ''))
add_auth(githubreq)
result.extend (json.loads(urllib.request.urlopen(githubreq).read().decode()))
repo_path = "device/%s/%s" % (manufacturer, device)
adding = {'repository':repo_name,'target_path':repo_path}
fallback_branch = None
if not has_branch(result, default_revision):
if os.getenv('ROOMSERVICE_BRANCHES'):
fallbacks = list(filter(bool, os.getenv('ROOMSERVICE_BRANCHES').split(' ')))
for fallback in fallbacks:
if has_branch(result, fallback):
print("Using fallback branch: %s" % fallback)
fallback_branch = fallback
break
if not fallback_branch:
print("Default revision %s not found in %s. Bailing." % (default_revision, repo_name))
print("Branches found:")
for branch in [branch['name'] for branch in result]:
print(branch)
print("Use the ROOMSERVICE_BRANCHES environment variable to specify a list of fallback branches.")
sys.exit()
add_to_manifest([adding], fallback_branch)
print("Syncing repository to retrieve project.")
os.system('repo sync --force-sync %s' % repo_path)
print("Repository synced!")
fetch_dependencies(repo_path, fallback_branch)
print("Done")
sys.exit()
print("Repository for %s not found in the PixelBlaster-OS Github repository list. If this is in error, you may need to manually add it to your local_manifests/roomservice.xml." % device)
| []
| []
| [
"ROOMSERVICE_BRANCHES"
]
| [] | ["ROOMSERVICE_BRANCHES"] | python | 1 | 0 | |
node/node.go | package node
import (
"context"
"encoding/json"
"fmt"
"os"
"reflect"
"runtime"
"sync"
"time"
ps "github.com/cskr/pubsub"
"github.com/ipfs/go-bitswap"
bsnet "github.com/ipfs/go-bitswap/network"
bserv "github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-graphsync"
"github.com/ipfs/go-graphsync/ipldbridge"
gsnet "github.com/ipfs/go-graphsync/network"
gsstoreutil "github.com/ipfs/go-graphsync/storeutil"
"github.com/ipfs/go-hamt-ipld"
bstore "github.com/ipfs/go-ipfs-blockstore"
"github.com/ipfs/go-ipfs-exchange-interface"
"github.com/ipfs/go-ipfs-exchange-offline"
offroute "github.com/ipfs/go-ipfs-routing/offline"
logging "github.com/ipfs/go-log"
"github.com/ipfs/go-merkledag"
"github.com/libp2p/go-libp2p"
autonatsvc "github.com/libp2p/go-libp2p-autonat-svc"
circuit "github.com/libp2p/go-libp2p-circuit"
"github.com/libp2p/go-libp2p-core/host"
p2pmetrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/routing"
"github.com/libp2p/go-libp2p-kad-dht"
"github.com/libp2p/go-libp2p-kad-dht/opts"
libp2pps "github.com/libp2p/go-libp2p-pubsub"
rhost "github.com/libp2p/go-libp2p/p2p/host/routed"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/chain"
"github.com/filecoin-project/go-filecoin/clock"
"github.com/filecoin-project/go-filecoin/config"
"github.com/filecoin-project/go-filecoin/consensus"
"github.com/filecoin-project/go-filecoin/core"
"github.com/filecoin-project/go-filecoin/flags"
"github.com/filecoin-project/go-filecoin/metrics"
"github.com/filecoin-project/go-filecoin/mining"
"github.com/filecoin-project/go-filecoin/net"
"github.com/filecoin-project/go-filecoin/net/pubsub"
"github.com/filecoin-project/go-filecoin/paths"
"github.com/filecoin-project/go-filecoin/plumbing"
"github.com/filecoin-project/go-filecoin/plumbing/cfg"
"github.com/filecoin-project/go-filecoin/plumbing/cst"
"github.com/filecoin-project/go-filecoin/plumbing/dag"
"github.com/filecoin-project/go-filecoin/plumbing/msg"
"github.com/filecoin-project/go-filecoin/plumbing/strgdls"
"github.com/filecoin-project/go-filecoin/porcelain"
"github.com/filecoin-project/go-filecoin/proofs/sectorbuilder"
"github.com/filecoin-project/go-filecoin/proofs/verification"
"github.com/filecoin-project/go-filecoin/protocol/block"
"github.com/filecoin-project/go-filecoin/protocol/hello"
"github.com/filecoin-project/go-filecoin/protocol/retrieval"
"github.com/filecoin-project/go-filecoin/protocol/storage"
"github.com/filecoin-project/go-filecoin/repo"
"github.com/filecoin-project/go-filecoin/sampling"
"github.com/filecoin-project/go-filecoin/state"
"github.com/filecoin-project/go-filecoin/types"
"github.com/filecoin-project/go-filecoin/util/moresync"
vmerr "github.com/filecoin-project/go-filecoin/vm/errors"
"github.com/filecoin-project/go-filecoin/wallet"
)
var log = logging.Logger("node") // nolint: deadcode
var (
// ErrNoMinerAddress is returned when the node is not configured to have any miner addresses.
ErrNoMinerAddress = errors.New("no miner addresses configured")
)
type pubSubHandler func(ctx context.Context, msg pubsub.Message) error
type nodeChainReader interface {
GenesisCid() cid.Cid
GetHead() types.TipSetKey
GetTipSet(types.TipSetKey) (types.TipSet, error)
GetTipSetState(ctx context.Context, tsKey types.TipSetKey) (state.Tree, error)
HeadEvents() *ps.PubSub
Load(context.Context) error
Stop()
}
type nodeChainSyncer interface {
HandleNewTipSet(ctx context.Context, ci *types.ChainInfo, trusted bool) error
}
// Node represents a full Filecoin node.
type Node struct {
host host.Host
PeerHost host.Host
Consensus consensus.Protocol
ChainReader nodeChainReader
MessageStore *chain.MessageStore
Syncer nodeChainSyncer
PowerTable consensus.PowerTableView
BlockMiningAPI *block.MiningAPI
PorcelainAPI *porcelain.API
RetrievalAPI *retrieval.API
StorageAPI *storage.API
// HeaviestTipSetCh is a subscription to the heaviest tipset topic on the chain.
// https://github.com/filecoin-project/go-filecoin/issues/2309
HeaviestTipSetCh chan interface{}
// cancelChainSync cancels the context for chain sync subscriptions and handlers.
cancelChainSync context.CancelFunc
// Incoming messages for block mining.
Inbox *core.Inbox
// Messages sent and not yet mined.
Outbox *core.Outbox
Wallet *wallet.Wallet
// Mining stuff.
AddNewlyMinedBlock newBlockFunc
// cancelMining cancels the context for block production and sector commitments.
cancelMining context.CancelFunc
MiningWorker mining.Worker
MiningScheduler mining.Scheduler
mining struct {
sync.Mutex
isMining bool
}
miningDoneWg *sync.WaitGroup
// Storage Market Interfaces
StorageMiner *storage.Miner
// Retrieval Interfaces
RetrievalMiner *retrieval.Miner
// Network Fields
BlockSub pubsub.Subscription
MessageSub pubsub.Subscription
HelloSvc *hello.Handler
Bootstrapper *net.Bootstrapper
// Data Storage Fields
// Repo is the repo this node was created with
// it contains all persistent artifacts of the filecoin node
Repo repo.Repo
// SectorBuilder is used by the miner to fill and seal sectors.
sectorBuilder sectorbuilder.SectorBuilder
// PeerTracker maintains a list of peers good for fetching.
PeerTracker *net.PeerTracker
// Fetcher is the interface for fetching data from nodes.
Fetcher net.Fetcher
// Exchange is the interface for fetching data from other nodes.
Exchange exchange.Interface
// Blockstore is the un-networked blocks interface
Blockstore bstore.Blockstore
// Blockservice is a higher level interface for fetching data
blockservice bserv.BlockService
// CborStore is a temporary interface for interacting with IPLD objects.
cborStore *hamt.CborIpldStore
// OfflineMode, when true, disables libp2p
OfflineMode bool
// Router is a router from IPFS
Router routing.Routing
}
// Config is a helper to aid in the construction of a filecoin node.
type Config struct {
BlockTime time.Duration
Libp2pOpts []libp2p.Option
OfflineMode bool
Verifier verification.Verifier
Rewarder consensus.BlockRewarder
Repo repo.Repo
IsRelay bool
}
// ConfigOpt is a configuration option for a filecoin node.
type ConfigOpt func(*Config) error
// OfflineMode enables or disables offline mode.
func OfflineMode(offlineMode bool) ConfigOpt {
return func(c *Config) error {
c.OfflineMode = offlineMode
return nil
}
}
// IsRelay configures node to act as a libp2p relay.
func IsRelay() ConfigOpt {
return func(c *Config) error {
c.IsRelay = true
return nil
}
}
// BlockTime sets the blockTime.
func BlockTime(blockTime time.Duration) ConfigOpt {
return func(c *Config) error {
c.BlockTime = blockTime
return nil
}
}
// Libp2pOptions returns a node config option that sets up the libp2p node
func Libp2pOptions(opts ...libp2p.Option) ConfigOpt {
return func(nc *Config) error {
// Quietly having your options overridden leads to hair loss
if len(nc.Libp2pOpts) > 0 {
panic("Libp2pOptions can only be called once")
}
nc.Libp2pOpts = opts
return nil
}
}
// VerifierConfigOption returns a function that sets the verifier to use in the node consensus
func VerifierConfigOption(verifier verification.Verifier) ConfigOpt {
return func(c *Config) error {
c.Verifier = verifier
return nil
}
}
// RewarderConfigOption returns a function that sets the rewarder to use in the node consensus
func RewarderConfigOption(rewarder consensus.BlockRewarder) ConfigOpt {
return func(c *Config) error {
c.Rewarder = rewarder
return nil
}
}
// New creates a new node.
func New(ctx context.Context, opts ...ConfigOpt) (*Node, error) {
n := &Config{}
for _, o := range opts {
if err := o(n); err != nil {
return nil, err
}
}
return n.Build(ctx)
}
type blankValidator struct{}
func (blankValidator) Validate(_ string, _ []byte) error { return nil }
func (blankValidator) Select(_ string, _ [][]byte) (int, error) { return 0, nil }
// readGenesisCid is a helper function that queries the provided datastore for
// an entry with the genesisKey cid, returning if found.
func readGenesisCid(ds datastore.Datastore) (cid.Cid, error) {
bb, err := ds.Get(chain.GenesisKey)
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to read genesisKey")
}
var c cid.Cid
err = json.Unmarshal(bb, &c)
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to cast genesisCid")
}
return c, nil
}
// buildHost determines if we are publicly dialable. If so, use the public
// address; if not, configure the node to announce its relay address.
func (nc *Config) buildHost(ctx context.Context, makeDHT func(host host.Host) (routing.Routing, error)) (host.Host, error) {
// Node must build a host acting as a libp2p relay. Additionally it
// runs the autoNAT service which allows other nodes to check for their
// own dialability by having this node attempt to dial them.
makeDHTRightType := func(h host.Host) (routing.PeerRouting, error) {
return makeDHT(h)
}
if nc.IsRelay {
cfg := nc.Repo.Config()
publicAddr, err := ma.NewMultiaddr(cfg.Swarm.PublicRelayAddress)
if err != nil {
return nil, err
}
publicAddrFactory := func(lc *libp2p.Config) error {
lc.AddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr {
if cfg.Swarm.PublicRelayAddress == "" {
return addrs
}
return append(addrs, publicAddr)
}
return nil
}
relayHost, err := libp2p.New(
ctx,
libp2p.EnableRelay(circuit.OptHop),
libp2p.EnableAutoRelay(),
libp2p.Routing(makeDHTRightType),
publicAddrFactory,
libp2p.ChainOptions(nc.Libp2pOpts...),
)
if err != nil {
return nil, err
}
// Set up autoNATService as a streamhandler on the host.
_, err = autonatsvc.NewAutoNATService(ctx, relayHost)
if err != nil {
return nil, err
}
return relayHost, nil
}
return libp2p.New(
ctx,
libp2p.EnableAutoRelay(),
libp2p.Routing(makeDHTRightType),
libp2p.ChainOptions(nc.Libp2pOpts...),
)
}
// Build instantiates a filecoin Node from the settings specified in the config.
func (nc *Config) Build(ctx context.Context) (*Node, error) {
if nc.Repo == nil {
nc.Repo = repo.NewInMemoryRepo()
}
bs := bstore.NewBlockstore(nc.Repo.Datastore())
validator := blankValidator{}
var peerHost host.Host
var router routing.Routing
bandwidthTracker := p2pmetrics.NewBandwidthCounter()
nc.Libp2pOpts = append(nc.Libp2pOpts, libp2p.BandwidthReporter(bandwidthTracker))
if !nc.OfflineMode {
makeDHT := func(h host.Host) (routing.Routing, error) {
r, err := dht.New(
ctx,
h,
dhtopts.Datastore(nc.Repo.Datastore()),
dhtopts.NamespacedValidator("v", validator),
dhtopts.Protocols(net.FilecoinDHT),
)
if err != nil {
return nil, errors.Wrap(err, "failed to setup routing")
}
router = r
return r, err
}
var err error
peerHost, err = nc.buildHost(ctx, makeDHT)
if err != nil {
return nil, err
}
} else {
router = offroute.NewOfflineRouter(nc.Repo.Datastore(), validator)
peerHost = rhost.Wrap(noopLibP2PHost{}, router)
}
// set up pinger
pingService := ping.NewPingService(peerHost)
// setup block validation
// TODO when #2961 is resolved do the needful here.
blkValid := consensus.NewDefaultBlockValidator(nc.BlockTime, clock.NewSystemClock())
// set up peer tracking
peerTracker := net.NewPeerTracker()
// set up bitswap
nwork := bsnet.NewFromIpfsHost(peerHost, router)
//nwork := bsnet.NewFromIpfsHost(innerHost, router)
bswap := bitswap.New(ctx, nwork, bs)
bservice := bserv.New(bs, bswap)
graphsyncNetwork := gsnet.NewFromLibp2pHost(peerHost)
bridge := ipldbridge.NewIPLDBridge()
loader := gsstoreutil.LoaderForBlockstore(bs)
storer := gsstoreutil.StorerForBlockstore(bs)
gsync := graphsync.New(ctx, graphsyncNetwork, bridge, loader, storer)
fetcher := net.NewGraphSyncFetcher(ctx, gsync, bs, blkValid, peerTracker)
ipldCborStore := hamt.CborIpldStore{Blocks: bserv.New(bs, offline.Exchange(bs))}
genCid, err := readGenesisCid(nc.Repo.Datastore())
if err != nil {
return nil, err
}
// set up chain and message stores
chainStore := chain.NewStore(nc.Repo.ChainDatastore(), &ipldCborStore, &state.TreeStateLoader{}, genCid)
messageStore := chain.NewMessageStore(&ipldCborStore)
chainState := cst.NewChainStateProvider(chainStore, messageStore, &ipldCborStore)
powerTable := &consensus.MarketView{}
// set up processor
var processor consensus.Processor
if nc.Rewarder == nil {
processor = consensus.NewDefaultProcessor()
} else {
processor = consensus.NewConfiguredProcessor(consensus.NewDefaultMessageValidator(), nc.Rewarder)
}
// set up consensus
var nodeConsensus consensus.Protocol
if nc.Verifier == nil {
nodeConsensus = consensus.NewExpected(&ipldCborStore, bs, processor, blkValid, powerTable, genCid, &verification.RustVerifier{}, nc.BlockTime)
} else {
nodeConsensus = consensus.NewExpected(&ipldCborStore, bs, processor, blkValid, powerTable, genCid, nc.Verifier, nc.BlockTime)
}
// Set up libp2p network
// TODO PubSub requires strict message signing, disabled for now
// reference issue: #3124
fsub, err := libp2pps.NewFloodSub(ctx, peerHost, libp2pps.WithMessageSigning(false))
if err != nil {
return nil, errors.Wrap(err, "failed to set up network")
}
backend, err := wallet.NewDSBackend(nc.Repo.WalletDatastore())
if err != nil {
return nil, errors.Wrap(err, "failed to set up wallet backend")
}
fcWallet := wallet.New(backend)
// only the syncer gets the storage which is online connected
chainSyncer := chain.NewSyncer(nodeConsensus, chainStore, messageStore, fetcher)
msgPool := core.NewMessagePool(nc.Repo.Config().Mpool, consensus.NewIngestionValidator(chainState, nc.Repo.Config().Mpool))
inbox := core.NewInbox(msgPool, core.InboxMaxAgeTipsets, chainStore, messageStore)
msgQueue := core.NewMessageQueue()
outboxPolicy := core.NewMessageQueuePolicy(chainStore, messageStore, core.OutboxMaxAgeRounds)
msgPublisher := newDefaultMessagePublisher(pubsub.NewPublisher(fsub), net.MessageTopic, msgPool)
outbox := core.NewOutbox(fcWallet, consensus.NewOutboundMessageValidator(), msgQueue, msgPublisher, outboxPolicy, chainStore, chainState)
nd := &Node{
blockservice: bservice,
Blockstore: bs,
cborStore: &ipldCborStore,
Consensus: nodeConsensus,
ChainReader: chainStore,
MessageStore: messageStore,
Syncer: chainSyncer,
PowerTable: powerTable,
PeerTracker: peerTracker,
Fetcher: fetcher,
Exchange: bswap,
host: peerHost,
Inbox: inbox,
OfflineMode: nc.OfflineMode,
Outbox: outbox,
PeerHost: peerHost,
Repo: nc.Repo,
Wallet: fcWallet,
Router: router,
}
nd.PorcelainAPI = porcelain.New(plumbing.New(&plumbing.APIDeps{
Bitswap: bswap,
Chain: chainState,
Config: cfg.NewConfig(nc.Repo),
DAG: dag.NewDAG(merkledag.NewDAGService(bservice)),
Deals: strgdls.New(nc.Repo.DealsDatastore()),
Expected: nodeConsensus,
MsgPool: msgPool,
MsgPreviewer: msg.NewPreviewer(chainStore, &ipldCborStore, bs),
MsgQueryer: msg.NewQueryer(chainStore, &ipldCborStore, bs),
MsgWaiter: msg.NewWaiter(chainStore, messageStore, bs, &ipldCborStore),
Network: net.New(peerHost, pubsub.NewPublisher(fsub), pubsub.NewSubscriber(fsub), net.NewRouter(router), bandwidthTracker, net.NewPinger(peerHost, pingService)),
Outbox: outbox,
SectorBuilder: nd.SectorBuilder,
Wallet: fcWallet,
}))
// Bootstrapping network peers.
periodStr := nd.Repo.Config().Bootstrap.Period
period, err := time.ParseDuration(periodStr)
if err != nil {
return nil, errors.Wrapf(err, "couldn't parse bootstrap period %s", periodStr)
}
// Bootstrapper maintains connections to some subset of addresses
ba := nd.Repo.Config().Bootstrap.Addresses
bpi, err := net.PeerAddrsToAddrInfo(ba)
if err != nil {
return nil, errors.Wrapf(err, "couldn't parse bootstrap addresses [%s]", ba)
}
minPeerThreshold := nd.Repo.Config().Bootstrap.MinPeerThreshold
nd.Bootstrapper = net.NewBootstrapper(bpi, nd.Host(), nd.Host().Network(), nd.Router, minPeerThreshold, period)
return nd, nil
}
// Start boots up the node.
func (node *Node) Start(ctx context.Context) error {
if err := metrics.RegisterPrometheusEndpoint(node.Repo.Config().Observability.Metrics); err != nil {
return errors.Wrap(err, "failed to setup metrics")
}
if err := metrics.RegisterJaeger(node.host.ID().Pretty(), node.Repo.Config().Observability.Tracing); err != nil {
return errors.Wrap(err, "failed to setup tracing")
}
var err error
if err = node.ChainReader.Load(ctx); err != nil {
return err
}
// Only set these up if there is a miner configured.
if _, err := node.MiningAddress(); err == nil {
if err := node.setupMining(ctx); err != nil {
log.Errorf("setup mining failed: %v", err)
return err
}
}
// TODO: defer establishing these API endpoints until the chain is synced when the commands
// can handle their absence: https://github.com/filecoin-project/go-filecoin/issues/3137
err = node.setupProtocols()
if err != nil {
return errors.Wrap(err, "failed to set up protocols:")
}
node.RetrievalMiner = retrieval.NewMiner(node)
var syncCtx context.Context
syncCtx, node.cancelChainSync = context.WithCancel(context.Background())
// Wire up propagation of new chain heads from the chain store to other components.
head, err := node.PorcelainAPI.ChainHead()
if err != nil {
return errors.Wrap(err, "failed to get chain head")
}
go node.handleNewChainHeads(syncCtx, head)
if !node.OfflineMode {
// Start bootstrapper.
node.Bootstrapper.Start(context.Background())
// Register peer tracker disconnect function with network.
net.TrackerRegisterDisconnect(node.host.Network(), node.PeerTracker)
// Establish a barrier to be released when the initial chain sync has completed.
// Services which depend on a more-or-less synced chain can wait for this before starting up.
chainSynced := moresync.NewLatch(1)
// Start up 'hello' handshake service
helloCallback := func(ci *types.ChainInfo) {
node.PeerTracker.Track(ci)
// TODO Implement principled trusting of ChainInfo's
// to address in #2674
trusted := true
err := node.Syncer.HandleNewTipSet(context.Background(), ci, trusted)
if err != nil {
log.Infof("error handling blocks: %s", ci.Head.String())
return
}
// For now, consider the initial bootstrap done after the syncer has (synchronously)
// processed the chain up to the head reported by the first peer to respond to hello.
// This is an interim sequence until a secure network bootstrap is implemented:
// https://github.com/filecoin-project/go-filecoin/issues/2674.
// For now, we trust that the first node to respond will be a configured bootstrap node
// and that we trust that node to inform us of the chain head.
// TODO: when the syncer rejects too-far-ahead blocks received over pubsub, don't consider
// sync done until it's caught up enough that it will accept blocks from pubsub.
// This might require additional rounds of hello.
// See https://github.com/filecoin-project/go-filecoin/issues/1105
chainSynced.Done()
}
node.HelloSvc = hello.New(node.Host(), node.ChainReader.GenesisCid(), helloCallback, node.PorcelainAPI.ChainHead, node.Repo.Config().Net, flags.Commit)
// Subscribe to block pubsub after the initial sync completes.
go func() {
chainSynced.Wait()
if syncCtx.Err() == nil {
// Subscribe to block pubsub topic to learn about new chain heads.
node.BlockSub, err = node.pubsubscribe(syncCtx, net.BlockTopic, node.processBlock)
if err != nil {
log.Error(err)
}
}
}()
// Subscribe to the message pubsub topic to learn about messages to mine into blocks.
// TODO: defer this subscription until after mining (block production) is started:
// https://github.com/filecoin-project/go-filecoin/issues/2145.
// This is blocked by https://github.com/filecoin-project/go-filecoin/issues/2959, which
// is necessary for message_propagate_test to start mining before testing this behaviour.
node.MessageSub, err = node.pubsubscribe(syncCtx, net.MessageTopic, node.processMessage)
if err != nil {
return err
}
// Start heartbeats.
if err := node.setupHeartbeatServices(ctx); err != nil {
return errors.Wrap(err, "failed to start heartbeat services")
}
}
return nil
}
// Subscribes a handler function to a pubsub topic.
func (node *Node) pubsubscribe(ctx context.Context, topic string, handler pubSubHandler) (pubsub.Subscription, error) {
sub, err := node.PorcelainAPI.PubSubSubscribe(topic)
if err != nil {
return nil, errors.Wrapf(err, "failed to subscribe to %s", topic)
}
go node.handleSubscription(ctx, sub, handler)
return sub, nil
}
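// setupHeartbeatServices starts the configured heartbeat service and, if FIL_HEARTBEAT_ALERTS is set, an additional alerting heartbeat service.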
func (node *Node) setupHeartbeatServices(ctx context.Context) error {
mag := func() address.Address {
addr, err := node.MiningAddress()
// the only error MiningAddress() returns is ErrNoMinerAddress.
// if there is no configured miner address, simply send a zero
// address across the wire.
if err != nil {
return address.Undef
}
return addr
}
// start the primary heartbeat service
if len(node.Repo.Config().Heartbeat.BeatTarget) > 0 {
hbs := metrics.NewHeartbeatService(node.Host(), node.ChainReader.GenesisCid(), node.Repo.Config().Heartbeat, node.PorcelainAPI.ChainHead, metrics.WithMinerAddressGetter(mag))
go hbs.Start(ctx)
}
// check if we want to connect to an alert service. An alerting service is a heartbeat
	// service that can trigger alerts based on the contents of heartbeats.
if alertTarget := os.Getenv("FIL_HEARTBEAT_ALERTS"); len(alertTarget) > 0 {
ahbs := metrics.NewHeartbeatService(node.Host(), node.ChainReader.GenesisCid(), &config.HeartbeatConfig{
BeatTarget: alertTarget,
BeatPeriod: "10s",
ReconnectPeriod: "10s",
Nickname: node.Repo.Config().Heartbeat.Nickname,
}, node.PorcelainAPI.ChainHead, metrics.WithMinerAddressGetter(mag))
go ahbs.Start(ctx)
}
return nil
}
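
// Editor's note: an illustrative sketch, not part of the original source. The
// alerting heartbeat above is enabled purely through the FIL_HEARTBEAT_ALERTS
// environment variable, so no config-file change is needed; for example
// (alerts.example.com:8080 is a hypothetical target):
//
//	os.Setenv("FIL_HEARTBEAT_ALERTS", "alerts.example.com:8080")
//	_ = node.setupHeartbeatServices(ctx) // starts the alerting heartbeat alongside any configured primary one
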
func (node *Node) setupMining(ctx context.Context) error {
// initialize a sector builder
sectorBuilder, err := initSectorBuilderForNode(ctx, node)
if err != nil {
return errors.Wrap(err, "failed to initialize sector builder")
}
node.sectorBuilder = sectorBuilder
return nil
}
func (node *Node) setIsMining(isMining bool) {
node.mining.Lock()
defer node.mining.Unlock()
node.mining.isMining = isMining
}
func (node *Node) handleNewMiningOutput(ctx context.Context, miningOutCh <-chan mining.Output) {
defer func() {
node.miningDoneWg.Done()
}()
for {
select {
case <-ctx.Done():
return
case output, ok := <-miningOutCh:
if !ok {
return
}
if output.Err != nil {
log.Errorf("stopping mining. error: %s", output.Err.Error())
node.StopMining(context.Background())
} else {
node.miningDoneWg.Add(1)
go func() {
if node.IsMining() {
node.AddNewlyMinedBlock(ctx, output.NewBlock)
}
node.miningDoneWg.Done()
}()
}
}
}
}
func (node *Node) handleNewChainHeads(ctx context.Context, prevHead types.TipSet) {
node.HeaviestTipSetCh = node.ChainReader.HeadEvents().Sub(chain.NewHeadTopic)
for {
select {
case ts, ok := <-node.HeaviestTipSetCh:
if !ok {
return
}
newHead, ok := ts.(types.TipSet)
if !ok {
log.Error("non-tipset published on heaviest tipset channel")
continue
}
if !newHead.Defined() {
log.Error("tipset of size 0 published on heaviest tipset channel. ignoring and waiting for a new heaviest tipset.")
continue
}
if err := node.Outbox.HandleNewHead(ctx, prevHead, newHead); err != nil {
log.Error("updating outbound message queue for new tipset", err)
}
if err := node.Inbox.HandleNewHead(ctx, prevHead, newHead); err != nil {
log.Error("updating message pool for new tipset", err)
}
prevHead = newHead
if node.StorageMiner != nil {
err := node.StorageMiner.OnNewHeaviestTipSet(newHead)
if err != nil {
log.Error(err)
}
}
case <-ctx.Done():
return
}
}
}
func (node *Node) cancelSubscriptions() {
if node.cancelChainSync != nil {
node.cancelChainSync()
}
if node.BlockSub != nil {
node.BlockSub.Cancel()
node.BlockSub = nil
}
if node.MessageSub != nil {
node.MessageSub.Cancel()
node.MessageSub = nil
}
}
// Stop initiates the shutdown of the node.
func (node *Node) Stop(ctx context.Context) {
node.ChainReader.HeadEvents().Unsub(node.HeaviestTipSetCh)
node.StopMining(ctx)
node.cancelSubscriptions()
node.ChainReader.Stop()
if node.SectorBuilder() != nil {
if err := node.SectorBuilder().Close(); err != nil {
fmt.Printf("error closing sector builder: %s\n", err)
}
node.sectorBuilder = nil
}
if err := node.Host().Close(); err != nil {
fmt.Printf("error closing host: %s\n", err)
}
if err := node.Repo.Close(); err != nil {
fmt.Printf("error closing repo: %s\n", err)
}
node.Bootstrapper.Stop()
fmt.Println("stopping filecoin :(")
}
type newBlockFunc func(context.Context, *types.Block)
func (node *Node) addNewlyMinedBlock(ctx context.Context, b *types.Block) {
log.Debugf("Got a newly mined block from the mining worker: %s", b)
if err := node.AddNewBlock(ctx, b); err != nil {
log.Warningf("error adding new mined block: %s. err: %s", b.Cid().String(), err.Error())
}
}
// MiningAddress returns the address of the mining actor mining on behalf of
// the node.
func (node *Node) MiningAddress() (address.Address, error) {
addr := node.Repo.Config().Mining.MinerAddress
if addr.Empty() {
return address.Undef, ErrNoMinerAddress
}
return addr, nil
}
// MiningTimes returns the configured time it takes to mine a block, and also
// the mining delay duration, which is currently a fixed fraction of block time.
// Note this is mocked behavior; in production this time is determined by how
// long it takes to generate PoSTs.
func (node *Node) MiningTimes() (time.Duration, time.Duration) {
blockTime := node.PorcelainAPI.BlockTime()
mineDelay := blockTime / mining.MineDelayConversionFactor
return blockTime, mineDelay
}
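
// Editor's note: an illustrative sketch, not part of the original source. The
// mine delay is simply the configured block time divided by the fixed
// mining.MineDelayConversionFactor, so with a 30s block time and a factor of
// 30 (hypothetical numbers) the delay works out to 1s:
//
//	blockTime, mineDelay := node.MiningTimes()
//	log.Infof("block time: %s, mine delay: %s", blockTime, mineDelay)
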
// StartMining causes the node to start feeding blocks to the mining worker and initializes
// the SectorBuilder for the mining address.
func (node *Node) StartMining(ctx context.Context) error {
if node.IsMining() {
return errors.New("Node is already mining")
}
minerAddr, err := node.MiningAddress()
if err != nil {
return errors.Wrap(err, "failed to get mining address")
}
_, err = node.PorcelainAPI.ActorGet(ctx, minerAddr)
if err != nil {
return errors.Wrap(err, "failed to get miner actor")
}
// ensure we have a sector builder
if node.SectorBuilder() == nil {
if err := node.setupMining(ctx); err != nil {
return err
}
}
minerOwnerAddr, err := node.PorcelainAPI.MinerGetOwnerAddress(ctx, minerAddr)
if err != nil {
return errors.Wrapf(err, "failed to get mining owner address for miner %s", minerAddr)
}
_, mineDelay := node.MiningTimes()
if node.MiningWorker == nil {
if node.MiningWorker, err = node.CreateMiningWorker(ctx); err != nil {
return err
}
}
if node.MiningScheduler == nil {
node.MiningScheduler = mining.NewScheduler(node.MiningWorker, mineDelay, node.PorcelainAPI.ChainHead)
} else if node.MiningScheduler.IsStarted() {
return fmt.Errorf("miner scheduler already started")
}
var miningCtx context.Context
miningCtx, node.cancelMining = context.WithCancel(context.Background())
outCh, doneWg := node.MiningScheduler.Start(miningCtx)
node.miningDoneWg = doneWg
node.AddNewlyMinedBlock = node.addNewlyMinedBlock
node.miningDoneWg.Add(1)
go node.handleNewMiningOutput(miningCtx, outCh)
// initialize a storage miner
storageMiner, err := initStorageMinerForNode(ctx, node)
if err != nil {
return errors.Wrap(err, "failed to initialize storage miner")
}
node.StorageMiner = storageMiner
// loop, turning sealing-results into commitSector messages to be included
// in the chain
go func() {
for {
select {
case result := <-node.SectorBuilder().SectorSealResults():
if result.SealingErr != nil {
log.Errorf("failed to seal sector with id %d: %s", result.SectorID, result.SealingErr.Error())
} else if result.SealingResult != nil {
// TODO: determine these algorithmically by simulating call and querying historical prices
gasPrice := types.NewGasPrice(1)
gasUnits := types.NewGasUnits(300)
val := result.SealingResult
					// This call can fail due to, e.g., nonce collisions. Our miner's existence depends on this.
// We should deal with this, but MessageSendWithRetry is problematic.
msgCid, err := node.PorcelainAPI.MessageSend(
miningCtx,
minerOwnerAddr,
minerAddr,
types.ZeroAttoFIL,
gasPrice,
gasUnits,
"commitSector",
val.SectorID,
val.CommD[:],
val.CommR[:],
val.CommRStar[:],
val.Proof[:],
)
if err != nil {
log.Errorf("failed to send commitSector message from %s to %s for sector with id %d: %s", minerOwnerAddr, minerAddr, val.SectorID, err)
continue
}
node.StorageMiner.OnCommitmentSent(val, msgCid, nil)
}
case <-miningCtx.Done():
return
}
}
}()
// schedules sealing of staged piece-data
if node.Repo.Config().Mining.AutoSealIntervalSeconds > 0 {
go func() {
for {
select {
case <-miningCtx.Done():
return
case <-time.After(time.Duration(node.Repo.Config().Mining.AutoSealIntervalSeconds) * time.Second):
log.Info("auto-seal has been triggered")
if err := node.SectorBuilder().SealAllStagedSectors(miningCtx); err != nil {
log.Errorf("scheduler received error from node.SectorBuilder.SealAllStagedSectors (%s) - exiting", err.Error())
return
}
}
}
}()
} else {
log.Debug("auto-seal is disabled")
}
node.setIsMining(true)
return nil
}
func initSectorBuilderForNode(ctx context.Context, node *Node) (sectorbuilder.SectorBuilder, error) {
minerAddr, err := node.MiningAddress()
if err != nil {
return nil, errors.Wrap(err, "failed to get node's mining address")
}
sectorSize, err := node.PorcelainAPI.MinerGetSectorSize(ctx, minerAddr)
if err != nil {
return nil, errors.Wrapf(err, "failed to get sector size for miner w/address %s", minerAddr.String())
}
lastUsedSectorID, err := node.PorcelainAPI.MinerGetLastCommittedSectorID(ctx, minerAddr)
if err != nil {
return nil, errors.Wrapf(err, "failed to get last used sector id for miner w/address %s", minerAddr.String())
}
	// TODO: Currently, we configure the RustSectorBuilder to store its
	// metadata in the staging directory; it should be in its own directory.
//
// Tracked here: https://github.com/filecoin-project/rust-fil-proofs/issues/402
repoPath, err := node.Repo.Path()
if err != nil {
return nil, err
}
sectorDir, err := paths.GetSectorPath(node.Repo.Config().SectorBase.RootDir, repoPath)
if err != nil {
return nil, err
}
stagingDir, err := paths.StagingDir(sectorDir)
if err != nil {
return nil, err
}
sealedDir, err := paths.SealedDir(sectorDir)
if err != nil {
return nil, err
}
cfg := sectorbuilder.RustSectorBuilderConfig{
BlockService: node.blockservice,
LastUsedSectorID: lastUsedSectorID,
MetadataDir: stagingDir,
MinerAddr: minerAddr,
SealedSectorDir: sealedDir,
StagedSectorDir: stagingDir,
SectorClass: types.NewSectorClass(sectorSize),
}
sb, err := sectorbuilder.NewRustSectorBuilder(cfg)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("failed to initialize sector builder for miner %s", minerAddr.String()))
}
return sb, nil
}
func initStorageMinerForNode(ctx context.Context, node *Node) (*storage.Miner, error) {
minerAddr, err := node.MiningAddress()
if err != nil {
return nil, errors.Wrap(err, "failed to get node's mining address")
}
ownerAddress, err := node.PorcelainAPI.MinerGetOwnerAddress(ctx, minerAddr)
if err != nil {
return nil, errors.Wrap(err, "no mining owner available, skipping storage miner setup")
}
workerAddress := ownerAddress
sectorSize, err := node.PorcelainAPI.MinerGetSectorSize(ctx, minerAddr)
if err != nil {
return nil, errors.Wrap(err, "failed to fetch miner's sector size")
}
prover := storage.NewProver(minerAddr, workerAddress, sectorSize, node.PorcelainAPI, node.PorcelainAPI)
miner, err := storage.NewMiner(minerAddr, ownerAddress, workerAddress, prover, sectorSize, node, node.Repo.DealsDatastore(), node.PorcelainAPI)
if err != nil {
return nil, errors.Wrap(err, "failed to instantiate storage miner")
}
return miner, nil
}
// StopMining stops mining on new blocks.
func (node *Node) StopMining(ctx context.Context) {
node.setIsMining(false)
if node.cancelMining != nil {
node.cancelMining()
}
if node.miningDoneWg != nil {
node.miningDoneWg.Wait()
}
// TODO: stop node.StorageMiner
}
func (node *Node) handleSubscription(ctx context.Context, sub pubsub.Subscription, handler pubSubHandler) {
for {
received, err := sub.Next(ctx)
if err != nil {
if ctx.Err() != context.Canceled {
log.Errorf("error reading message from topic %s: %s", sub.Topic(), err)
}
return
}
if err := handler(ctx, received); err != nil {
handlerName := runtime.FuncForPC(reflect.ValueOf(handler).Pointer()).Name()
if vmerr.ShouldRevert(err) {
log.Infof("error in handler %s for topic %s: %s", handlerName, sub.Topic(), err)
} else if err != context.Canceled {
log.Errorf("error in handler %s for topic %s: %s", handlerName, sub.Topic(), err)
}
}
}
}
// setupProtocols creates protocol clients and miners, then sets the node's APIs
// for each
func (node *Node) setupProtocols() error {
_, mineDelay := node.MiningTimes()
blockMiningAPI := block.New(
node.MiningAddress,
node.AddNewBlock,
node.ChainReader,
node.IsMining,
mineDelay,
node.StartMining,
node.StopMining,
node.CreateMiningWorker)
node.BlockMiningAPI = &blockMiningAPI
// set up retrieval client and api
retapi := retrieval.NewAPI(retrieval.NewClient(node.host, node.PorcelainAPI))
node.RetrievalAPI = &retapi
// set up storage client and api
smc := storage.NewClient(node.host, node.PorcelainAPI)
smcAPI := storage.NewAPI(smc)
node.StorageAPI = &smcAPI
return nil
}
// CreateMiningWorker creates a mining.Worker for the node using the configured
// getStateTree, getWeight, and getAncestors functions for the node
func (node *Node) CreateMiningWorker(ctx context.Context) (mining.Worker, error) {
processor := consensus.NewDefaultProcessor()
minerAddr, err := node.MiningAddress()
if err != nil {
return nil, errors.Wrap(err, "failed to get mining address")
}
minerWorker, err := node.PorcelainAPI.MinerGetWorker(ctx, minerAddr)
if err != nil {
return nil, errors.Wrap(err, "could not get key from miner actor")
}
minerOwnerAddr, err := node.PorcelainAPI.MinerGetOwnerAddress(ctx, minerAddr)
if err != nil {
log.Errorf("could not get owner address of miner actor")
return nil, err
}
return mining.NewDefaultWorker(mining.WorkerParameters{
API: node.PorcelainAPI,
MinerAddr: minerAddr,
MinerOwnerAddr: minerOwnerAddr,
MinerWorker: minerWorker,
WorkerSigner: node.Wallet,
GetStateTree: node.getStateTree,
GetWeight: node.getWeight,
GetAncestors: node.getAncestors,
MessageSource: node.Inbox.Pool(),
MessageStore: node.MessageStore,
Processor: processor,
PowerTable: node.PowerTable,
Blockstore: node.Blockstore}), nil
}
// getStateTree is the default GetStateTree function for the mining worker.
func (node *Node) getStateTree(ctx context.Context, ts types.TipSet) (state.Tree, error) {
return node.ChainReader.GetTipSetState(ctx, ts.Key())
}
// getWeight is the default GetWeight function for the mining worker.
func (node *Node) getWeight(ctx context.Context, ts types.TipSet) (uint64, error) {
parent, err := ts.Parents()
if err != nil {
return uint64(0), err
}
// TODO handle genesis cid more gracefully
if parent.Len() == 0 {
return node.Consensus.Weight(ctx, ts, nil)
}
pSt, err := node.ChainReader.GetTipSetState(ctx, parent)
if err != nil {
return uint64(0), err
}
return node.Consensus.Weight(ctx, ts, pSt)
}
// getAncestors is the default GetAncestors function for the mining worker.
func (node *Node) getAncestors(ctx context.Context, ts types.TipSet, newBlockHeight *types.BlockHeight) ([]types.TipSet, error) {
ancestorHeight := types.NewBlockHeight(consensus.AncestorRoundsNeeded)
return chain.GetRecentAncestors(ctx, ts, node.ChainReader, newBlockHeight, ancestorHeight, sampling.LookbackParameter)
}
// -- Accessors
// Host returns the node's host.
func (node *Node) Host() host.Host {
return node.host
}
// SectorBuilder returns the node's sectorBuilder.
func (node *Node) SectorBuilder() sectorbuilder.SectorBuilder {
return node.sectorBuilder
}
// BlockService returns the node's blockservice.
func (node *Node) BlockService() bserv.BlockService {
return node.blockservice
}
// CborStore returns the node's cborStore.
func (node *Node) CborStore() *hamt.CborIpldStore {
return node.cborStore
}
// IsMining returns a boolean indicating whether the node is mining blocks.
func (node *Node) IsMining() bool {
node.mining.Lock()
defer node.mining.Unlock()
return node.mining.isMining
}
| ["\"FIL_HEARTBEAT_ALERTS\""] | [] | ["FIL_HEARTBEAT_ALERTS"] | [] | ["FIL_HEARTBEAT_ALERTS"] | go | 1 | 0 | |
cmd/ooniprobe/internal/cli/geoip/geoip.go | package geoip
import (
"context"
"github.com/alecthomas/kingpin"
"github.com/apex/log"
"github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/cli/root"
"github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/ooni"
"github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/output"
)
func init() {
cmd := root.Command("geoip", "Perform a geoip lookup")
cmd.Action(func(_ *kingpin.ParseContext) error {
return dogeoip(defaultconfig)
})
}
type dogeoipconfig struct {
Logger log.Interface
NewProbeCLI func() (ooni.ProbeCLI, error)
SectionTitle func(string)
}
var defaultconfig = dogeoipconfig{
Logger: log.Log,
NewProbeCLI: root.NewProbeCLI,
SectionTitle: output.SectionTitle,
}
func dogeoip(config dogeoipconfig) error {
config.SectionTitle("GeoIP lookup")
probeCLI, err := config.NewProbeCLI()
if err != nil {
return err
}
engine, err := probeCLI.NewProbeEngine(context.Background())
if err != nil {
return err
}
defer engine.Close()
err = engine.MaybeLookupLocation()
if err != nil {
return err
}
config.Logger.WithFields(log.Fields{
"type": "table",
"asn": engine.ProbeASNString(),
"network_name": engine.ProbeNetworkName(),
"country_code": engine.ProbeCC(),
"ip": engine.ProbeIP(),
}).Info("Looked up your location")
return nil
}
| [] | [] | [] | [] | [] | go | null | null |
SideBar.py | # coding=utf8
import sublime, sublime_plugin
import os, shutil
import threading, time
import re
import subprocess, platform
from .edit.Edit import Edit
from .hurry.filesize import size as hurry_size
try:
from urllib import unquote as urlunquote
except ImportError:
from urllib.parse import unquote as urlunquote
from .SideBarAPI import SideBarItem, SideBarSelection, SideBarProject
Pref = {}
s = {}
Cache = {}
def cli(command):
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = 0
p = subprocess.Popen(
command,
startupinfo=info,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=platform.system() == "Windows" or os.name == "nt",
)
stdout, stderr = p.communicate()
try:
p.kill()
except:
pass
p = {"stderr": stderr, "stdout": stdout, "returncode": p.returncode}
return p
def CACHED_SELECTION(paths=[]):
if Cache.cached:
return Cache.cached
else:
return SideBarSelection(paths)
def escapeCMDWindows(string):
return string.replace("^", "^^")
class Pref:
def load(self):
pass
def plugin_loaded():
global Pref, s
s = sublime.load_settings("Side Bar.sublime-settings")
Pref = Pref()
Pref.load()
s.clear_on_change("reload")
s.add_on_change("reload", lambda: Pref.load())
def Window(window=None):
return window if window else sublime.active_window()
def expandVars(path):
for k, v in list(os.environ.items()):
path = path.replace("%" + k + "%", v).replace("%" + k.lower() + "%", v)
return path
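
# Editor's note: an illustrative sketch, not part of the original plugin.
# expandVars substitutes Windows-style %VAR% placeholders (upper- or
# lower-case) from os.environ and leaves unknown placeholders untouched; it is
# what lets the directory part of an "Open With" application path use
# environment placeholders, e.g.:
#
#   expandVars("%PROGRAMFILES%\\SeaMonkey")
#   # -> "C:\\Program Files\\SeaMonkey", assuming PROGRAMFILES is set
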
def window_set_status(key, name=""):
for window in sublime.windows():
for view in window.views():
view.set_status("SideBar-" + key, name)
class Object:
pass
class Cache:
pass
Cache = Cache()
Cache.cached = False
class OpenWithListener(sublime_plugin.EventListener):
def on_load_async(self, view):
if view and view.file_name() and not view.settings().get("open_with_edit"):
item = SideBarItem(
os.path.join(
sublime.packages_path(),
"User",
"SideBarEnhancements",
"Open With",
"Side Bar.sublime-menu",
),
False,
)
if item.exists():
settings = sublime.decode_value(item.contentUTF8())
selection = SideBarSelection([view.file_name()])
for item in settings[0]["children"]:
try:
if item[
"open_automatically"
] and selection.hasFilesWithExtension(
item["args"]["extensions"]
):
SideBarFilesOpenWithCommand(Window()).run(
[view.file_name()],
item["args"]["application"],
item["args"]["extensions"],
item["args"]["args"],
)
view.close()
break
except:
pass
class aaaaaSideBarCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
pass
def is_visible(self, paths=[]): # <- WORKS AS AN ONPOPUPSHOWN
Cache.cached = SideBarSelection(paths)
return False
class SideBarNewFileCommand(sublime_plugin.WindowCommand):
def run(self, paths=[], name=""):
import functools
Window().run_command("hide_panel")
Window().show_input_panel(
"File Name:",
name,
functools.partial(self.on_done, paths, False),
None,
None,
)
def on_done(self, paths, relative_to_project, name):
_paths = paths
if relative_to_project or s.get("new_files_relative_to_project_root", False):
_paths = SideBarProject().getDirectories()
if _paths:
_paths = [SideBarItem(_paths[0], False)]
if not _paths:
_paths = SideBarSelection(_paths).getSelectedDirectoriesOrDirnames()
else:
_paths = SideBarSelection(_paths).getSelectedDirectoriesOrDirnames()
if not _paths:
_paths = SideBarProject().getDirectories()
if _paths:
_paths = [SideBarItem(_paths[0], False)]
if not _paths:
Window().new_file()
else:
for item in _paths:
item = SideBarItem(item.join(name), False)
if item.exists():
sublime.error_message(
"Unable to create file, file or folder exists."
)
self.run(paths, name)
return
else:
try:
item.create()
item.edit()
except:
sublime.error_message(
"Unable to create file:\n\n" + item.path()
)
self.run(paths, name)
return
SideBarProject().refresh()
class SideBarNewFile2Command(sublime_plugin.WindowCommand):
def run(self, paths=[], name=""):
import functools
Window().run_command("hide_panel")
Window().show_input_panel(
"File Name:",
name,
functools.partial(SideBarNewFileCommand(Window()).on_done, paths, True),
None,
None,
)
class SideBarNewDirectory2Command(sublime_plugin.WindowCommand):
def run(self, paths=[], name=""):
import functools
Window().run_command("hide_panel")
Window().show_input_panel(
"Folder Name:",
name,
functools.partial(
SideBarNewDirectoryCommand(Window()).on_done, paths, True
),
None,
None,
)
class SideBarNewDirectoryCommand(sublime_plugin.WindowCommand):
def run(self, paths=[], name=""):
import functools
Window().run_command("hide_panel")
Window().show_input_panel(
"Folder Name:",
name,
functools.partial(self.on_done, paths, False),
None,
None,
)
def on_done(self, paths, relative_to_project, name):
_paths = paths
if relative_to_project or s.get("new_folders_relative_to_project_root", False):
_paths = SideBarProject().getDirectories()
if _paths:
_paths = [SideBarItem(_paths[0], True)]
if not _paths:
_paths = SideBarSelection(_paths).getSelectedDirectoriesOrDirnames()
else:
_paths = SideBarSelection(_paths).getSelectedDirectoriesOrDirnames()
for item in _paths:
item = SideBarItem(item.join(name), True)
if item.exists():
sublime.error_message("Unable to create folder, folder or file exists.")
self.run(paths, name)
return
else:
item.create()
if not item.exists():
sublime.error_message("Unable to create folder:\n\n" + item.path())
self.run(paths, name)
return
SideBarProject().refresh()
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
class SideBarFolderSaveViewsCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
views = []
for item in SideBarSelection(paths).getSelectedDirectories():
views = views + item.views()
for view in views:
view.run_command("save")
def is_enabled(self, paths=[]):
_views = []
has_dirty_view = False
for item in SideBarSelection(paths).getSelectedDirectories():
views = item.views()
_views = _views + views
for view in views:
if view.is_dirty():
has_dirty_view = True
return (
CACHED_SELECTION(paths).hasDirectories()
and len(views) > 0
and has_dirty_view
)
def is_visible(self, paths=[]):
return not s.get("disabled_menuitem_folder_save", False)
class SideBarFolderCloseViewsCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
collapsed = False
for item in SideBarSelection(paths).getSelectedDirectories():
for view in item.views():
if not collapsed:
Window().focus_view(view)
self.collapse_sidebar_folder()
collapsed = True
view.close()
def collapse_sidebar_folder(self):
# Window().run_command("reveal_in_side_bar") the tree animation breaks the functionality
Window().run_command("focus_side_bar")
Window().run_command("move", {"by": "characters", "forward": False})
def is_enabled(self, paths=[]):
views = []
for item in SideBarSelection(paths).getSelectedDirectories():
views = views + item.views()
return CACHED_SELECTION(paths).hasDirectories() and len(views) > 0
def is_visible(self, paths=[]):
return not s.get("disabled_menuitem_folder_close", False)
class SideBarFolderCloseOtherViewsCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
to_close = self.others_views(paths)
for view in to_close:
view.close()
def others_views(self, paths=[]):
window = Window()
opened = []
selected = []
for view in window.views():
opened.append(view)
for item in SideBarSelection(paths).getSelectedDirectories():
for view in item.views():
selected.append(view)
return [view for view in opened if view not in selected]
def is_enabled(self, paths=[]):
views = self.others_views(paths)
return CACHED_SELECTION(paths).hasDirectories() and len(views) > 0
def is_visible(self, paths=[]):
return not s.get("disabled_menuitem_folder_close", False)
class SideBarEditCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
for item in SideBarSelection(paths).getSelectedFiles():
item.edit()
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).hasFiles()
def is_visible(self, paths=[]):
return not s.get("disabled_menuitem_edit", False)
class SideBarEditToRightCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
window = Window()
window.run_command(
"set_layout",
{
"cols": [0.0, 0.5, 1.0],
"rows": [0.0, 1.0],
"cells": [[0, 0, 1, 1], [1, 0, 2, 1]],
},
)
window.focus_group(1)
for item in SideBarSelection(paths).getSelectedFiles():
view = item.edit()
window.set_view_index(view, 1, 0)
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).hasFiles()
def is_visible(self, paths=[]):
return not s.get("disabled_menuitem_edit", False) and not s.get(
"disabled_menuitem_edit_to_right", False
)
class SideBarOpenCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
for item in SideBarSelection(paths).getSelectedItems():
item.open(s.get("use_powershell", True), s.get("use_command", ""))
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
def is_visible(self, paths=[]):
return not s.get("disabled_menuitem_open_run", False)
class SideBarFilesOpenWithEditApplicationsCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
platform = ""
if sublime.platform() == "osx":
platform = "OSX"
elif sublime.platform() == "windows":
platform = "Windows"
else:
platform = "Linux"
item = SideBarItem(
os.path.join(
sublime.packages_path(),
"User",
"SideBarEnhancements",
"Open With",
"Side Bar.sublime-menu",
),
False,
)
if not item.exists() and False:
item = SideBarItem(
os.path.join(
sublime.packages_path(),
"User",
"SideBarEnhancements",
"Open With",
"Side Bar (" + platform + ").sublime-menu",
),
False,
)
if not item.exists():
item.create()
item.write(
"""[
{"id": "side-bar-files-open-with",
"children":
[
//application 1
{
"caption": "Photoshop",
"id": "side-bar-files-open-with-photoshop",
"command": "side_bar_files_open_with",
"args": {
"paths": [],
"application": "Adobe Photoshop CS5.app", // OSX
"extensions":"psd|png|jpg|jpeg", //any file with these extensions
"args":[]
},
"open_automatically" : false // will close the view/tab and launch the application
},
//separator
{"caption":"-"},
//application 2
{
"caption": "SeaMonkey",
"id": "side-bar-files-open-with-seamonkey",
"command": "side_bar_files_open_with",
"args": {
"paths": [],
"application": "C:\\\\Archivos de programa\\\\SeaMonkey\\\\seamonkey.exe", // WINNT
"extensions":"", //open all even folders
"args":[]
},
"open_automatically" : false // will close the view/tab and launch the application
},
//application n
{
"caption": "Chrome",
"id": "side-bar-files-open-with-chrome",
"command": "side_bar_files_open_with",
"args": {
"paths": [],
"application": "C:\\\\Documents and Settings\\\\tito\\\\local\\\\Datos de programa\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe",
"extensions":".*", //any file with extension
"args":[]
},
"open_automatically" : false // will close the view/tab and launch the application
},
{"caption":"-"}
]
}
]"""
)
item.edit()
def is_enabled(self, paths=[]):
return True
class SideBarFilesOpenWithCommand(sublime_plugin.WindowCommand):
    def run(self, paths=[], application="", extensions="", args=[]):
application_dir, application_name = os.path.split(application)
if extensions == "*":
extensions = ".*"
if extensions == "":
items = SideBarSelection(paths).getSelectedItems()
else:
items = SideBarSelection(paths).getSelectedFilesWithExtension(extensions)
import subprocess
try:
for item in items:
# $PATH - The full path to the current file, e. g., C:\Files\Chapter1.txt.
# $PROJECT - The root directory of the current project.
# $DIRNAME - The directory of the current file, e. g., C:\Files.
# $NAME - The name portion of the current file, e. g., Chapter1.txt.
# $EXTENSION - The extension portion of the current file, e. g., txt.
for k in range(len(args)):
args[k] = args[k].replace("$PATH", item.path())
args[k] = args[k].replace("$PROJECT", item.pathProject())
args[k] = args[k].replace(
"$DIRNAME",
item.path() if item.isDirectory() else item.dirname(),
)
args[k] = args[k].replace(
"$NAME_NO_EXTENSION",
item.name().replace("." + item.extension(), ""),
)
args[k] = args[k].replace("$NAME", item.name())
args[k] = args[k].replace("$EXTENSION", item.extension())
if sublime.platform() == "osx":
subprocess.Popen(
["open", "-a", application] + args + [item.name()],
cwd=item.dirname(),
)
elif sublime.platform() == "windows":
try:
subprocess.Popen(
[application_name] + args + [escapeCMDWindows(item.path())],
cwd=expandVars(application_dir),
shell=True,
)
except:
subprocess.Popen(
[application_name] + args + [escapeCMDWindows(item.path())],
shell=True,
)
else:
try:
subprocess.Popen(
[application_name] + args + [escapeCMDWindows(item.name())],
cwd=item.dirname(),
)
except:
subprocess.Popen(
[application_name] + args + [escapeCMDWindows(item.name())]
)
except:
sublime.error_message(
'Unable to "Open With..", probably incorrect path to application.'
)
    def is_enabled(self, paths=[], application="", extensions="", args=[]):
if extensions == "*":
extensions = ".*"
if extensions == "":
return CACHED_SELECTION(paths).len() > 0
else:
return CACHED_SELECTION(paths).hasFilesWithExtension(extensions)
    def is_visible(self, paths=[], application="", extensions="", args=[]):
if extensions == "*":
extensions = ".*"
if extensions == "":
return CACHED_SELECTION(paths).len() > 0
else:
has = CACHED_SELECTION(paths).hasFilesWithExtension(extensions)
return has or (
not has
and not s.get(
"hide_open_with_entries_when_there_are_no_applicable", False
)
)
class SideBarFindInSelectedCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
if s.get("find_and_replace_discards_previous_search", False):
window = Window()
views = []
for view in window.views():
if view.name() == "Find Results":
views.append(view)
for view in views:
view.close()
if s.get("find_and_replace_opens_in_new_view", True):
window = Window()
views = []
for view in window.views():
if view.name() == "Find Results":
Window().focus_view(view)
content = view.substr(sublime.Region(0, view.size()))
_view = Window().new_file()
with Edit(_view) as edit:
edit.replace(sublime.Region(0, _view.size()), content)
# the space at the end of the name prevents it from being reused by Sublime Text
                    # it looks like instead of keeping an internal reference they just look at the view name -__-
_view.set_name("Find Results ")
_view.set_syntax_file(
"Packages/Default/Find Results.hidden-tmLanguage"
)
_view.sel().clear()
for sel in view.sel():
_view.sel().add(sel)
_view.set_scratch(True)
views.append(view)
for view in views:
view.close()
items = []
for item in SideBarSelection(paths).getSelectedItemsWithoutChildItems():
items.append(item.path())
Window().run_command("hide_panel")
Window().run_command(
"show_panel", {"panel": "find_in_files", "where": ",".join(items)}
)
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
class SideBarFindInParentCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
items.append(item.dirname())
items = list(set(items))
Window().run_command("hide_panel")
Window().run_command(
"show_panel", {"panel": "find_in_files", "where": ",".join(items)}
)
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
class SideBarFindInProjectFoldersCommand(sublime_plugin.WindowCommand):
def run(self):
Window().run_command("hide_panel")
Window().run_command(
"show_panel", {"panel": "find_in_files", "where": "<project>"}
)
class SideBarFindInProjectCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
Window().run_command("hide_panel")
Window().run_command(
"show_panel", {"panel": "find_in_files", "where": "<project>"}
)
def is_visible(self, paths=[]):
return not s.get("disabled_menuitem_find_in_project", False)
class SideBarFindInProjectFolderCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItemsWithoutChildItems():
items.append(SideBarProject().getDirectoryFromPath(item.path()))
items = list(set(items))
if items:
Window().run_command("hide_panel")
Window().run_command(
"show_panel", {"panel": "find_in_files", "where": ",".join(items)}
)
class SideBarFindInFilesWithExtensionCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
items.append("*" + item.extension())
items = list(set(items))
Window().run_command("hide_panel")
Window().run_command(
"show_panel", {"panel": "find_in_files", "where": ",".join(items)}
)
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).hasFiles()
def description(self, paths=[]):
items = []
for item in CACHED_SELECTION(paths).getSelectedFiles():
items.append("*" + item.extension())
items = list(set(items))
if len(items) > 1:
return "In Files With Extensions " + (",".join(items)) + "…"
elif len(items) > 0:
return "In Files With Extension " + (",".join(items)) + "…"
else:
return "In Files With Extension…"
Object.sidebar_instant_search_id = 0
class SideBarFindFilesPathContainingCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
if paths == [] and SideBarProject().getDirectories():
paths = SideBarProject().getDirectories()
else:
paths = [
item.path()
for item in SideBarSelection(paths).getSelectedDirectoriesOrDirnames()
]
if paths == []:
return
view = Window().new_file()
view.settings().set("word_wrap", False)
view.set_name("Instant File Search")
view.set_syntax_file(
"Packages/SideBarEnhancements/SideBar Results.hidden-tmLanguage"
)
view.set_scratch(True)
view.run_command("insert", {"characters": "Type to search: "})
view.sel().clear()
view.sel().add(sublime.Region(16, 16))
view.settings().set("sidebar_instant_search_paths", paths)
def is_enabled(self, paths=[]):
return True
class SideBarFindFilesPathContainingViewListener(sublime_plugin.EventListener):
def on_modified(self, view):
view.settings().has(
"sidebar_instant_search_paths"
) # for some reason the first call in some conditions returns true, but not the next one WTH
if view.settings().has("sidebar_instant_search_paths"):
searchTerm = (
view.substr(view.line(0)).replace("Type to search:", "").strip()
)
if searchTerm and Object.sidebar_instant_search_id != searchTerm:
SideBarFindFilesPathContainingSearchThread(view, searchTerm).start()
elif not searchTerm:
view.set_name("Instant File Search")
class SideBarFindFilesPathContainingSearchThread(threading.Thread):
def __init__(self, view, searchTerm):
self.view = view
self.searchTerm = searchTerm
threading.Thread.__init__(self)
def run(self):
if Object.sidebar_instant_search_id == self.searchTerm:
return
searchTerm = self.searchTerm
Object.sidebar_instant_search_id = searchTerm
view = self.view
paths = view.settings().get("sidebar_instant_search_paths")
self.ignore_paths = view.settings().get("file_exclude_patterns", [])
try:
self.searchTermRegExp = re.compile(searchTerm, re.I | re.U)
self.match_function = self.match_regexp
search_type = "REGEXP"
except:
self.match_function = self.match_string
search_type = "LITERAL"
if Object.sidebar_instant_search_id == searchTerm:
total = 0
highlight_from = 0
match_result = ""
match_result += "Type to search: " + searchTerm + "\n"
find = self.find
for item in SideBarSelection(paths).getSelectedDirectoriesOrDirnames():
self.files = []
self.num_files = 0
find(item.path())
match_result += "\n"
length = len(self.files)
if length > 1:
match_result += str(length) + " matches"
elif length > 0:
match_result += "1 match"
else:
match_result += "No match"
match_result += (
" in "
+ str(self.num_files)
+ ' files for term "'
+ searchTerm
+ '" using '
+ search_type
+ ' under \n"'
+ item.path()
+ '"\n\n'
)
if highlight_from == 0:
highlight_from = len(match_result)
match_result += "\n".join(self.files)
total += length
match_result += "\n"
if Object.sidebar_instant_search_id == searchTerm:
sel = view.sel()
position = sel[0].begin()
if position > 16 + len(searchTerm):
position = 16 + len(searchTerm)
view.run_command(
"side_bar_enhancements_write_to_view",
{
"content": match_result,
"position": position,
"searchTerm": searchTerm,
},
)
view.set_name(searchTerm + " - IFS")
if Object.sidebar_instant_search_id == searchTerm:
view.erase_regions("sidebar_search_instant_highlight")
if total < 5000 and len(searchTerm) > 1:
if search_type == "REGEXP":
regions = [
item
for item in view.find_all(
searchTerm, sublime.IGNORECASE
)
if item.begin() >= highlight_from
]
else:
regions = [
item
for item in view.find_all(
searchTerm, sublime.LITERAL | sublime.IGNORECASE
)
if item.begin() >= highlight_from
]
if Object.sidebar_instant_search_id == searchTerm:
view.add_regions(
"sidebar_search_instant_highlight",
regions,
"entity.name.function",
"",
sublime.PERSISTENT
| sublime.DRAW_SQUIGGLY_UNDERLINE
| sublime.DRAW_NO_FILL
| sublime.DRAW_NO_OUTLINE
| sublime.DRAW_EMPTY_AS_OVERWRITE,
)
def find(self, path):
if os.path.isfile(path) or os.path.islink(path):
self.num_files = self.num_files + 1
if self.match_function(path):
self.files.append(path)
elif os.path.isdir(path):
for content in os.listdir(path):
file = os.path.join(path, content)
if os.path.isfile(file) or os.path.islink(file):
self.num_files = self.num_files + 1
if self.match_function(file):
self.files.append(file)
else:
self.find(file)
def match_regexp(self, path):
return self.searchTermRegExp.search(path) and not [
1 for s in self.ignore_paths if s in path
]
def match_string(self, path):
return self.searchTerm in path and not [
1 for s in self.ignore_paths if s in path
]
class SideBarEnhancementsWriteToViewCommand(sublime_plugin.TextCommand):
def run(self, edit, content, position, searchTerm):
if Object.sidebar_instant_search_id == searchTerm:
view = self.view
view.replace(edit, sublime.Region(0, view.size()), content)
view.sel().clear()
view.sel().add(sublime.Region(position, position))
view.end_edit(edit)
class SideBarCutCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
s = sublime.load_settings("SideBarEnhancements/Clipboard.sublime-settings")
items = []
for item in SideBarSelection(paths).getSelectedItemsWithoutChildItems():
items.append(item.path())
if len(items) > 0:
s.set("cut", "\n".join(items))
s.set("copy", "")
if len(items) > 1:
sublime.status_message("Items cut")
else:
sublime.status_message("Item cut")
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).len() > 0
and CACHED_SELECTION(paths).hasProjectDirectories() is False
)
class SideBarCopyCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
s = sublime.load_settings("SideBarEnhancements/Clipboard.sublime-settings")
items = []
for item in SideBarSelection(paths).getSelectedItemsWithoutChildItems():
items.append(item.path())
if len(items) > 0:
s.set("cut", "")
s.set("copy", "\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
class SideBarPasteCommand(sublime_plugin.WindowCommand):
def run(self, paths=[], in_parent="False", test="True", replace="False"):
key = "paste-" + str(time.time())
SideBarPasteThread(paths, in_parent, test, replace, key).start()
def is_enabled(self, paths=[], in_parent=False):
s = sublime.load_settings("SideBarEnhancements/Clipboard.sublime-settings")
return (s.get("cut", "") + s.get("copy", "")) != "" and len(
CACHED_SELECTION(paths).getSelectedDirectoriesOrDirnames()
) == 1
def is_visible(self, paths=[], in_parent=False):
if in_parent == "True":
return not s.get("disabled_menuitem_paste_in_parent", False)
else:
return True
class SideBarPasteThread(threading.Thread):
def __init__(
self, paths=[], in_parent="False", test="True", replace="False", key=""
):
self.paths = paths
self.in_parent = in_parent
self.test = test
self.replace = replace
self.key = key
threading.Thread.__init__(self)
def run(self):
SideBarPasteCommand2(Window()).run(
self.paths, self.in_parent, self.test, self.replace, self.key
)
class SideBarPasteCommand2(sublime_plugin.WindowCommand):
def run(self, paths=[], in_parent="False", test="True", replace="False", key=""):
window_set_status(key, "Pasting…")
s = sublime.load_settings("SideBarEnhancements/Clipboard.sublime-settings")
cut = s.get("cut", "")
copy = s.get("copy", "")
already_exists_paths = []
if SideBarSelection(paths).len() > 0:
if in_parent == "False":
location = SideBarSelection(paths).getSelectedItems()[0].path()
else:
location = (
SideBarSelection(paths)
.getSelectedDirectoriesOrDirnames()[0]
.dirname()
)
if os.path.isdir(location) is False:
location = SideBarItem(os.path.dirname(location), True)
else:
location = SideBarItem(location, True)
if cut != "":
cut = cut.split("\n")
for path in cut:
path = SideBarItem(path, os.path.isdir(path))
new = os.path.join(location.path(), path.name())
if test == "True" and os.path.exists(new):
already_exists_paths.append(new)
elif test == "False":
if os.path.exists(new) and replace == "False":
pass
else:
try:
if not path.move(new, replace == "True"):
window_set_status(key, "")
sublime.error_message(
"Unable to cut and paste, destination exists."
)
return
except:
window_set_status(key, "")
sublime.error_message(
"Unable to move:\n\n"
+ path.path()
+ "\n\nto\n\n"
+ new
)
return
if copy != "":
copy = copy.split("\n")
for path in copy:
path = SideBarItem(path, os.path.isdir(path))
new = os.path.join(location.path(), path.name())
if test == "True" and os.path.exists(new):
already_exists_paths.append(new)
elif test == "False":
if os.path.exists(new) and replace == "False":
pass
else:
try:
if not path.copy(new, replace == "True"):
window_set_status(key, "")
sublime.error_message(
"Unable to copy and paste, destination exists."
)
return
except:
window_set_status(key, "")
sublime.error_message(
"Unable to copy:\n\n"
+ path.path()
+ "\n\nto\n\n"
+ new
)
return
if test == "True" and len(already_exists_paths):
self.confirm(paths, in_parent, already_exists_paths, key)
elif test == "True" and not len(already_exists_paths):
SideBarPasteThread(paths, in_parent, "False", "False", key).start()
elif test == "False":
cut = s.set("cut", "")
SideBarProject().refresh()
window_set_status(key, "")
else:
window_set_status(key, "")
def confirm(self, paths, in_parent, data, key):
import functools
window = Window()
window.show_input_panel("BUG!", "", "", None, None)
window.run_command("hide_panel")
yes = []
yes.append("Yes, Replace the following items:")
for item in data:
yes.append(SideBarItem(item, os.path.isdir(item)).pathWithoutProject())
no = []
no.append("No")
no.append("Continue without replacing")
while len(no) != len(yes):
no.append("ST3 BUG")
window.show_quick_panel(
[yes, no], functools.partial(self.on_done, paths, in_parent, key)
)
def on_done(self, paths, in_parent, key, result):
window_set_status(key, "")
if result != -1:
if result == 0:
SideBarPasteThread(paths, in_parent, "False", "True", key).start()
else:
SideBarPasteThread(paths, in_parent, "False", "False", key).start()
class SideBarCopyNameCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
items.append(item.name())
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
def is_visible(self, paths=[]):
return not s.get("disabled_menuitem_copy_name", False)
class SideBarCopyNameEncodedCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
items.append(item.nameEncoded())
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
class SideBarCopyPathCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
items.append(item.path())
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
class SideBarCopyPathQuotedCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
items.append('"' + item.path() + '"')
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
class SideBarCopyDirPathCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedDirectoriesOrDirnames():
items.append(item.path())
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
def is_visible(self, paths=[]):
return not s.get("disabled_menuitem_copy_dir_path", False)
class SideBarCopyPathEncodedCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
items.append(item.uri())
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
class SideBarCopyPathRelativeFromProjectCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
items.append(item.pathRelativeFromProject())
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).len() > 0
and CACHED_SELECTION(paths).hasItemsUnderProject()
)
class SideBarCopyPathRelativeFromProjectEncodedCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
items.append(item.pathRelativeFromProjectEncoded())
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).len() > 0
and CACHED_SELECTION(paths).hasItemsUnderProject()
)
class SideBarCopyPathRelativeFromViewCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
items.append(item.pathRelativeFromView())
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
class SideBarCopyPathRelativeFromViewEncodedCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
path = item.pathRelativeFromViewEncoded()
if path:
items.append(path)
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
view_locations_stack = []
class view_locations_stack_listener(sublime_plugin.EventListener):
def on_activated(self, v):
global view_locations_stack
if (
v
and v.file_name()
and (not view_locations_stack or view_locations_stack[-1] != v.file_name())
):
view_locations_stack.append(v.file_name())
view_locations_stack = view_locations_stack[-5:]
class SideBarCopyPathRelativeToLastSelectedViewCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
try:
origin = view_locations_stack[-2]
items = []
for item in SideBarSelection(paths).getSelectedItems():
items.append(item.path())
temp = []
for index in range(len(items)):
if not os.path.samefile(items[index], origin):
temp.append(
os.path.join(
".", os.path.relpath(items[index], os.path.dirname(origin))
)
)
items = temp
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
except:
pass
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0 and len(view_locations_stack) > 1
class SideBarCopyPathAbsoluteFromProjectCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
if (
s.get("copy_path_absolute_from_project_includes_line_number", False)
and item.views()
):
view = item.views()[0]
if view.sel():
line, col = view.rowcol(view.sel()[0].b)
items.append(item.pathAbsoluteFromProject() + ":" + str(line + 1))
else:
items.append(item.pathAbsoluteFromProject())
else:
items.append(item.pathAbsoluteFromProject())
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).len() > 0
and CACHED_SELECTION(paths).hasItemsUnderProject()
)
class SideBarCopyPathAbsoluteFromProjectEncodedCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
items.append(item.pathAbsoluteFromProjectEncoded())
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).len() > 0
and CACHED_SELECTION(paths).hasItemsUnderProject()
)
def is_visible(self, paths=[]):
return not s.get("disabled_menuitem_copy_path", False)
class SideBarCopyPathAbsoluteFromProjectEncodedWindowsCommand(
sublime_plugin.WindowCommand
):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
items.append(item.pathAbsoluteFromProjectEncoded())
if len(items) > 0:
sublime.set_clipboard(("\n".join(items)).replace("/", "\\"))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).len() > 0
and CACHED_SELECTION(paths).hasItemsUnderProject()
)
def is_visible(self, paths=[]):
return not s.get("disabled_menuitem_copy_path_windows", True)
class SideBarCopyTagAhrefCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
items.append(
'<a href="'
+ item.pathAbsoluteFromProjectEncoded()
+ '">'
+ item.namePretty()
+ "</a>"
)
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).len() > 0
and CACHED_SELECTION(paths).hasItemsUnderProject()
)
class SideBarCopyTagImgCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedImages():
try:
image_type, width, height = self.getImageInfo(item.path())
items.append(
'<img src="'
+ item.pathAbsoluteFromProjectEncoded()
+ '" width="'
+ str(width)
+ '" height="'
+ str(height)
+ '">'
)
except:
items.append(
'<img src="' + item.pathAbsoluteFromProjectEncoded() + '">'
)
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
# http://stackoverflow.com/questions/8032642/how-to-obtain-image-size-using-standard-python-class-without-using-external-lib
def getImageInfo(self, fname):
import struct
import imghdr
"""Determine the image type of fhandle and return its size.
from draco"""
fhandle = open(fname, "rb")
head = fhandle.read(24)
if len(head) != 24:
return
if imghdr.what(fname) == "png":
check = struct.unpack(">i", head[4:8])[0]
if check != 0x0d0a1a0a:
return
width, height = struct.unpack(">ii", head[16:24])
elif imghdr.what(fname) == "gif":
width, height = struct.unpack("<HH", head[6:10])
elif imghdr.what(fname) == "jpeg":
try:
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xc0 <= ftype <= 0xcf:
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xff:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack(">H", fhandle.read(2))[0] - 2
# We are at a SOFn block
fhandle.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack(">HH", fhandle.read(4))
except Exception: # IGNORE:W0703
return
else:
return
return None, width, height
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).hasImages()
and CACHED_SELECTION(paths).hasItemsUnderProject()
)
class SideBarCopyTagStyleCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedFilesWithExtension("css"):
items.append(
'<link rel="stylesheet" type="text/css" href="'
+ item.pathAbsoluteFromProjectEncoded()
+ '"/>'
)
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).hasFilesWithExtension("css")
and CACHED_SELECTION(paths).hasItemsUnderProject()
)
class SideBarCopyTagScriptCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedFilesWithExtension("js"):
items.append(
'<script type="text/javascript" src="'
+ item.pathAbsoluteFromProjectEncoded()
+ '"></script>'
)
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).hasFilesWithExtension("js")
and CACHED_SELECTION(paths).hasItemsUnderProject()
)
class SideBarCopyProjectDirectoriesCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for directory in SideBarProject().getDirectories():
items.append(directory)
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items copied")
else:
sublime.status_message("Item copied")
def is_enabled(self, paths=[]):
return True
class SideBarCopyContentUtf8Command(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedFiles():
items.append(item.contentUTF8())
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items content copied")
else:
sublime.status_message("Item content copied")
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).hasFiles()
class SideBarCopyContentBase64Command(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedFiles():
items.append(item.contentBase64())
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items content copied")
else:
sublime.status_message("Item content copied")
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).hasFiles()
class SideBarCopyUrlCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
if item.isUnderCurrentProject():
items.append(item.url("url_production"))
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items URL copied")
else:
sublime.status_message("Item URL copied")
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).hasItemsUnderProject()
class SideBarCopyUrlDecodedCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
items = []
for item in SideBarSelection(paths).getSelectedItems():
if item.isUnderCurrentProject():
txt = item.url("url_production")
try:
txt = urlunquote(txt.encode("utf8")).decode("utf8")
except TypeError:
txt = urlunquote(txt)
items.append(txt)
if len(items) > 0:
sublime.set_clipboard("\n".join(items))
if len(items) > 1:
sublime.status_message("Items URL copied")
else:
sublime.status_message("Item URL copied")
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).hasItemsUnderProject()
class SideBarDuplicateCommand(sublime_plugin.WindowCommand):
def run(self, paths=[], new=False):
import functools
Window().run_command("hide_panel")
view = Window().show_input_panel(
"Duplicate As:",
new or SideBarSelection(paths).getSelectedItems()[0].path(),
functools.partial(
self.on_done, SideBarSelection(paths).getSelectedItems()[0].path()
),
None,
None,
)
view.sel().clear()
view.sel().add(
sublime.Region(
view.size() - len(SideBarSelection(paths).getSelectedItems()[0].name()),
view.size()
- len(SideBarSelection(paths).getSelectedItems()[0].extension()),
)
)
def on_done(self, old, new):
key = "duplicate-" + str(time.time())
SideBarDuplicateThread(old, new, key).start()
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).len() == 1
and CACHED_SELECTION(paths).hasProjectDirectories() is False
)
class SideBarDuplicateThread(threading.Thread):
def __init__(self, old, new, key):
self.old = old
self.new = new
self.key = key
threading.Thread.__init__(self)
def run(self):
old = self.old
new = self.new
key = self.key
window_set_status(key, "Duplicating…")
item = SideBarItem(old, os.path.isdir(old))
try:
if not item.copy(new):
window_set_status(key, "")
if SideBarItem(new, os.path.isdir(new)).overwrite():
self.run()
else:
SideBarDuplicateCommand(Window()).run([old], new)
return
except:
window_set_status(key, "")
sublime.error_message("Unable to copy:\n\n" + old + "\n\nto\n\n" + new)
SideBarDuplicateCommand(Window()).run([old], new)
return
item = SideBarItem(new, os.path.isdir(new))
if item.isFile():
item.edit()
SideBarProject().refresh()
window_set_status(key, "")
class SideBarRenameCommand(sublime_plugin.WindowCommand):
def run(self, paths=[], newLeaf=False):
import functools
branch, leaf = os.path.split(
SideBarSelection(paths).getSelectedItems()[0].path()
)
Window().run_command("hide_panel")
view = Window().show_input_panel(
"New Name:",
newLeaf or leaf,
functools.partial(
self.on_done,
SideBarSelection(paths).getSelectedItems()[0].path(),
branch,
),
None,
None,
)
view.sel().clear()
view.sel().add(
sublime.Region(
view.size() - len(SideBarSelection(paths).getSelectedItems()[0].name()),
view.size()
- len(SideBarSelection(paths).getSelectedItems()[0].extension()),
)
)
def on_done(self, old, branch, leaf):
key = "rename-" + str(time.time())
SideBarRenameThread(old, branch, leaf, key).start()
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).len() == 1
and CACHED_SELECTION(paths).hasProjectDirectories() is False
)
class SideBarRenameThread(threading.Thread):
def __init__(self, old, branch, leaf, key):
self.old = old
self.branch = branch
self.leaf = leaf
self.key = key
threading.Thread.__init__(self)
def run(self):
old = self.old
branch = self.branch
leaf = self.leaf
key = self.key
window_set_status(key, "Renaming…")
Window().run_command("hide_panel")
leaf = leaf.strip()
new = os.path.join(branch, leaf)
item = SideBarItem(old, os.path.isdir(old))
try:
if not item.move(new):
if SideBarItem(new, os.path.isdir(new)).overwrite():
self.run()
else:
window_set_status(key, "")
SideBarRenameCommand(Window()).run([old], leaf)
except:
window_set_status(key, "")
sublime.error_message("Unable to rename:\n\n" + old + "\n\nto\n\n" + new)
SideBarRenameCommand(Window()).run([old], leaf)
raise
return
SideBarProject().refresh()
window_set_status(key, "")
class SideBarMassRenameCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
import functools
Window().run_command("hide_panel")
Window().show_input_panel(
"Find:", "", functools.partial(self.on_find, paths), None, None
)
def on_find(self, paths, find):
if not find:
return
import functools
Window().run_command("hide_panel")
Window().show_input_panel(
"Replace:", "", functools.partial(self.on_replace, paths, find), None, None
)
def on_replace(self, paths, find, replace):
key = "mass-renaming-" + str(time.time())
SideBarMassRenameThread(paths, find, replace, key).start()
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
class SideBarMassRenameThread(threading.Thread):
def __init__(self, paths, find, replace, key):
self.paths = paths
self.find = find
self.replace = replace
self.key = key
threading.Thread.__init__(self)
def run(self):
paths = self.paths
find = self.find
replace = self.replace
key = self.key
if find == "":
return None
else:
window_set_status(key, "Mass renaming…")
to_rename_or_move = []
for item in SideBarSelection(paths).getSelectedItemsWithoutChildItems():
self.recurse(item.path(), to_rename_or_move)
to_rename_or_move.sort()
to_rename_or_move.reverse()
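            # deepest paths come first so children are renamed before their parent directories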
for item in to_rename_or_move:
if find in item:
origin = SideBarItem(item, os.path.isdir(item))
destination = SideBarItem(
origin.pathProject()
+ ""
+ (origin.pathWithoutProject().replace(find, replace)),
os.path.isdir(item),
)
origin.move(destination.path())
SideBarProject().refresh()
window_set_status(key, "")
def recurse(self, path, paths):
if os.path.isfile(path) or os.path.islink(path):
paths.append(path)
else:
for content in os.listdir(path):
file = os.path.join(path, content)
if os.path.isfile(file) or os.path.islink(file):
paths.append(file)
else:
self.recurse(file, paths)
paths.append(path)
class SideBarMoveCommand(sublime_plugin.WindowCommand):
def run(self, paths=[], new=False):
import functools
Window().run_command("hide_panel")
view = Window().show_input_panel(
"New Location:",
new or SideBarSelection(paths).getSelectedItems()[0].path(),
functools.partial(
self.on_done, SideBarSelection(paths).getSelectedItems()[0].path()
),
None,
None,
)
view.sel().clear()
view.sel().add(
sublime.Region(
view.size() - len(SideBarSelection(paths).getSelectedItems()[0].name()),
view.size()
- len(SideBarSelection(paths).getSelectedItems()[0].extension()),
)
)
def on_done(self, old, new):
key = "move-" + str(time.time())
SideBarMoveThread(old, new, key).start()
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).len() == 1
and CACHED_SELECTION(paths).hasProjectDirectories() is False
)
class SideBarMoveThread(threading.Thread):
def __init__(self, old, new, key):
self.old = old
self.new = new
self.key = key
threading.Thread.__init__(self)
def run(self):
old = self.old
new = self.new
key = self.key
window_set_status(key, "Moving…")
item = SideBarItem(old, os.path.isdir(old))
try:
if not item.move(new):
if SideBarItem(new, os.path.isdir(new)).overwrite():
self.run()
else:
window_set_status(key, "")
SideBarMoveCommand(Window()).run([old], new)
return
except:
window_set_status(key, "")
sublime.error_message("Unable to move:\n\n" + old + "\n\nto\n\n" + new)
SideBarMoveCommand(Window()).run([old], new)
raise
return
SideBarProject().refresh()
window_set_status(key, "")
class SideBarDeleteThread(threading.Thread):
def __init__(self, paths):
self.paths = paths
threading.Thread.__init__(self)
def run(self):
SideBarDeleteCommand(Window())._delete_threaded(self.paths)
class SideBarDeleteCommand(sublime_plugin.WindowCommand):
def run(self, paths=[], confirmed="False"):
if confirmed == "False" and s.get("confirm_before_deleting", True):
if sublime.platform() == "osx":
if sublime.ok_cancel_dialog("Delete the selected items?"):
self.run(paths, "True")
else:
self.confirm(
[
item.path()
for item in SideBarSelection(paths).getSelectedItems()
],
[
item.pathWithoutProject()
for item in SideBarSelection(paths).getSelectedItems()
],
)
else:
SideBarDeleteThread(paths).start()
def _delete_threaded(self, paths):
key = "delete-" + str(time.time())
window_set_status(key, "Deleting…")
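        # try sending items to the trash first; fall back to permanent deletion below if that fails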
try:
from .send2trash import send2trash
for item in SideBarSelection(paths).getSelectedItemsWithoutChildItems():
if s.get("close_affected_buffers_when_deleting_even_if_dirty", False):
item.closeViews()
if s.get("disable_send_to_trash", False):
if sublime.platform() == "windows":
self.remove("\\\\?\\" + item.path())
else:
self.remove(item.path())
else:
send2trash(item.path())
SideBarProject().refresh()
except:
should_confirm = s.get("confirm_before_permanently_deleting", True)
if not should_confirm or sublime.ok_cancel_dialog(
"There is no trash bin, permanently delete?", "Yes, Permanent Deletion"
):
for item in SideBarSelection(paths).getSelectedItemsWithoutChildItems():
if s.get(
"close_affected_buffers_when_deleting_even_if_dirty", False
):
item.closeViews()
if sublime.platform() == "windows":
self.remove("\\\\?\\" + item.path())
else:
self.remove(item.path())
SideBarProject().refresh()
window_set_status(key, "")
def confirm(self, paths, display_paths):
import functools
window = Window()
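        # showing and immediately hiding an input panel appears to be a workaround to settle focus before the quick panel opens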
window.show_input_panel("BUG!", "", "", None, None)
window.run_command("hide_panel")
yes = []
yes.append("Yes, delete the selected items.")
for item in display_paths:
yes.append(item)
no = []
no.append("No")
no.append("Cancel the operation.")
while len(no) != len(yes):
no.append("")
if sublime.platform() == "osx":
sublime.set_timeout(
lambda: window.show_quick_panel(
[yes, no], functools.partial(self.on_confirm, paths)
),
200,
)
else:
window.show_quick_panel(
[yes, no], functools.partial(self.on_confirm, paths)
)
def on_confirm(self, paths, result):
if result != -1:
if result == 0:
self.run(paths, "True")
def on_done(self, old, new):
if s.get("close_affected_buffers_when_deleting_even_if_dirty", False):
item = SideBarItem(new, os.path.isdir(new))
item.closeViews()
if sublime.platform() == "windows":
self.remove("\\\\?\\" + new)
else:
self.remove(new)
SideBarProject().refresh()
def remove(self, path):
if os.path.isfile(path) or os.path.islink(path):
self.remove_safe_file(path)
else:
for content in os.listdir(path):
file = os.path.join(path, content)
if os.path.isfile(file) or os.path.islink(file):
self.remove_safe_file(file)
else:
self.remove(file)
self.remove_safe_dir(path)
def remove_safe_file(self, path):
if not SideBarSelection().isNone(path):
try:
os.remove(path)
except:
try:
if not os.access(path, os.W_OK):
import stat
os.chmod(path, stat.S_IWUSR)
os.remove(path)
except:
# raise error in case we were unable to delete.
if os.path.exists(path):
print("Unable to remove file:\n" + path)
os.remove(path)
else:
print("path is none")
print(path)
def remove_safe_dir(self, path):
if not SideBarSelection().isNone(path):
try:
shutil.rmtree(path)
except:
try:
if not os.access(path, os.W_OK):
import stat
os.chmod(path, stat.S_IWUSR)
shutil.rmtree(path)
except:
# raise error in case we were unable to delete.
if os.path.exists(path):
print("Unable to remove folder:\n" + path)
shutil.rmtree(path)
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).len() > 0
and CACHED_SELECTION(paths).hasProjectDirectories() is False
)
class SideBarEmptyCommand(sublime_plugin.WindowCommand):
def run(self, paths=[], confirmed="False"):
if confirmed == "False" and s.get("confirm_before_deleting", True):
if sublime.platform() == "osx":
if sublime.ok_cancel_dialog("empty the content of the folder?"):
self.run(paths, "True")
else:
self.confirm(
[
item.path()
for item in SideBarSelection(
paths
).getSelectedDirectoriesOrDirnames()
],
[
item.pathWithoutProject()
for item in SideBarSelection(
paths
).getSelectedDirectoriesOrDirnames()
],
)
else:
key = "move-" + str(time.time())
SideBarEmptyThread(paths, key).start()
def confirm(self, paths, display_paths):
import functools
window = Window()
window.show_input_panel("BUG!", "", "", None, None)
window.run_command("hide_panel")
yes = []
yes.append("Yes, empty the selected items.")
for item in display_paths:
yes.append(item)
no = []
no.append("No")
no.append("Cancel the operation.")
while len(no) != len(yes):
no.append("")
if sublime.platform() == "osx":
sublime.set_timeout(
lambda: window.show_quick_panel(
[yes, no], functools.partial(self.on_confirm, paths)
),
200,
)
else:
window.show_quick_panel(
[yes, no], functools.partial(self.on_confirm, paths)
)
def on_confirm(self, paths, result):
if result != -1:
if result == 0:
self.run(paths, "True")
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
def is_visible(self, paths=[]):
return not s.get("disabled_menuitem_empty", True)
class SideBarEmptyThread(threading.Thread):
def __init__(self, paths, key):
self.paths = paths
self.key = key
threading.Thread.__init__(self)
def run(self):
paths = self.paths
key = self.key
window_set_status(key, "Emptying…")
try:
from .send2trash import send2trash
for item in SideBarSelection(paths).getSelectedDirectoriesOrDirnames():
for content in os.listdir(item.path()):
file = os.path.join(item.path(), content)
if not SideBarSelection().isNone(file):
send2trash(file)
if s.get("close_affected_buffers_when_deleting_even_if_dirty", False):
item.closeViews()
except:
pass
SideBarProject().refresh()
window_set_status(key, "")
class SideBarRevealCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
if len(paths) > 1:
paths = SideBarSelection(paths).getSelectedDirectoriesOrDirnames()
else:
paths = SideBarSelection(paths).getSelectedItems()
for item in paths:
item.reveal()
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
class SideBarProjectOpenFileCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
project = SideBarProject()
if project.hasOpenedProject():
SideBarItem(project.getProjectFile(), False).edit()
def is_enabled(self, paths=[]):
return SideBarProject().hasOpenedProject()
class SideBarPreviewEditUrlsCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
item = SideBarItem(
os.path.dirname(sublime.packages_path())
+ "/Settings/SideBarEnhancements.json",
False,
)
item.dirnameCreate()
item.edit()
class SideBarProjectItemAddCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
project = SideBarProject()
for item in SideBarSelection(paths).getSelectedDirectories():
project.add(item.path())
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).hasDirectories()
and CACHED_SELECTION(paths).hasProjectDirectories() is False
)
class SideBarProjectItemRemoveFolderCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
Window().run_command("remove_folder", {"dirs": paths})
def is_enabled(self, paths=[]):
selection = CACHED_SELECTION(paths)
project = SideBarProject()
return project.hasDirectories() and all(
[
item.path() in project.getDirectories() or not item.exists()
for item in selection.getSelectedItems()
]
)
class SideBarProjectItemExcludeCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
project = SideBarProject()
for item in SideBarSelection(paths).getSelectedItems():
if item.isDirectory():
project.excludeDirectory(item.path(), item.pathRelativeFromProject())
else:
project.excludeFile(item.path(), item.pathRelativeFromProject())
def is_enabled(self, paths=[]):
return (
CACHED_SELECTION(paths).len() > 0
and CACHED_SELECTION(paths).hasProjectDirectories() is False
)
class SideBarProjectItemExcludeFromIndexCommand(sublime_plugin.WindowCommand):
def run(self, paths=[], type="item"):
Preferences = sublime.load_settings("Preferences.sublime-settings")
excluded = Preferences.get("binary_file_patterns", [])
for item in self.items(paths, type, SideBarSelection(paths)):
excluded.append(item)
for k, v in enumerate(excluded):
excluded[k] = excluded[k].replace("\\", "/")
            excluded[k] = re.sub(r"([a-z]):/", r"/\1/", excluded[k], 0, re.I)
excluded[k] = re.sub("/$", "/**", excluded[k])
excluded = list(set(excluded))
excluded = sorted(excluded)
Preferences.set("binary_file_patterns", excluded)
sublime.save_settings("Preferences.sublime-settings")
def is_visible(self, paths=[], type="item"):
return len(self.items(paths, type, CACHED_SELECTION(paths))) > 0
def description(self, paths=[], type="item"):
items = self.items(paths, type, CACHED_SELECTION(paths))
return 'Exclude From Index (mark as binary) "' + (",".join(items)) + '"'
def items(self, paths=[], type="item", object=None):
items = []
if type == "item":
for item in object.getSelectedItems():
if item.isDirectory():
items.append(
re.sub(
"([a-z])\:/",
"/\\1/",
(item.path().replace("\\", "/") + "/**"),
0,
re.I,
)
)
else:
items.append(
re.sub(
"([a-z])\:/",
"/\\1/",
(item.path().replace("\\", "/")),
0,
re.I,
)
)
elif type == "relative":
for item in object.getSelectedItems():
if item.isDirectory():
items.append(
item.pathRelativeFromProject().replace("\\", "/") + "/**"
)
else:
items.append(item.pathRelativeFromProject().replace("\\", "/"))
elif type == "extension":
for item in object.getSelectedFiles():
items.append("*" + item.extension())
elif type == "file":
for item in object.getSelectedFiles():
items.append(item.name())
elif type == "directory":
for item in object.getSelectedDirectories():
items.append(item.name() + "/**")
items = list(set(items))
return items
class SideBarOpenBrowsersCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
browsers = s.get("open_all_browsers", [])
if browsers:
window = Window()
for browser in browsers:
window.run_command(
"side_bar_open_in_browser",
{"paths": paths, "type": "testing", "browser": browser},
)
class SideBarOpenInBrowserCommand(sublime_plugin.WindowCommand):
def run(self, paths=[], type=False, browser=""):
if not browser:
browser = s.get("default_browser", "")
if type == False or type == "testing":
type = "url_testing"
elif type == "production":
type = "url_production"
else:
type = "url_testing"
SideBarOpenInBrowserThread(paths, type, browser).start()
def is_enabled(self, paths=[]):
return CACHED_SELECTION(paths).len() > 0
def is_visible(self, paths=[]):
return not s.get("disabled_menuitem_open_in_browser", False)
class SideBarOpenInBrowserThread(threading.Thread):
def __init__(self, paths, type, browser):
self.paths = paths
self.type = type
self.browser = browser
threading.Thread.__init__(self)
def run(self):
paths = self.paths
type = self.type
browser = self.browser
for item in SideBarSelection(paths).getSelectedItems():
url = item.url(type) or item.uri()
self.try_open(url, browser)
def try_open(self, url, browser):
import subprocess
if sublime.platform() == "windows":
import winreg
browser = browser.lower().strip()
items = []
if browser == "chrome":
if sublime.platform() == "osx":
items.extend(["open"])
commands = ["-a", "/Applications/Google Chrome.app", url]
elif sublime.platform() == "windows":
aKey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
)
reg_value, reg_type = winreg.QueryValueEx(aKey, "Local AppData")
if s.get("portable_browser", "") != "":
items.extend([s.get("portable_browser", "")])
items.extend(
[
"%HOMEPATH%\\AppData\\Local\\Google\\Chrome\\Application\\chrome.exe",
reg_value + "\\Chrome\\Application\\chrome.exe",
reg_value + "\\Google\\Chrome\\Application\\chrome.exe",
"%HOMEPATH%\\Google\\Chrome\\Application\\chrome.exe",
"%PROGRAMFILES%\\Google\\Chrome\\Application\\chrome.exe",
"%PROGRAMFILES(X86)%\\Google\\Chrome\\Application\\chrome.exe",
"%USERPROFILE%\\Local\ Settings\\Application\ Data\\Google\\Chrome\\chrome.exe",
"%HOMEPATH%\\Chromium\\Application\\chrome.exe",
"%PROGRAMFILES%\\Chromium\\Application\\chrome.exe",
"%PROGRAMFILES(X86)%\\Chromium\\Application\\chrome.exe",
"%HOMEPATH%\\Local\ Settings\\Application\ Data\\Google\\Chrome\\Application\\chrome.exe",
"%HOMEPATH%\\Local Settings\\Application Data\\Google\\Chrome\\Application\\chrome.exe",
"chrome.exe",
]
)
commands = ["-new-tab", url]
else:
if s.get("portable_browser", "") != "":
items.extend([s.get("portable_browser", "")])
items.extend(
[
"/usr/bin/google-chrome",
"/opt/google/chrome/chrome",
"chrome",
"google-chrome",
]
)
commands = ["-new-tab", url]
elif browser == "canary":
if sublime.platform() == "osx":
items.extend(["open"])
commands = ["-a", "/Applications/Google Chrome Canary.app", url]
elif sublime.platform() == "windows":
aKey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
)
reg_value, reg_type = winreg.QueryValueEx(aKey, "Local AppData")
if s.get("portable_browser", "") != "":
items.extend([s.get("portable_browser", "")])
items.extend(
[
"%HOMEPATH%\\AppData\\Local\\Google\\Chrome SxS\\Application\\chrome.exe",
reg_value + "\\Chrome SxS\\Application\\chrome.exe",
reg_value + "\\Google\\Chrome SxS\\Application\\chrome.exe",
"%HOMEPATH%\\Google\\Chrome SxS\\Application\\chrome.exe",
"%PROGRAMFILES%\\Google\\Chrome SxS\\Application\\chrome.exe",
"%PROGRAMFILES(X86)%\\Google\\Chrome SxS\\Application\\chrome.exe",
"%USERPROFILE%\\Local\ Settings\\Application\ Data\\Google\\Chrome SxS\\chrome.exe",
"%HOMEPATH%\\Local\ Settings\\Application\ Data\\Google\\Chrome SxS\\Application\\chrome.exe",
"%HOMEPATH%\\Local Settings\\Application Data\\Google\\Chrome SxS\\Application\\chrome.exe",
]
)
commands = ["-new-tab", url]
elif browser == "chromium":
if sublime.platform() == "osx":
items.extend(["open"])
commands = ["-a", "/Applications/Chromium.app", url]
elif sublime.platform() == "windows":
aKey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
)
reg_value, reg_type = winreg.QueryValueEx(aKey, "Local AppData")
if s.get("portable_browser", "") != "":
items.extend([s.get("portable_browser", "")])
items.extend(
[
"%HOMEPATH%\\AppData\\Local\\Google\\Chrome SxS\\Application\\chrome.exe",
reg_value + "\\Chromium\\Application\\chromium.exe",
"%USERPROFILE%\\Local Settings\\Application Data\\Google\\Chrome\\chromium.exe",
"%USERPROFILE%\\Local\ Settings\\Application\ Data\\Google\\Chrome\\chromium.exe",
"%HOMEPATH%\\Chromium\\Application\\chromium.exe",
"%PROGRAMFILES%\\Chromium\\Application\\chromium.exe",
"%PROGRAMFILES(X86)%\\Chromium\\Application\\chromium.exe",
"%HOMEPATH%\\Local Settings\\Application\ Data\\Google\\Chrome\\Application\\chromium.exe",
"%HOMEPATH%\\Local Settings\\Application Data\\Google\\Chrome\\Application\\chromium.exe",
"chromium.exe",
reg_value + "\\Chromium\\Application\\chrome.exe",
"%USERPROFILE%\\Local Settings\\Application Data\\Google\\Chrome\\chrome.exe",
"%USERPROFILE%\\Local\ Settings\\Application\ Data\\Google\\Chrome\\chrome.exe",
"%HOMEPATH%\\Chromium\\Application\\chrome.exe",
"%PROGRAMFILES%\\Chromium\\Application\\chrome.exe",
"%PROGRAMFILES(X86)%\\Chromium\\Application\\chrome.exe",
"%HOMEPATH%\\Local\ Settings\\Application\ Data\\Google\\Chrome\\Application\\chrome.exe",
"%HOMEPATH%\\Local Settings\\Application Data\\Google\\Chrome\\Application\\chrome.exe",
"chrome.exe",
]
)
commands = ["-new-tab", url]
else:
if s.get("portable_browser", "") != "":
items.extend([s.get("portable_browser", "")])
items.extend(
[
"/usr/bin/chromium",
"chromium",
"/usr/bin/chromium-browser",
"chromium-browser",
]
)
commands = ["-new-tab", url]
elif browser == "firefox-developer-edition":
if sublime.platform() == "osx":
items.extend(["open"])
commands = ["-a", "/Applications/Firefox Developer Edition.app", url]
else:
if s.get("portable_browser", "") != "":
items.extend([s.get("portable_browser", "")])
items.extend(
[
"/usr/bin/firefox",
"%PROGRAMFILES%\\Firefox Developer Edition\\firefox.exe",
"%PROGRAMFILES(X86)%\\Firefox Developer Edition\\firefox.exe",
"%PROGRAMFILES%\\Nightly\\firefox.exe",
"%PROGRAMFILES(X86)%\\Nightly\\firefox.exe",
"%PROGRAMFILES%\\Mozilla Firefox\\firefox.exe",
"%PROGRAMFILES(X86)%\\Mozilla Firefox\\firefox.exe",
"firefox",
"firefox.exe",
]
)
commands = ["-new-tab", url]
elif browser == "firefox":
if sublime.platform() == "osx":
items.extend(["open"])
commands = ["-a", "/Applications/Firefox.app", url]
else:
if s.get("portable_browser", "") != "":
items.extend([s.get("portable_browser", "")])
items.extend(
[
"/usr/bin/firefox",
"%PROGRAMFILES%\\Mozilla Firefox\\firefox.exe",
"%PROGRAMFILES(X86)%\\Mozilla Firefox\\firefox.exe",
"firefox",
"firefox.exe",
"%PROGRAMFILES%\\Firefox Developer Edition\\firefox.exe",
"%PROGRAMFILES(X86)%\\Firefox Developer Edition\\firefox.exe",
"%PROGRAMFILES%\\Nightly\\firefox.exe",
"%PROGRAMFILES(X86)%\\Nightly\\firefox.exe",
]
)
commands = ["-new-tab", url]
elif browser == "opera":
if sublime.platform() == "osx":
items.extend(["open"])
commands = ["-a", "/Applications/Opera.app", url]
else:
if s.get("portable_browser", "") != "":
items.extend([s.get("portable_browser", "")])
items.extend(
[
"/usr/bin/opera",
"/usr/bin/opera-next",
"/usr/bin/operamobile",
"%PROGRAMFILES%\\Opera\\opera.exe",
"%PROGRAMFILES(X86)%\\Opera\\opera.exe",
"%PROGRAMFILES%\\Opera\\launcher.exe",
"%PROGRAMFILES(X86)%\\Opera\\launcher.exe",
"%PROGRAMFILES%\\Opera Next\\opera.exe",
"%PROGRAMFILES(X86)%\\Opera Next\\opera.exe",
"%PROGRAMFILES%\\Opera Mobile Emulator\\OperaMobileEmu.exe",
"%PROGRAMFILES(X86)%\\Opera Mobile Emulator\\OperaMobileEmu.exe",
"opera",
"opera.exe",
]
)
commands = ["-newtab", url]
elif browser == "ie":
if s.get("portable_browser", "") != "":
items.extend([s.get("portable_browser", "")])
items.extend(
[
"%PROGRAMFILES%\\Internet Explorer\\iexplore.exe",
"%PROGRAMFILES(X86)%\\Internet Explorer\\iexplore.exe",
"iexplore",
"iexplore.exe",
]
)
commands = ["-newtab", url]
elif browser == "edge":
if s.get("portable_browser", "") != "":
items.extend([s.get("portable_browser", "")])
items.extend(["open"])
commands = ["-newtab", url]
elif browser == "safari":
if sublime.platform() == "osx":
items.extend(["open"])
commands = ["-a", "Safari", url]
else:
if s.get("portable_browser", "") != "":
items.extend([s.get("portable_browser", "")])
items.extend(
[
"/usr/bin/safari",
"%PROGRAMFILES%\\Safari\\Safari.exe",
"%PROGRAMFILES(X86)%\\Safari\\Safari.exe",
"Safari",
"Safari.exe",
]
)
commands = ["-new-tab", "-url", url]
else:
if s.get("portable_browser", "") != "":
items.extend([s.get("portable_browser", "")])
commands = ["-new-tab", url]
for item in items:
try:
command2 = list(commands)
command2.insert(0, expandVars(item))
subprocess.Popen(command2)
return
except:
try:
command2 = list(commands)
command2.insert(0, item)
subprocess.Popen(command2)
return
except:
pass
try:
if sublime.platform() == "windows":
if browser and browser == "edge":
commands = ["cmd", "/c", "start", "microsoft-edge:" + url]
else:
commands = ["cmd", "/c", "start", "", url]
subprocess.Popen(commands)
elif sublime.platform() == "linux":
commands = ["xdg-open", url]
subprocess.Popen(commands)
else:
commands = ["open", url]
subprocess.Popen(commands)
return
except:
pass
sublime.error_message(
'Browser "' + browser + '" not found!\nIs installed? Which location...?'
)
class SideBarOpenInNewWindowCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
import subprocess
items = []
executable_path = sublime.executable_path()
if sublime.platform() == "osx":
app_path = executable_path[: executable_path.rfind(".app/") + 5]
executable_path = app_path + "Contents/SharedSupport/bin/subl"
items.append(executable_path)
for item in SideBarSelection(paths).getSelectedItems():
items.append(item.forCwdSystemPath())
items.append(item.path())
subprocess.Popen(items, cwd=items[1])
def is_visible(self, paths=[]):
return not s.get("disabled_menuitem_open_in_new_window", False)
class SideBarOpenWithFinderCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
import subprocess
for item in SideBarSelection(paths).getSelectedDirectoriesOrDirnames():
subprocess.Popen(["open", item.name()], cwd=item.dirname())
def is_visible(self, paths=[]):
return sublime.platform() == "osx"
class SideBarStatusBarFileSize(sublime_plugin.EventListener):
def on_activated(self, v):
if v.file_name() and s.get("statusbar_file_size", False):
try:
self.show(v, hurry_size(os.path.getsize(v.file_name())))
except:
pass
def on_post_save(self, v):
if v.file_name() and s.get("statusbar_file_size", False):
try:
self.show(v, hurry_size(os.path.getsize(v.file_name())))
except:
pass
def show(self, v, size):
v.set_status("statusbar_file_size", size)
class SideBarSaveAsAdminCommand(sublime_plugin.WindowCommand):
def run(self):
import tempfile
view = sublime.active_window().active_view()
path = os.path.dirname(__file__) + "/"
with (tempfile.NamedTemporaryFile(delete=False)) as f:
f.write(bytes(view.substr(sublime.Region(0, view.size())), "UTF-8"))
cli(
[
escapeCMDWindows(path + "bin/elevate.exe"),
escapeCMDWindows(path + "bin/elevate.bat"),
escapeCMDWindows(f.name),
escapeCMDWindows(view.file_name()),
]
)
f.close()
view.run_command("revert")
def is_visible(self):
return platform.system() == "Windows" or os.name == "nt"
class SideBarStatusBarModifiedTime(sublime_plugin.EventListener):
def on_activated(self, v):
if v.file_name() and s.get("statusbar_modified_time", False):
try:
self.show(v, os.path.getmtime(v.file_name()))
except:
pass
def on_post_save(self, v):
if v.file_name() and s.get("statusbar_modified_time", False):
try:
self.show(v, os.path.getmtime(v.file_name()))
except:
pass
def show(self, v, mtime):
modified_time = time.strftime(
s.get("statusbar_modified_time_format", "%A %b %d %H:%M:%S %Y"),
time.localtime(mtime),
)
if s.get("statusbar_modified_time_locale", "") != "":
modified_time = modified_time.decode(
s.get("statusbar_modified_time_locale", "")
)
v.set_status("statusbar_modified_time", modified_time)
class DefaultDirectory:
pass
DefaultDirectory = DefaultDirectory()
DefaultDirectory.path = False
class SideBarDefaultNewFolder(sublime_plugin.EventListener):
def on_new(self, view):
path = None
if not DefaultDirectory.path:
paths = SideBarProject().getDirectories()
if paths:
path = paths[0]
else:
path = DefaultDirectory.path
if path:
view.settings().set("default_dir", path)
def on_activated(self, view):
if view and view.file_name():
path = SideBarItem(view.file_name(), False).dirname()
if path:
DefaultDirectory.path = path
class SideBarDonateCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
sublime.message_dialog("Sidebar Enhancements: Thanks for your support ^.^")
browser = s.get("default_browser", "")
SideBarOpenInBrowserThread("", "", "").try_open(
"https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=DD4SL2AHYJGBW",
browser,
)
def is_visible(self, paths=[]):
return not (
s.get("i_donated_to_sidebar_enhancements_developer", False)
== "https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=DD4SL2AHYJGBW"
)
class zzzzzSideBarCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
pass
def is_visible(self, paths=[]): # <- WORKS AS AN ONPOPUPSHOWN
Cache.cached = False
return False
class zzzzzcacheSideBarCommand(sublime_plugin.EventListener):
def on_activated(self, view):
if view and view.file_name():
Cache.cached = SideBarSelection([view.file_name()])
| []
| []
| []
| [] | [] | python | 0 | 0 | |
server/proxy/server.go | // Package proxy is a cli proxy
package proxy
import (
"os"
"strings"
"github.com/go-acme/lego/v3/providers/dns/cloudflare"
"github.com/improbable-eng/grpc-web/go/grpcweb"
"github.com/micro/micro/v3/client"
"github.com/micro/micro/v3/internal/api/server/acme"
"github.com/micro/micro/v3/internal/api/server/acme/autocert"
"github.com/micro/micro/v3/internal/api/server/acme/certmagic"
"github.com/micro/micro/v3/internal/helper"
"github.com/micro/micro/v3/internal/muxer"
"github.com/micro/micro/v3/internal/sync/memory"
"github.com/micro/micro/v3/service"
bmem "github.com/micro/micro/v3/service/broker/memory"
muclient "github.com/micro/micro/v3/service/client"
log "github.com/micro/micro/v3/service/logger"
"github.com/micro/micro/v3/service/proxy"
"github.com/micro/micro/v3/service/proxy/grpc"
"github.com/micro/micro/v3/service/proxy/http"
"github.com/micro/micro/v3/service/proxy/mucp"
"github.com/micro/micro/v3/service/registry/noop"
murouter "github.com/micro/micro/v3/service/router"
"github.com/micro/micro/v3/service/server"
sgrpc "github.com/micro/micro/v3/service/server/grpc"
"github.com/micro/micro/v3/service/store"
"github.com/urfave/cli/v2"
)
var (
// Name of the proxy
Name = "proxy"
// The address of the proxy
Address = ":8081"
// Is gRPCWeb enabled
GRPCWebEnabled = false
// The address of the proxy
GRPCWebAddress = ":8082"
// the proxy protocol
Protocol = "grpc"
// The endpoint host to route to
Endpoint string
// ACME (Cert management)
ACMEProvider = "autocert"
ACMEChallengeProvider = "cloudflare"
ACMECA = acme.LetsEncryptProductionCA
)
func Run(ctx *cli.Context) error {
if len(ctx.String("server_name")) > 0 {
Name = ctx.String("server_name")
}
if len(ctx.String("address")) > 0 {
Address = ctx.String("address")
}
if ctx.Bool("grpc-web") {
GRPCWebEnabled = ctx.Bool("grpcWeb")
}
if len(ctx.String("grpc-web-port")) > 0 {
GRPCWebAddress = ctx.String("grpcWebAddr")
}
if len(ctx.String("endpoint")) > 0 {
Endpoint = ctx.String("endpoint")
}
if len(ctx.String("protocol")) > 0 {
Protocol = ctx.String("protocol")
}
if len(ctx.String("acme_provider")) > 0 {
ACMEProvider = ctx.String("acme_provider")
}
// new service
service := service.New(service.Name(Name))
// set the context
popts := []proxy.Option{
proxy.WithRouter(murouter.DefaultRouter),
proxy.WithClient(muclient.DefaultClient),
}
// set endpoint
if len(Endpoint) > 0 {
ep := Endpoint
switch {
case strings.HasPrefix(Endpoint, "grpc://"):
ep = strings.TrimPrefix(Endpoint, "grpc://")
Protocol = "grpc"
case strings.HasPrefix(Endpoint, "http://"):
Protocol = "http"
case strings.HasPrefix(Endpoint, "mucp://"):
ep = strings.TrimPrefix(Endpoint, "mucp://")
Protocol = "mucp"
}
popts = append(popts, proxy.WithEndpoint(ep))
}
serverOpts := []server.Option{
server.Name(Name),
server.Address(Address),
server.Registry(noop.NewRegistry()),
server.Broker(bmem.NewBroker()),
}
// enable acme will create a net.Listener which
if ctx.Bool("enable_acme") {
var ap acme.Provider
switch ACMEProvider {
case "autocert":
ap = autocert.NewProvider()
case "certmagic":
if ACMEChallengeProvider != "cloudflare" {
log.Fatal("The only implemented DNS challenge provider is cloudflare")
}
apiToken := os.Getenv("CF_API_TOKEN")
if len(apiToken) == 0 {
log.Fatal("env variables CF_API_TOKEN and CF_ACCOUNT_ID must be set")
}
storage := certmagic.NewStorage(
memory.NewSync(),
store.DefaultStore,
)
config := cloudflare.NewDefaultConfig()
config.AuthToken = apiToken
config.ZoneToken = apiToken
challengeProvider, err := cloudflare.NewDNSProviderConfig(config)
if err != nil {
log.Fatal(err.Error())
}
// define the provider
ap = certmagic.NewProvider(
acme.AcceptToS(true),
acme.CA(ACMECA),
acme.Cache(storage),
acme.ChallengeProvider(challengeProvider),
acme.OnDemand(false),
)
default:
log.Fatalf("Unsupported acme provider: %s\n", ACMEProvider)
}
// generate the tls config
config, err := ap.TLSConfig(helper.ACMEHosts(ctx)...)
if err != nil {
log.Fatalf("Failed to generate acme tls config: %v", err)
}
// set the tls config
serverOpts = append(serverOpts, server.TLSConfig(config))
// enable tls will leverage tls certs and generate a tls.Config
} else if ctx.Bool("enable_tls") {
// get certificates from the context
config, err := helper.TLSConfig(ctx)
if err != nil {
log.Fatal(err)
return err
}
serverOpts = append(serverOpts, server.TLSConfig(config))
}
// new proxy
var p proxy.Proxy
// set proxy
switch Protocol {
case "http":
p = http.NewProxy(popts...)
// TODO: http server
case "mucp":
p = mucp.NewProxy(popts...)
default:
// default to the grpc proxy
p = grpc.NewProxy(popts...)
}
// wrap the proxy using the proxy's authHandler
authOpt := server.WrapHandler(authHandler())
serverOpts = append(serverOpts, authOpt)
serverOpts = append(serverOpts, server.WithRouter(p))
if len(Endpoint) > 0 {
log.Infof("Proxy [%s] serving endpoint: %s", p.String(), Endpoint)
} else {
log.Infof("Proxy [%s] serving protocol: %s", p.String(), Protocol)
}
if GRPCWebEnabled {
serverOpts = append(serverOpts, sgrpc.GRPCWebPort(GRPCWebAddress))
serverOpts = append(serverOpts, sgrpc.GRPCWebOptions(
grpcweb.WithCorsForRegisteredEndpointsOnly(false),
grpcweb.WithOriginFunc(func(origin string) bool { return true })))
log.Infof("Proxy [%s] serving gRPC-Web on %s", p.String(), GRPCWebAddress)
}
// create a new grpc server
srv := sgrpc.NewServer(serverOpts...)
// create a new proxy muxer which includes the debug handler
muxer := muxer.New(Name, p)
// set the router
service.Server().Init(
server.WithRouter(muxer),
)
// Start the proxy server
if err := srv.Start(); err != nil {
log.Fatal(err)
}
// Run internal service
if err := service.Run(); err != nil {
log.Fatal(err)
}
// Stop the server
if err := srv.Stop(); err != nil {
log.Fatal(err)
}
return nil
}
var (
Flags = append(client.Flags,
&cli.StringFlag{
Name: "address",
Usage: "Set the proxy http address e.g 0.0.0.0:8081",
EnvVars: []string{"MICRO_PROXY_ADDRESS"},
},
&cli.StringFlag{
Name: "protocol",
Usage: "Set the protocol used for proxying e.g mucp, grpc, http",
EnvVars: []string{"MICRO_PROXY_PROTOCOL"},
},
&cli.StringFlag{
Name: "endpoint",
Usage: "Set the endpoint to route to e.g greeter or localhost:9090",
EnvVars: []string{"MICRO_PROXY_ENDPOINT"},
},
&cli.BoolFlag{
Name: "grpc-web",
Usage: "Enable the gRPCWeb server",
EnvVars: []string{"MICRO_PROXY_GRPC_WEB"},
},
&cli.StringFlag{
Name: "grpc-web-addr",
Usage: "Set the gRPC web addr on the proxy",
EnvVars: []string{"MICRO_PROXY_GRPC_WEB_ADDRESS"},
},
)
)
| [
"\"CF_API_TOKEN\""
]
| []
| [
"CF_API_TOKEN"
]
| [] | ["CF_API_TOKEN"] | go | 1 | 0 | |
cmd/clone.go | // Package cmd encapsulates the logic for all cli commands
package cmd
import (
"bufio"
"fmt"
"log"
"os"
"os/exec"
"strconv"
"strings"
"github.com/gabrie30/ghorg/colorlog"
"github.com/gabrie30/ghorg/configs"
"github.com/gabrie30/ghorg/scm"
"github.com/korovkin/limiter"
"github.com/spf13/cobra"
)
var (
protocol string
path string
parentFolder string
branch string
token string
cloneType string
scmType string
bitbucketUsername string
namespace string
color string
baseURL string
concurrency string
outputDir string
topics string
	skipArchived      bool
	skipForks         bool
	backup            bool
	preserveDir       bool
args []string
cloneErrors []string
cloneInfos []string
targetCloneSource string
matchPrefix string
)
func init() {
rootCmd.PersistentFlags().StringVarP(&color, "color", "", "", "GHORG_COLOR - toggles colorful output on/off (default on)")
rootCmd.AddCommand(cloneCmd)
cloneCmd.Flags().StringVar(&protocol, "protocol", "", "GHORG_CLONE_PROTOCOL - protocol to clone with, ssh or https, (default https)")
cloneCmd.Flags().StringVarP(&path, "path", "p", "", "GHORG_ABSOLUTE_PATH_TO_CLONE_TO - absolute path the ghorg_* directory will be created. Must end with / (default $HOME/Desktop/ghorg)")
cloneCmd.Flags().StringVarP(&branch, "branch", "b", "", "GHORG_BRANCH - branch left checked out for each repo cloned (default master)")
cloneCmd.Flags().StringVarP(&token, "token", "t", "", "GHORG_GITHUB_TOKEN/GHORG_GITLAB_TOKEN/GHORG_GITEA_TOKEN/GHORG_BITBUCKET_APP_PASSWORD - scm token to clone with")
cloneCmd.Flags().StringVarP(&bitbucketUsername, "bitbucket-username", "", "", "GHORG_BITBUCKET_USERNAME - bitbucket only: username associated with the app password")
cloneCmd.Flags().StringVarP(&scmType, "scm", "s", "", "GHORG_SCM_TYPE - type of scm used, github, gitlab, gitea or bitbucket (default github)")
cloneCmd.Flags().StringVarP(&cloneType, "clone-type", "c", "", "GHORG_CLONE_TYPE - clone target type, user or org (default org)")
cloneCmd.Flags().BoolVar(&skipArchived, "skip-archived", false, "GHORG_SKIP_ARCHIVED - skips archived repos, github/gitlab/gitea only")
cloneCmd.Flags().BoolVar(&skipForks, "skip-forks", false, "GHORG_SKIP_FORKS - skips repo if its a fork, github/gitlab/gitea only")
cloneCmd.Flags().BoolVar(&skipArchived, "preserve-dir", false, "GHORG_PRESERVE_DIRECTORY_STRUCTURE - clones repos in a directory structure that matches gitlab namespaces eg company/unit/subunit/app would clone into *_ghorg/unit/subunit/app, gitlab only")
cloneCmd.Flags().BoolVar(&backup, "backup", false, "GHORG_BACKUP - backup mode, clone as mirror, no working copy (ignores branch parameter)")
cloneCmd.Flags().StringVarP(&baseURL, "base-url", "", "", "GHORG_SCM_BASE_URL - change SCM base url, for on self hosted instances (currently gitlab, gitea and github (use format of https://git.mydomain.com/api/v3))")
cloneCmd.Flags().StringVarP(&concurrency, "concurrency", "", "", "GHORG_CONCURRENCY - max goroutines to spin up while cloning (default 25)")
cloneCmd.Flags().StringVarP(&topics, "topics", "", "", "GHORG_TOPICS - comma separated list of github/gitea topics to filter for")
cloneCmd.Flags().StringVarP(&outputDir, "output-dir", "", "", "GHORG_OUTPUT_DIR - name of directory repos will be cloned into, will force underscores and always append _ghorg (default {org/repo being cloned}_ghorg)")
cloneCmd.Flags().StringVarP(&matchPrefix, "match-prefix", "", "", "GHORG_MATCH_PREFIX - only clone repos with matching prefix, can be a comma separated list (default \"\")")
}
var cloneCmd = &cobra.Command{
Use: "clone",
Short: "Clone user or org repos from GitHub, GitLab, Gitea or Bitbucket",
Long: `Clone user or org repos from GitHub, GitLab, Gitea or Bitbucket. See $HOME/ghorg/conf.yaml for defaults, its likely you will need to update some of these values of use the flags to overwrite them. Values are set first by a default value, then based off what is set in $HOME/ghorg/conf.yaml, finally the cli flags, which have the highest level of precedence.`,
Run: cloneFunc,
}
func cloneFunc(cmd *cobra.Command, argz []string) {
if cmd.Flags().Changed("color") {
colorToggle := cmd.Flag("color").Value.String()
if colorToggle == "on" {
os.Setenv("GHORG_COLOR", colorToggle)
} else {
os.Setenv("GHORG_COLOR", "off")
}
}
if cmd.Flags().Changed("path") {
absolutePath := ensureTrailingSlash(cmd.Flag("path").Value.String())
os.Setenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO", absolutePath)
}
if cmd.Flags().Changed("protocol") {
protocol := cmd.Flag("protocol").Value.String()
os.Setenv("GHORG_CLONE_PROTOCOL", protocol)
}
if cmd.Flags().Changed("branch") {
os.Setenv("GHORG_BRANCH", cmd.Flag("branch").Value.String())
}
if cmd.Flags().Changed("bitbucket-username") {
os.Setenv("GHORG_BITBUCKET_USERNAME", cmd.Flag("bitbucket-username").Value.String())
}
if cmd.Flags().Changed("clone-type") {
cloneType := strings.ToLower(cmd.Flag("clone-type").Value.String())
os.Setenv("GHORG_CLONE_TYPE", cloneType)
}
if cmd.Flags().Changed("scm") {
scmType := strings.ToLower(cmd.Flag("scm").Value.String())
os.Setenv("GHORG_SCM_TYPE", scmType)
}
if cmd.Flags().Changed("base-url") {
url := cmd.Flag("base-url").Value.String()
os.Setenv("GHORG_SCM_BASE_URL", url)
}
if cmd.Flags().Changed("concurrency") {
g := cmd.Flag("concurrency").Value.String()
os.Setenv("GHORG_CONCURRENCY", g)
}
if cmd.Flags().Changed("topics") {
topics := cmd.Flag("topics").Value.String()
os.Setenv("GHORG_TOPICS", topics)
}
if cmd.Flags().Changed("match-prefix") {
prefix := cmd.Flag("match-prefix").Value.String()
os.Setenv("GHORG_MATCH_PREFIX", prefix)
}
if cmd.Flags().Changed("skip-archived") {
os.Setenv("GHORG_SKIP_ARCHIVED", "true")
}
if cmd.Flags().Changed("skip-forks") {
os.Setenv("GHORG_SKIP_FORKS", "true")
}
if cmd.Flags().Changed("preserve-dir") {
os.Setenv("GHORG_PRESERVE_DIRECTORY_STRUCTURE", "true")
}
if cmd.Flags().Changed("backup") {
os.Setenv("GHORG_BACKUP", "true")
}
if cmd.Flags().Changed("output-dir") {
d := cmd.Flag("output-dir").Value.String()
os.Setenv("GHORG_OUTPUT_DIR", d)
}
if len(argz) < 1 {
if os.Getenv("GHORG_SCM_TYPE") == "github" && os.Getenv("GHORG_CLONE_TYPE") == "user" {
argz = append(argz, "")
} else {
colorlog.PrintError("You must provide an org or user to clone")
os.Exit(1)
}
}
configs.GetOrSetToken()
if cmd.Flags().Changed("token") {
if os.Getenv("GHORG_SCM_TYPE") == "github" {
os.Setenv("GHORG_GITHUB_TOKEN", cmd.Flag("token").Value.String())
} else if os.Getenv("GHORG_SCM_TYPE") == "gitlab" {
os.Setenv("GHORG_GITLAB_TOKEN", cmd.Flag("token").Value.String())
} else if os.Getenv("GHORG_SCM_TYPE") == "bitbucket" {
os.Setenv("GHORG_BITBUCKET_APP_PASSWORD", cmd.Flag("token").Value.String())
} else if os.Getenv("GHORG_SCM_TYPE") == "gitea" {
os.Setenv("GHORG_GITEA_TOKEN", cmd.Flag("token").Value.String())
}
}
err := configs.VerifyTokenSet()
if err != nil {
colorlog.PrintError(err)
os.Exit(1)
}
err = configs.VerifyConfigsSetCorrectly()
if err != nil {
colorlog.PrintError(err)
os.Exit(1)
}
parseParentFolder(argz)
args = argz
targetCloneSource = argz[0]
CloneAllRepos()
}
// TODO: Figure out how to use go channels for this
func getAllOrgCloneUrls() ([]scm.Repo, error) {
return getCloneUrls(true)
}
// TODO: Figure out how to use go channels for this
func getAllUserCloneUrls() ([]scm.Repo, error) {
return getCloneUrls(false)
}
func getCloneUrls(isOrg bool) ([]scm.Repo, error) {
asciiTime()
PrintConfigs()
scmType := strings.ToLower(os.Getenv("GHORG_SCM_TYPE"))
if len(scmType) == 0 {
colorlog.PrintError("GHORG_SCM_TYPE not set")
os.Exit(1)
}
client, err := scm.GetClient(scmType)
if err != nil {
colorlog.PrintError(err)
os.Exit(1)
}
if isOrg {
return client.GetOrgRepos(targetCloneSource)
}
return client.GetUserRepos(targetCloneSource)
}
func createDirIfNotExist() {
if _, err := os.Stat(os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO") + parentFolder + "_ghorg"); os.IsNotExist(err) {
err = os.MkdirAll(os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO"), 0700)
if err != nil {
panic(err)
}
}
}
func repoExistsLocally(path string) bool {
if _, err := os.Stat(path); os.IsNotExist(err) {
return false
}
return true
}
func getAppNameFromURL(url string) string {
withGit := strings.Split(url, "/")
appName := withGit[len(withGit)-1]
split := strings.Split(appName, ".")
return strings.Join(split[0:len(split)-1], ".")
}
func printRemainingMessages() {
if len(cloneInfos) > 0 {
fmt.Println()
colorlog.PrintInfo("============ Info ============")
fmt.Println()
for _, i := range cloneInfos {
colorlog.PrintInfo(i)
}
fmt.Println()
}
if len(cloneErrors) > 0 {
fmt.Println()
colorlog.PrintError("============ Issues ============")
fmt.Println()
for _, e := range cloneErrors {
colorlog.PrintError(e)
}
fmt.Println()
}
}
func readGhorgIgnore() ([]string, error) {
file, err := os.Open(configs.GhorgIgnoreLocation())
if err != nil {
return nil, err
}
defer file.Close()
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
if scanner.Text() != "" {
lines = append(lines, scanner.Text())
}
}
return lines, scanner.Err()
}
// CloneAllRepos clones all repos
func CloneAllRepos() {
// resc, errc, infoc := make(chan string), make(chan error), make(chan error)
var cloneTargets []scm.Repo
var err error
if os.Getenv("GHORG_CLONE_TYPE") == "org" {
cloneTargets, err = getAllOrgCloneUrls()
} else if os.Getenv("GHORG_CLONE_TYPE") == "user" {
cloneTargets, err = getAllUserCloneUrls()
} else {
colorlog.PrintError("GHORG_CLONE_TYPE not set or unsupported")
os.Exit(1)
}
if err != nil {
colorlog.PrintError("Encountered an error, aborting")
fmt.Println(err)
os.Exit(1)
}
if len(cloneTargets) == 0 {
colorlog.PrintInfo("No repos found for " + os.Getenv("GHORG_SCM_TYPE") + " " + os.Getenv("GHORG_CLONE_TYPE") + ": " + targetCloneSource + ", check spelling and verify clone-type (user/org) is set correctly e.g. -c=user")
os.Exit(0)
}
// filter repos down based on ghorgignore if one exists
_, err = os.Stat(configs.GhorgIgnoreLocation())
if !os.IsNotExist(err) {
// Open the file parse each line and remove cloneTargets containing
toIgnore, err := readGhorgIgnore()
if err != nil {
colorlog.PrintError("Error parsing your ghorgignore, aborting")
fmt.Println(err)
os.Exit(1)
}
colorlog.PrintInfo("Using ghorgignore, filtering repos down...")
fmt.Println("")
filteredCloneTargets := []scm.Repo{}
var flag bool
for _, cloned := range cloneTargets {
flag = false
for _, ignore := range toIgnore {
if strings.Contains(cloned.URL, ignore) {
flag = true
}
}
			if !flag {
filteredCloneTargets = append(filteredCloneTargets, cloned)
}
}
cloneTargets = filteredCloneTargets
}
colorlog.PrintInfo(strconv.Itoa(len(cloneTargets)) + " repos found in " + targetCloneSource)
fmt.Println()
createDirIfNotExist()
l, err := strconv.Atoi(os.Getenv("GHORG_CONCURRENCY"))
if err != nil {
log.Fatal("Could not determine GHORG_CONCURRENCY")
}
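	// cap the number of repos cloned/pulled concurrently at GHORG_CONCURRENCY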
limit := limiter.NewConcurrencyLimiter(l)
for _, target := range cloneTargets {
appName := getAppNameFromURL(target.URL)
branch := target.CloneBranch
repo := target
limit.Execute(func() {
path := appName
if repo.Path != "" && os.Getenv("GHORG_PRESERVE_DIRECTORY_STRUCTURE") == "true" {
path = repo.Path
}
repoDir := os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO") + parentFolder + "_ghorg" + "/" + path
if os.Getenv("GHORG_BACKUP") == "true" {
repoDir = os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO") + parentFolder + "_ghorg_backup" + "/" + path
}
			if repoExistsLocally(repoDir) {
if os.Getenv("GHORG_BACKUP") == "true" {
cmd := exec.Command("git", "remote", "update")
cmd.Dir = repoDir
err := cmd.Run()
if err != nil {
e := fmt.Sprintf("Could not update remotes in Repo: %s Error: %v", repo.URL, err)
cloneErrors = append(cloneErrors, e)
return
}
} else {
cmd := exec.Command("git", "checkout", branch)
cmd.Dir = repoDir
err := cmd.Run()
if err != nil {
e := fmt.Sprintf("Could not checkout out %s, branch may not exist, no changes made Repo: %s Error: %v", branch, repo.URL, err)
cloneInfos = append(cloneInfos, e)
return
}
cmd = exec.Command("git", "clean", "-f", "-d")
cmd.Dir = repoDir
err = cmd.Run()
if err != nil {
e := fmt.Sprintf("Problem running git clean: %s Error: %v", repo.URL, err)
cloneErrors = append(cloneErrors, e)
return
}
cmd = exec.Command("git", "reset", "--hard", "origin/"+branch)
cmd.Dir = repoDir
err = cmd.Run()
if err != nil {
e := fmt.Sprintf("Problem resetting %s Repo: %s Error: %v", branch, repo.URL, err)
cloneErrors = append(cloneErrors, e)
return
}
// TODO: handle case where repo was removed, should not give user an error
cmd = exec.Command("git", "pull", "origin", branch)
cmd.Dir = repoDir
err = cmd.Run()
if err != nil {
e := fmt.Sprintf("Problem trying to pull %v Repo: %s Error: %v", branch, repo.URL, err)
cloneErrors = append(cloneErrors, e)
return
}
}
} else {
// if https clone and github/gitlab add personal access token to url
args := []string{"clone", repo.CloneURL, repoDir}
if os.Getenv("GHORG_BACKUP") == "true" {
args = append(args, "--mirror")
}
cmd := exec.Command("git", args...)
err := cmd.Run()
if err != nil {
e := fmt.Sprintf("Problem trying to clone Repo: %s Error: %v", repo.URL, err)
cloneErrors = append(cloneErrors, e)
return
}
if os.Getenv("GHORG_BRANCH") != "" {
cmd = exec.Command("git", "checkout", branch)
cmd.Dir = repoDir
err = cmd.Run()
if err != nil {
e := fmt.Sprintf("Could not checkout out %s, branch may not exist, no changes made Repo: %s Error: %v", branch, repo.URL, err)
cloneInfos = append(cloneInfos, e)
return
}
}
// TODO: make configs around remote name
// we clone with api-key in clone url
args = []string{"remote", "set-url", "origin", repo.URL}
cmd = exec.Command("git", args...)
cmd.Dir = repoDir
err = cmd.Run()
if err != nil {
e := fmt.Sprintf("Problem trying to set remote on Repo: %s Error: %v", repo.URL, err)
cloneErrors = append(cloneErrors, e)
return
}
}
colorlog.PrintSuccess("Success cloning repo: " + repo.URL + " -> branch: " + branch)
})
}
limit.Wait()
printRemainingMessages()
// TODO: fix all these if else checks with ghorg_backups
if os.Getenv("GHORG_BACKUP") == "true" {
colorlog.PrintSuccess(fmt.Sprintf("Finished! %s%s_ghorg_backup", os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO"), parentFolder))
} else {
colorlog.PrintSuccess(fmt.Sprintf("Finished! %s%s_ghorg", os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO"), parentFolder))
}
}
func asciiTime() {
colorlog.PrintInfo(
`
+-+-+-+-+ +-+-+ +-+-+-+-+-+
|T|I|M|E| |T|O| |G|H|O|R|G|
+-+-+-+-+ +-+-+ +-+-+-+-+-+
`)
}
// PrintConfigs shows the user what is set before cloning
func PrintConfigs() {
colorlog.PrintInfo("*************************************")
colorlog.PrintInfo("* SCM : " + os.Getenv("GHORG_SCM_TYPE"))
colorlog.PrintInfo("* Type : " + os.Getenv("GHORG_CLONE_TYPE"))
colorlog.PrintInfo("* Protocol : " + os.Getenv("GHORG_CLONE_PROTOCOL"))
colorlog.PrintInfo("* Location : " + os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO"))
colorlog.PrintInfo("* Concurrency : " + os.Getenv("GHORG_CONCURRENCY"))
if os.Getenv("GHORG_BRANCH") != "" {
colorlog.PrintInfo("* Branch : " + getGhorgBranch())
}
if os.Getenv("GHORG_SCM_BASE_URL") != "" {
colorlog.PrintInfo("* Base URL : " + os.Getenv("GHORG_SCM_BASE_URL"))
}
if os.Getenv("GHORG_SKIP_ARCHIVED") == "true" {
colorlog.PrintInfo("* Skip Archived : " + os.Getenv("GHORG_SKIP_ARCHIVED"))
}
if os.Getenv("GHORG_SKIP_FORKS") == "true" {
colorlog.PrintInfo("* Skip Forks : " + os.Getenv("GHORG_SKIP_FORKS"))
}
if os.Getenv("GHORG_BACKUP") == "true" {
colorlog.PrintInfo("* Backup : " + os.Getenv("GHORG_BACKUP"))
}
	if configs.GhorgIgnoreDetected() {
colorlog.PrintInfo("* Ghorgignore : true")
}
if os.Getenv("GHORG_OUTPUT_DIR") != "" {
colorlog.PrintInfo("* Output Dir : " + parentFolder + "_ghorg")
}
colorlog.PrintInfo("*************************************")
fmt.Println("")
}
func getGhorgBranch() string {
if os.Getenv("GHORG_BRANCH") == "" {
return "default branch"
}
return os.Getenv("GHORG_BRANCH")
}
func ensureTrailingSlash(path string) string {
	if strings.HasSuffix(path, "/") {
return path
}
return path + "/"
}
func addTokenToHTTPSCloneURL(url string, token string) string {
splitURL := strings.Split(url, "https://")
if os.Getenv("GHORG_SCM_TYPE") == "gitlab" {
return "https://oauth2:" + token + "@" + splitURL[1]
}
return "https://" + token + "@" + splitURL[1]
}
func parseParentFolder(argz []string) {
if os.Getenv("GHORG_OUTPUT_DIR") != "" {
parentFolder = strings.ReplaceAll(os.Getenv("GHORG_OUTPUT_DIR"), "-", "_")
return
}
pf := strings.ReplaceAll(argz[0], "-", "_")
parentFolder = strings.ToLower(pf)
}
| [
"\"GHORG_SCM_TYPE\"",
"\"GHORG_CLONE_TYPE\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_CLONE_TYPE\"",
"\"GHORG_CLONE_TYPE\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_CLONE_TYPE\"",
"\"GHORG_CONCURRENCY\"",
"\"GHORG_PRESERVE_DIRECTORY_STRUCTURE\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_BACKUP\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_BACKUP\"",
"\"GHORG_BACKUP\"",
"\"GHORG_BRANCH\"",
"\"GHORG_BACKUP\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_CLONE_TYPE\"",
"\"GHORG_CLONE_PROTOCOL\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_CONCURRENCY\"",
"\"GHORG_BRANCH\"",
"\"GHORG_SCM_BASE_URL\"",
"\"GHORG_SCM_BASE_URL\"",
"\"GHORG_SKIP_ARCHIVED\"",
"\"GHORG_SKIP_ARCHIVED\"",
"\"GHORG_SKIP_FORKS\"",
"\"GHORG_SKIP_FORKS\"",
"\"GHORG_BACKUP\"",
"\"GHORG_BACKUP\"",
"\"GHORG_OUTPUT_DIR\"",
"\"GHORG_BRANCH\"",
"\"GHORG_BRANCH\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_OUTPUT_DIR\"",
"\"GHORG_OUTPUT_DIR\""
]
| []
| [
"GHORG_SKIP_FORKS",
"GHORG_CONCURRENCY",
"GHORG_CLONE_PROTOCOL",
"GHORG_PRESERVE_DIRECTORY_STRUCTURE",
"GHORG_CLONE_TYPE",
"GHORG_OUTPUT_DIR",
"GHORG_SCM_TYPE",
"GHORG_ABSOLUTE_PATH_TO_CLONE_TO",
"GHORG_SCM_BASE_URL",
"GHORG_BACKUP",
"GHORG_BRANCH",
"GHORG_SKIP_ARCHIVED"
]
| [] | ["GHORG_SKIP_FORKS", "GHORG_CONCURRENCY", "GHORG_CLONE_PROTOCOL", "GHORG_PRESERVE_DIRECTORY_STRUCTURE", "GHORG_CLONE_TYPE", "GHORG_OUTPUT_DIR", "GHORG_SCM_TYPE", "GHORG_ABSOLUTE_PATH_TO_CLONE_TO", "GHORG_SCM_BASE_URL", "GHORG_BACKUP", "GHORG_BRANCH", "GHORG_SKIP_ARCHIVED"] | go | 12 | 0 |