filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---
daemon/daemon.go
|
// Package daemon exposes the functions that occur on the host server
// that the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/docker/docker/pkg/fileutils"
"go.etcd.io/bbolt"
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"github.com/containerd/containerd"
"github.com/containerd/containerd/defaults"
"github.com/containerd/containerd/pkg/dialer"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/sys"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/builder"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/daemon/events"
"github.com/docker/docker/daemon/exec"
"github.com/docker/docker/daemon/images"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/daemon/network"
"github.com/docker/docker/errdefs"
bkconfig "github.com/moby/buildkit/cmd/buildkitd/config"
"github.com/moby/buildkit/util/resolver"
"github.com/sirupsen/logrus"
// register graph drivers
_ "github.com/docker/docker/daemon/graphdriver/register"
"github.com/docker/docker/daemon/stats"
dmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/libcontainerd"
libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/plugin"
pluginexec "github.com/docker/docker/plugin/executor/containerd"
refstore "github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
volumesservice "github.com/docker/docker/volume/service"
"github.com/docker/libnetwork"
"github.com/docker/libnetwork/cluster"
nwconfig "github.com/docker/libnetwork/config"
"github.com/moby/locker"
"github.com/pkg/errors"
"golang.org/x/sync/semaphore"
)
// ContainersNamespace is the name of the namespace used for users' containers
const (
ContainersNamespace = "moby"
)
var (
errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
)
// Daemon holds information about the Docker daemon.
type Daemon struct {
ID string
repository string
containers container.Store
containersReplica container.ViewDB
execCommands *exec.Store
imageService *images.ImageService
idIndex *truncindex.TruncIndex
configStore *config.Config
statsCollector *stats.Collector
defaultLogConfig containertypes.LogConfig
RegistryService registry.Service
EventsService *events.Events
netController libnetwork.NetworkController
volumes *volumesservice.VolumesService
discoveryWatcher discovery.Reloader
root string
seccompEnabled bool
apparmorEnabled bool
shutdown bool
idMapping *idtools.IdentityMapping
// TODO: move graphDrivers field to an InfoService
graphDrivers map[string]string // By operating system
PluginStore *plugin.Store // todo: remove
pluginManager *plugin.Manager
linkIndex *linkIndex
containerdCli *containerd.Client
containerd libcontainerdtypes.Client
defaultIsolation containertypes.Isolation // Default isolation mode on Windows
clusterProvider cluster.Provider
cluster Cluster
genericResources []swarm.GenericResource
metricsPluginListener net.Listener
machineMemory uint64
seccompProfile []byte
seccompProfilePath string
diskUsageRunning int32
pruneRunning int32
hosts map[string]bool // hosts stores the addresses the daemon is listening on
startupDone chan struct{}
attachmentStore network.AttachmentStore
attachableNetworkLock *locker.Locker
// This is used for Windows which doesn't currently support running on containerd
// It stores metadata for the content store (used for manifest caching)
// This needs to be closed on daemon exit
mdDB *bbolt.DB
}
// StoreHosts stores the addresses the daemon is listening on
func (daemon *Daemon) StoreHosts(hosts []string) {
if daemon.hosts == nil {
daemon.hosts = make(map[string]bool)
}
for _, h := range hosts {
daemon.hosts[h] = true
}
}
// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
return daemon.configStore != nil && daemon.configStore.Experimental
}
// Features returns the features map from configStore
func (daemon *Daemon) Features() *map[string]bool {
return &daemon.configStore.Features
}
// RegistryHosts returns registry configuration in containerd resolvers format
func (daemon *Daemon) RegistryHosts() docker.RegistryHosts {
var (
registryKey = "docker.io"
mirrors = make([]string, len(daemon.configStore.Mirrors))
m = map[string]bkconfig.RegistryConfig{}
)
// must trim "https://" or "http://" prefix
for i, v := range daemon.configStore.Mirrors {
if uri, err := url.Parse(v); err == nil {
v = uri.Host
}
mirrors[i] = v
}
// set mirrors for default registry
m[registryKey] = bkconfig.RegistryConfig{Mirrors: mirrors}
for _, v := range daemon.configStore.InsecureRegistries {
u, err := url.Parse(v)
c := bkconfig.RegistryConfig{}
if err == nil {
v = u.Host
t := true
if u.Scheme == "http" {
c.PlainHTTP = &t
} else {
c.Insecure = &t
}
}
m[v] = c
}
for k, v := range m {
if d, err := registry.HostCertsDir(k); err == nil {
v.TLSConfigDir = []string{d}
m[k] = v
}
}
certsDir := registry.CertsDir()
if fis, err := ioutil.ReadDir(certsDir); err == nil {
for _, fi := range fis {
if _, ok := m[fi.Name()]; !ok {
m[fi.Name()] = bkconfig.RegistryConfig{
TLSConfigDir: []string{filepath.Join(certsDir, fi.Name())},
}
}
}
}
return resolver.NewRegistryConfig(m)
}
func (daemon *Daemon) restore() error {
var mapLock sync.Mutex
containers := make(map[string]*container.Container)
logrus.Info("Loading containers: start.")
dir, err := ioutil.ReadDir(daemon.repository)
if err != nil {
return err
}
// parallelLimit is the maximum number of parallel startup jobs that we
// allow (this is the limit used for all startup semaphores). The multiplier
// (128) was chosen after some fairly significant benchmarking -- don't change
// it unless you've tested it significantly (this value is adjusted if
// RLIMIT_NOFILE is small to avoid EMFILE).
parallelLimit := adjustParallelLimit(len(dir), 128*runtime.NumCPU())
// Re-used for all parallel startup jobs.
var group sync.WaitGroup
sem := semaphore.NewWeighted(int64(parallelLimit))
for _, v := range dir {
group.Add(1)
go func(id string) {
defer group.Done()
_ = sem.Acquire(context.Background(), 1)
defer sem.Release(1)
log := logrus.WithField("container", id)
c, err := daemon.load(id)
if err != nil {
log.WithError(err).Error("failed to load container")
return
}
if !system.IsOSSupported(c.OS) {
log.Errorf("failed to load container: %s (%q)", system.ErrNotSupportedOperatingSystem, c.OS)
return
}
// Ignore the container if it does not support the current driver being used by the graph
currentDriverForContainerOS := daemon.graphDrivers[c.OS]
if (c.Driver == "" && currentDriverForContainerOS == "aufs") || c.Driver == currentDriverForContainerOS {
rwlayer, err := daemon.imageService.GetLayerByID(c.ID, c.OS)
if err != nil {
log.WithError(err).Error("failed to load container mount")
return
}
c.RWLayer = rwlayer
log.WithFields(logrus.Fields{
"running": c.IsRunning(),
"paused": c.IsPaused(),
}).Debug("loaded container")
mapLock.Lock()
containers[c.ID] = c
mapLock.Unlock()
} else {
log.Debugf("cannot load container because it was created with another storage driver")
}
}(v.Name())
}
group.Wait()
removeContainers := make(map[string]*container.Container)
restartContainers := make(map[*container.Container]chan struct{})
activeSandboxes := make(map[string]interface{})
for _, c := range containers {
group.Add(1)
go func(c *container.Container) {
defer group.Done()
_ = sem.Acquire(context.Background(), 1)
defer sem.Release(1)
log := logrus.WithField("container", c.ID)
if err := daemon.registerName(c); err != nil {
log.WithError(err).Errorf("failed to register container name: %s", c.Name)
mapLock.Lock()
delete(containers, c.ID)
mapLock.Unlock()
return
}
if err := daemon.Register(c); err != nil {
log.WithError(err).Error("failed to register container")
mapLock.Lock()
delete(containers, c.ID)
mapLock.Unlock()
return
}
// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
// We should rewrite it to use the daemon defaults.
// Fixes https://github.com/docker/docker/issues/22536
if c.HostConfig.LogConfig.Type == "" {
if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
log.WithError(err).Error("failed to verify log config for container")
}
}
}(c)
}
group.Wait()
for _, c := range containers {
group.Add(1)
go func(c *container.Container) {
defer group.Done()
_ = sem.Acquire(context.Background(), 1)
defer sem.Release(1)
log := logrus.WithField("container", c.ID)
daemon.backportMountSpec(c)
if err := daemon.checkpointAndSave(c); err != nil {
log.WithError(err).Error("error saving backported mountspec to disk")
}
daemon.setStateCounter(c)
log.WithFields(logrus.Fields{
"running": c.IsRunning(),
"paused": c.IsPaused(),
}).Debug("restoring container")
var (
err error
alive bool
ec uint32
exitedAt time.Time
process libcontainerdtypes.Process
)
alive, _, process, err = daemon.containerd.Restore(context.Background(), c.ID, c.InitializeStdio)
if err != nil && !errdefs.IsNotFound(err) {
log.WithError(err).Error("failed to restore container with containerd")
return
}
if !alive && process != nil {
ec, exitedAt, err = process.Delete(context.Background())
if err != nil && !errdefs.IsNotFound(err) {
log.WithError(err).Error("failed to delete container from containerd")
return
}
} else if !daemon.configStore.LiveRestoreEnabled {
if err := daemon.shutdownContainer(c); err != nil && !errdefs.IsNotFound(err) {
log.WithError(err).Error("error shutting down container")
return
}
c.ResetRestartManager(false)
}
if c.IsRunning() || c.IsPaused() {
c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
if c.IsPaused() && alive {
s, err := daemon.containerd.Status(context.Background(), c.ID)
if err != nil {
log.WithError(err).Error("failed to get container status")
} else {
log.WithField("state", s).Info("restored container paused")
switch s {
case containerd.Paused, containerd.Pausing:
// nothing to do
case containerd.Stopped:
alive = false
case containerd.Unknown:
log.Error("unknown status for paused container during restore")
default:
// running
c.Lock()
c.Paused = false
daemon.setStateCounter(c)
if err := c.CheckpointTo(daemon.containersReplica); err != nil {
log.WithError(err).Error("failed to update paused container state")
}
c.Unlock()
}
}
}
if !alive {
c.Lock()
c.SetStopped(&container.ExitStatus{ExitCode: int(ec), ExitedAt: exitedAt})
daemon.Cleanup(c)
if err := c.CheckpointTo(daemon.containersReplica); err != nil {
log.WithError(err).Error("failed to update stopped container state")
}
c.Unlock()
}
// we call Mount and then Unmount to get BaseFs of the container
if err := daemon.Mount(c); err != nil {
// The mount is unlikely to fail. However, in case mount fails
// the container should be allowed to restore here. Some functionalities
// (like docker exec -u user) might be missing but container is able to be
// stopped/restarted/removed.
// See #29365 for related information.
// The error is only logged here.
log.WithError(err).Warn("failed to mount container to get BaseFs path")
} else {
if err := daemon.Unmount(c); err != nil {
log.WithError(err).Warn("failed to umount container to get BaseFs path")
}
}
c.ResetRestartManager(false)
if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
options, err := daemon.buildSandboxOptions(c)
if err != nil {
log.WithError(err).Warn("failed to build sandbox option to restore container")
}
mapLock.Lock()
activeSandboxes[c.NetworkSettings.SandboxID] = options
mapLock.Unlock()
}
}
// get list of containers we need to restart
// Do not autostart containers which have endpoints in a swarm-scope
// network yet, since the cluster is not initialized. We will start
// them after the cluster is initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
mapLock.Lock()
restartContainers[c] = make(chan struct{})
mapLock.Unlock()
} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
mapLock.Lock()
removeContainers[c.ID] = c
mapLock.Unlock()
}
c.Lock()
if c.RemovalInProgress {
// We probably crashed in the middle of a removal, reset
// the flag.
//
// We DO NOT remove the container here as we do not
// know if the user had requested for either the
// associated volumes, network links or both to also
// be removed. So we put the container in the "dead"
// state and leave further processing up to them.
c.RemovalInProgress = false
c.Dead = true
if err := c.CheckpointTo(daemon.containersReplica); err != nil {
log.WithError(err).Error("failed to update RemovalInProgress container state")
} else {
log.Debugf("reset RemovalInProgress state for container")
}
}
c.Unlock()
}(c)
}
group.Wait()
daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
if err != nil {
return fmt.Errorf("Error initializing network controller: %v", err)
}
// Now that all the containers are registered, register the links
for _, c := range containers {
group.Add(1)
go func(c *container.Container) {
_ = sem.Acquire(context.Background(), 1)
if err := daemon.registerLinks(c, c.HostConfig); err != nil {
logrus.WithField("container", c.ID).WithError(err).Error("failed to register link for container")
}
sem.Release(1)
group.Done()
}(c)
}
group.Wait()
for c, notifier := range restartContainers {
group.Add(1)
go func(c *container.Container, chNotify chan struct{}) {
_ = sem.Acquire(context.Background(), 1)
log := logrus.WithField("container", c.ID)
log.Debug("starting container")
// ignore errors here as this is a best effort to wait for children to be
// running before we try to start the container
children := daemon.children(c)
timeout := time.NewTimer(5 * time.Second)
defer timeout.Stop()
for _, child := range children {
if notifier, exists := restartContainers[child]; exists {
select {
case <-notifier:
case <-timeout.C:
}
}
}
// Make sure networks are available before starting
daemon.waitForNetworks(c)
if err := daemon.containerStart(c, "", "", true); err != nil {
log.WithError(err).Error("failed to start container")
}
close(chNotify)
sem.Release(1)
group.Done()
}(c, notifier)
}
group.Wait()
for id := range removeContainers {
group.Add(1)
go func(cid string) {
_ = sem.Acquire(context.Background(), 1)
if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
logrus.WithField("container", cid).WithError(err).Error("failed to remove container")
}
sem.Release(1)
group.Done()
}(id)
}
group.Wait()
// any containers that were started above would already have had this done,
// however we need to now prepare the mountpoints for the rest of the containers as well.
// This shouldn't cause any issue running on the containers that already had this run.
// This must be run after any containers with a restart policy so that containerized plugins
// can have a chance to be running before we try to initialize them.
for _, c := range containers {
// if the container has restart policy, do not
// prepare the mountpoints since it has been done on restarting.
// This is to speed up the daemon start when a restart container
// has a volume and the volume driver is not available.
if _, ok := restartContainers[c]; ok {
continue
} else if _, ok := removeContainers[c.ID]; ok {
// container is automatically removed, skip it.
continue
}
group.Add(1)
go func(c *container.Container) {
_ = sem.Acquire(context.Background(), 1)
if err := daemon.prepareMountPoints(c); err != nil {
logrus.WithField("container", c.ID).WithError(err).Error("failed to prepare mountpoints for container")
}
sem.Release(1)
group.Done()
}(c)
}
group.Wait()
logrus.Info("Loading containers: done.")
return nil
}
// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers() {
ctx := context.Background()
// parallelLimit is the maximum number of parallel startup jobs that we
// allow (this is the limit used for all startup semaphores). The multiplier
// (128) was chosen after some fairly significant benchmarking -- don't change
// it unless you've tested it significantly (this value is adjusted if
// RLIMIT_NOFILE is small to avoid EMFILE).
parallelLimit := adjustParallelLimit(len(daemon.List()), 128*runtime.NumCPU())
var group sync.WaitGroup
sem := semaphore.NewWeighted(int64(parallelLimit))
for _, c := range daemon.List() {
if !c.IsRunning() && !c.IsPaused() {
// Autostart all the containers which have a
// swarm endpoint, now that the cluster is
// initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
group.Add(1)
go func(c *container.Container) {
if err := sem.Acquire(ctx, 1); err != nil {
// ctx is done.
group.Done()
return
}
if err := daemon.containerStart(c, "", "", true); err != nil {
logrus.WithField("container", c.ID).WithError(err).Error("failed to start swarm container")
}
sem.Release(1)
group.Done()
}(c)
}
}
}
group.Wait()
}
// waitForNetworks is used during daemon initialization when starting up containers
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
if daemon.discoveryWatcher == nil {
return
}
// Make sure if the container has a network that requires discovery that the discovery service is available before starting
for netName := range c.NetworkSettings.Networks {
// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
// Most likely this is because the K/V store used for discovery is in a container and needs to be started
if _, err := daemon.netController.NetworkByName(netName); err != nil {
if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
continue
}
// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
// FIXME: why is this slow???
dur := 60 * time.Second
timer := time.NewTimer(dur)
logrus.WithField("container", c.ID).Debugf("Container %s waiting for network to be ready", c.Name)
select {
case <-daemon.discoveryWatcher.ReadyCh():
case <-timer.C:
}
timer.Stop()
return
}
}
}
func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.children(c)
}
// parents returns the parent containers of the given container,
// keyed by name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.parents(c)
}
func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
fullName := path.Join(parent.Name, alias)
if err := daemon.containersReplica.ReserveName(fullName, child.ID); err != nil {
if err == container.ErrNameReserved {
logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
return nil
}
return err
}
daemon.linkIndex.link(parent, child, fullName)
return nil
}
// DaemonJoinsCluster informs the daemon that it has joined the cluster and
// provides the handler to query the cluster component
func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) {
daemon.setClusterProvider(clusterProvider)
}
// DaemonLeavesCluster informs the daemon that it has left the cluster
func (daemon *Daemon) DaemonLeavesCluster() {
// Daemon is in charge of removing the attachable networks with
// connected containers when the node leaves the swarm
daemon.clearAttachableNetworks()
// We no longer need the cluster provider, stop it now so that
// the network agent will stop listening to cluster events.
daemon.setClusterProvider(nil)
// Wait for the networking cluster agent to stop
daemon.netController.AgentStopWait()
// Daemon is in charge of removing the ingress network when the
// node leaves the swarm. Wait for job to be done or timeout.
// This is called also on graceful daemon shutdown. We need to
// wait, because the ingress release has to happen before the
// network controller is stopped.
if done, err := daemon.ReleaseIngress(); err == nil {
timeout := time.NewTimer(5 * time.Second)
defer timeout.Stop()
select {
case <-done:
case <-timeout.C:
logrus.Warn("timeout while waiting for ingress network removal")
}
} else {
logrus.Warnf("failed to initiate ingress network removal: %v", err)
}
daemon.attachmentStore.ClearAttachments()
}
// setClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {
daemon.clusterProvider = clusterProvider
daemon.netController.SetClusterProvider(clusterProvider)
daemon.attachableNetworkLock = locker.New()
}
// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with the swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
if daemon.configStore == nil {
return nil
}
return daemon.configStore.IsSwarmCompatible()
}
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.Store) (daemon *Daemon, err error) {
setDefaultMtu(config)
registryService, err := registry.NewService(config.ServiceOptions)
if err != nil {
return nil, err
}
// Ensure that we have a correct root key limit for launching containers.
if err := ModifyRootKeyLimit(); err != nil {
logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
}
// Ensure we have compatible and valid configuration options
if err := verifyDaemonSettings(config); err != nil {
return nil, err
}
// Do we have a disabled network?
config.DisableBridge = isBridgeNetworkDisabled(config)
// Setup the resolv.conf
setupResolvConf(config)
// Verify the platform is supported as a daemon
if !platformSupported {
return nil, errSystemNotSupported
}
// Validate platform-specific requirements
if err := checkSystem(); err != nil {
return nil, err
}
idMapping, err := setupRemappedRoot(config)
if err != nil {
return nil, err
}
rootIDs := idMapping.RootPair()
if err := setupDaemonProcess(config); err != nil {
return nil, err
}
// set up the tmpDir to use a canonical path
tmp, err := prepareTempDir(config.Root, rootIDs)
if err != nil {
return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
}
realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
if err != nil {
return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
}
if isWindows {
if _, err := os.Stat(realTmp); err != nil && os.IsNotExist(err) {
if err := system.MkdirAll(realTmp, 0700); err != nil {
return nil, fmt.Errorf("Unable to create the TempDir (%s): %s", realTmp, err)
}
}
os.Setenv("TEMP", realTmp)
os.Setenv("TMP", realTmp)
} else {
os.Setenv("TMPDIR", realTmp)
}
d := &Daemon{
configStore: config,
PluginStore: pluginStore,
startupDone: make(chan struct{}),
}
// Ensure the daemon is properly shutdown if there is a failure during
// initialization
defer func() {
if err != nil {
if err := d.Shutdown(); err != nil {
logrus.Error(err)
}
}
}()
if err := d.setGenericResources(config); err != nil {
return nil, err
}
// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
// on Windows to dump Go routine stacks
stackDumpDir := config.Root
if execRoot := config.GetExecRoot(); execRoot != "" {
stackDumpDir = execRoot
}
d.setupDumpStackTrap(stackDumpDir)
if err := d.setupSeccompProfile(); err != nil {
return nil, err
}
// Set the default isolation mode (only applicable on Windows)
if err := d.setDefaultIsolation(); err != nil {
return nil, fmt.Errorf("error setting default isolation mode: %v", err)
}
if err := configureMaxThreads(config); err != nil {
logrus.Warnf("Failed to configure golang's threads limit: %v", err)
}
// ensureDefaultAppArmorProfile does nothing if apparmor is disabled
if err := ensureDefaultAppArmorProfile(); err != nil {
logrus.Errorf(err.Error())
}
daemonRepo := filepath.Join(config.Root, "containers")
if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil {
return nil, err
}
// Create the directory where we'll store the runtime scripts (i.e. in
// order to support runtimeArgs)
daemonRuntimes := filepath.Join(config.Root, "runtimes")
if err := system.MkdirAll(daemonRuntimes, 0700); err != nil {
return nil, err
}
if err := d.loadRuntimes(); err != nil {
return nil, err
}
if isWindows {
if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0); err != nil {
return nil, err
}
}
// On Windows we don't support the environment variable, or a user supplied graphdriver
// as Windows has no choice in terms of which graphdrivers to use. It's a case of
// running Windows containers on Windows - windowsfilter, running Linux containers on Windows,
// lcow. Unix platforms however run a single graphdriver for all containers, and it can
// be set through an environment variable, a daemon start parameter, or chosen through
// initialization of the layerstore through driver priority order for example.
d.graphDrivers = make(map[string]string)
layerStores := make(map[string]layer.Store)
if isWindows {
d.graphDrivers[runtime.GOOS] = "windowsfilter"
if system.LCOWSupported() {
d.graphDrivers["linux"] = "lcow"
}
} else {
driverName := os.Getenv("DOCKER_DRIVER")
if driverName == "" {
driverName = config.GraphDriver
} else {
logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName)
}
d.graphDrivers[runtime.GOOS] = driverName // May still be empty. Layerstore init determines instead.
}
d.RegistryService = registryService
logger.RegisterPluginGetter(d.PluginStore)
metricsSockPath, err := d.listenMetricsSock()
if err != nil {
return nil, err
}
registerMetricsPluginCallback(d.PluginStore, metricsSockPath)
backoffConfig := backoff.DefaultConfig
backoffConfig.MaxDelay = 3 * time.Second
connParams := grpc.ConnectParams{
Backoff: backoffConfig,
}
gopts := []grpc.DialOption{
// WithBlock makes sure that the following containerd request
// is reliable.
//
// NOTE: In one edge case, under high load pressure the kernel
// OOM-kills dockerd, containerd and the containerd-shims. When
// both dockerd and containerd restart, containerd takes some
// time to recover all the existing containers. Until containerd
// is serving again, dockerd requests fail with gRPC errors.
// Worse, the restore action ignores any non-NotFound errors and
// reports a running state for containers that have already
// stopped, which is unexpected behavior, and dockerd must be
// restarted to recover.
//
// Adding WithBlock prevents this edge case, and in the common
// case containerd is serving again shortly, so there is no harm
// in using WithBlock for the containerd connection.
grpc.WithBlock(),
grpc.WithInsecure(),
grpc.WithConnectParams(connParams),
grpc.WithContextDialer(dialer.ContextDialer),
// TODO(stevvooe): We may need to allow configuration of this on the client.
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
}
if config.ContainerdAddr != "" {
d.containerdCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(config.ContainerdNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
}
}
createPluginExec := func(m *plugin.Manager) (plugin.Executor, error) {
var pluginCli *containerd.Client
// Windows is not currently using containerd, keep the
// client as nil
if config.ContainerdAddr != "" {
pluginCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(config.ContainerdPluginNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
}
}
var rt types.Runtime
if runtime := config.GetRuntime(config.GetDefaultRuntimeName()); runtime != nil {
rt = *runtime
}
return pluginexec.New(ctx, getPluginExecRoot(config.Root), pluginCli, config.ContainerdPluginNamespace, m, rt)
}
// Plugin system initialization should happen before restore. Do not change order.
d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
Root: filepath.Join(config.Root, "plugins"),
ExecRoot: getPluginExecRoot(config.Root),
Store: d.PluginStore,
CreateExecutor: createPluginExec,
RegistryService: registryService,
LiveRestoreEnabled: config.LiveRestoreEnabled,
LogPluginEvent: d.LogPluginEvent, // todo: make private
AuthzMiddleware: config.AuthzMiddleware,
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create plugin manager")
}
if err := d.setupDefaultLogConfig(); err != nil {
return nil, err
}
for operatingSystem, gd := range d.graphDrivers {
layerStores[operatingSystem], err = layer.NewStoreFromOptions(layer.StoreOptions{
Root: config.Root,
MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
GraphDriver: gd,
GraphDriverOptions: config.GraphOptions,
IDMapping: idMapping,
PluginGetter: d.PluginStore,
ExperimentalEnabled: config.Experimental,
OS: operatingSystem,
})
if err != nil {
return nil, err
}
// Record the driver name, as layerstore initialization may have set it.
d.graphDrivers[operatingSystem] = layerStores[operatingSystem].DriverName()
}
// Configure and validate the kernels security support. Note this is a Linux/FreeBSD
// operation only, so it is safe to pass *just* the runtime OS graphdriver.
if err := configureKernelSecuritySupport(config, d.graphDrivers[runtime.GOOS]); err != nil {
return nil, err
}
imageRoot := filepath.Join(config.Root, "image", d.graphDrivers[runtime.GOOS])
ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
if err != nil {
return nil, err
}
lgrMap := make(map[string]image.LayerGetReleaser)
for los, ls := range layerStores {
lgrMap[los] = ls
}
imageStore, err := image.NewImageStore(ifs, lgrMap)
if err != nil {
return nil, err
}
d.volumes, err = volumesservice.NewVolumeService(config.Root, d.PluginStore, rootIDs, d)
if err != nil {
return nil, err
}
trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath)
if err != nil {
return nil, err
}
trustDir := filepath.Join(config.Root, "trust")
if err := system.MkdirAll(trustDir, 0700); err != nil {
return nil, err
}
// We have a single tag/reference store for the daemon globally. However, it's
// stored under the graphdriver. On host platforms which only support a single
// container OS, but multiple selectable graphdrivers, this means depending on which
// graphdriver is chosen, the global reference store is under there. For
// platforms which support multiple container operating systems, this is slightly
// more problematic as where does the global ref store get located? Fortunately,
// for Windows, which is currently the only daemon supporting multiple container
// operating systems, the list of graphdrivers available isn't user configurable.
// For backwards compatibility, we just put it under the windowsfilter
// directory regardless.
refStoreLocation := filepath.Join(imageRoot, `repositories.json`)
rs, err := refstore.NewReferenceStore(refStoreLocation)
if err != nil {
return nil, fmt.Errorf("Couldn't create reference store repository: %s", err)
}
distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
if err != nil {
return nil, err
}
// Discovery is only enabled when the daemon is launched with an address to advertise. When
// initialized, the daemon is registered and we can store the discovery backend as it's read-only
if err := d.initDiscovery(config); err != nil {
return nil, err
}
sysInfo := d.RawSysInfo(false)
// Check if Devices cgroup is mounted, it is hard requirement for container security,
// on Linux.
if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled && !sys.RunningInUserNS() {
return nil, errors.New("Devices cgroup isn't mounted")
}
d.ID = trustKey.PublicKey().KeyID()
d.repository = daemonRepo
d.containers = container.NewMemoryStore()
if d.containersReplica, err = container.NewViewDB(); err != nil {
return nil, err
}
d.execCommands = exec.NewStore()
d.idIndex = truncindex.NewTruncIndex([]string{})
d.statsCollector = d.newStatsCollector(1 * time.Second)
d.EventsService = events.New()
d.root = config.Root
d.idMapping = idMapping
d.seccompEnabled = sysInfo.Seccomp
d.apparmorEnabled = sysInfo.AppArmor
d.linkIndex = newLinkIndex()
imgSvcConfig := images.ImageServiceConfig{
ContainerStore: d.containers,
DistributionMetadataStore: distributionMetadataStore,
EventsService: d.EventsService,
ImageStore: imageStore,
LayerStores: layerStores,
MaxConcurrentDownloads: *config.MaxConcurrentDownloads,
MaxConcurrentUploads: *config.MaxConcurrentUploads,
MaxDownloadAttempts: *config.MaxDownloadAttempts,
ReferenceStore: rs,
RegistryService: registryService,
TrustKey: trustKey,
ContentNamespace: config.ContainerdNamespace,
}
// containerd is not currently supported on Windows, so d.containerdCli
// will sometimes be nil. In that case we'll create a local content store;
// otherwise we'll use containerd's.
if d.containerdCli != nil {
imgSvcConfig.Leases = d.containerdCli.LeasesService()
imgSvcConfig.ContentStore = d.containerdCli.ContentStore()
} else {
cs, lm, err := d.configureLocalContentStore()
if err != nil {
return nil, err
}
imgSvcConfig.ContentStore = cs
imgSvcConfig.Leases = lm
}
// TODO: imageStore, distributionMetadataStore, and ReferenceStore are only
// used above to run migration. They could be initialized in ImageService
// if migration is called from daemon/images. layerStore might move as well.
d.imageService = images.NewImageService(imgSvcConfig)
go d.execCommandGC()
d.containerd, err = libcontainerd.NewClient(ctx, d.containerdCli, filepath.Join(config.ExecRoot, "containerd"), config.ContainerdNamespace, d)
if err != nil {
return nil, err
}
if err := d.restore(); err != nil {
return nil, err
}
close(d.startupDone)
info := d.SystemInfo()
engineInfo.WithValues(
dockerversion.Version,
dockerversion.GitCommit,
info.Architecture,
info.Driver,
info.KernelVersion,
info.OperatingSystem,
info.OSType,
info.OSVersion,
info.ID,
).Set(1)
engineCpus.Set(float64(info.NCPU))
engineMemory.Set(float64(info.MemTotal))
gd := ""
for os, driver := range d.graphDrivers {
if len(gd) > 0 {
gd += ", "
}
gd += driver
if len(d.graphDrivers) > 1 {
gd = fmt.Sprintf("%s (%s)", gd, os)
}
}
logrus.WithFields(logrus.Fields{
"version": dockerversion.Version,
"commit": dockerversion.GitCommit,
"graphdriver(s)": gd,
}).Info("Docker daemon")
return d, nil
}
// DistributionServices returns services controlling daemon storage
func (daemon *Daemon) DistributionServices() images.DistributionServices {
return daemon.imageService.DistributionServices()
}
func (daemon *Daemon) waitForStartupDone() {
<-daemon.startupDone
}
func (daemon *Daemon) shutdownContainer(c *container.Container) error {
stopTimeout := c.StopTimeout()
// If the container fails to exit within stopTimeout seconds after SIGTERM, the stop escalates to a forced kill.
if err := daemon.containerStop(c, stopTimeout); err != nil {
return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
}
// Wait without timeout for the container to exit.
// Ignore the result.
<-c.Wait(context.Background(), container.WaitConditionNotRunning)
return nil
}
// ShutdownTimeout returns the timeout (in seconds) before containers are forcibly
// killed during shutdown. The default timeout can be configured both on the daemon
// and per container, and the longest timeout will be used. A grace-period of
// 5 seconds is added to the configured timeout.
//
// A negative (-1) timeout means "indefinitely", which means that containers
// are not forcibly killed, and the daemon shuts down after all containers exit.
func (daemon *Daemon) ShutdownTimeout() int {
shutdownTimeout := daemon.configStore.ShutdownTimeout
if shutdownTimeout < 0 {
return -1
}
if daemon.containers == nil {
return shutdownTimeout
}
graceTimeout := 5
for _, c := range daemon.containers.List() {
stopTimeout := c.StopTimeout()
if stopTimeout < 0 {
return -1
}
if stopTimeout+graceTimeout > shutdownTimeout {
shutdownTimeout = stopTimeout + graceTimeout
}
}
return shutdownTimeout
}
// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
daemon.shutdown = true
// Keep mounts and networking running on daemon shutdown if
// we are to keep containers running and restore them.
if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
// check if there are any running containers, if none we should do some cleanup
if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
// metrics plugins still need some cleanup
daemon.cleanupMetricsPlugins()
return nil
}
}
if daemon.containers != nil {
logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", daemon.configStore.ShutdownTimeout)
logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.ShutdownTimeout())
daemon.containers.ApplyAll(func(c *container.Container) {
if !c.IsRunning() {
return
}
log := logrus.WithField("container", c.ID)
log.Debug("shutting down container")
if err := daemon.shutdownContainer(c); err != nil {
log.WithError(err).Error("failed to shut down container")
return
}
if mountid, err := daemon.imageService.GetLayerMountID(c.ID, c.OS); err == nil {
daemon.cleanupMountsByID(mountid)
}
log.Debugf("shut down container")
})
}
if daemon.volumes != nil {
if err := daemon.volumes.Shutdown(); err != nil {
logrus.Errorf("Error shutting down volume store: %v", err)
}
}
if daemon.imageService != nil {
daemon.imageService.Cleanup()
}
// If we are part of a cluster, clean up cluster's stuff
if daemon.clusterProvider != nil {
logrus.Debugf("start clean shutdown of cluster resources...")
daemon.DaemonLeavesCluster()
}
daemon.cleanupMetricsPlugins()
// Shutdown plugins after containers and layerstore. Don't change the order.
daemon.pluginShutdown()
// trigger libnetwork Stop only if it's initialized
if daemon.netController != nil {
daemon.netController.Stop()
}
if daemon.containerdCli != nil {
daemon.containerdCli.Close()
}
if daemon.mdDB != nil {
daemon.mdDB.Close()
}
return daemon.cleanupMounts()
}
// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
if container.RWLayer == nil {
return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")
}
dir, err := container.RWLayer.Mount(container.GetMountLabel())
if err != nil {
return err
}
logrus.WithField("container", container.ID).Debugf("container mounted via layerStore: %v", dir)
if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() {
// The mount path reported by the graph driver should always be trusted on Windows, since the
// volume path for a given mounted layer may change over time. This should only be an error
// on non-Windows operating systems.
if runtime.GOOS != "windows" {
daemon.Unmount(container)
return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
daemon.imageService.GraphDriverForOS(container.OS), container.ID, container.BaseFS, dir)
}
}
container.BaseFS = dir // TODO: combine these fields
return nil
}
// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
if container.RWLayer == nil {
return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")
}
if err := container.RWLayer.Unmount(); err != nil {
logrus.WithField("container", container.ID).WithError(err).Error("error unmounting container")
return err
}
return nil
}
// Subnets returns the IPv4 and IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
var v4Subnets []net.IPNet
var v6Subnets []net.IPNet
managedNetworks := daemon.netController.Networks()
for _, managedNetwork := range managedNetworks {
v4infos, v6infos := managedNetwork.Info().IpamInfo()
for _, info := range v4infos {
if info.IPAMData.Pool != nil {
v4Subnets = append(v4Subnets, *info.IPAMData.Pool)
}
}
for _, info := range v6infos {
if info.IPAMData.Pool != nil {
v6Subnets = append(v6Subnets, *info.IPAMData.Pool)
}
}
}
return v4Subnets, v6Subnets
}
// prepareTempDir prepares and returns the default directory to use
// for temporary files.
// If it doesn't exist, it is created. If it exists, its content is removed.
func prepareTempDir(rootDir string, rootIdentity idtools.Identity) (string, error) {
var tmpDir string
if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
tmpDir = filepath.Join(rootDir, "tmp")
newName := tmpDir + "-old"
if err := os.Rename(tmpDir, newName); err == nil {
go func() {
if err := os.RemoveAll(newName); err != nil {
logrus.Warnf("failed to delete old tmp directory: %s", newName)
}
}()
} else if !os.IsNotExist(err) {
logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err)
if err := os.RemoveAll(tmpDir); err != nil {
logrus.Warnf("failed to delete old tmp directory: %s", tmpDir)
}
}
}
// We don't remove the content of tmpdir if it's not the default,
// it may hold things that do not belong to us.
return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIdentity)
}
func (daemon *Daemon) setGenericResources(conf *config.Config) error {
genericResources, err := config.ParseGenericResources(conf.NodeGenericResources)
if err != nil {
return err
}
daemon.genericResources = genericResources
return nil
}
func setDefaultMtu(conf *config.Config) {
// do nothing if the config does not have the default 0 value.
if conf.Mtu != 0 {
return
}
conf.Mtu = config.DefaultNetworkMtu
}
// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
return daemon.shutdown
}
// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(conf *config.Config) error {
advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise)
if err != nil {
if err == discovery.ErrDiscoveryDisabled {
return nil
}
return err
}
conf.ClusterAdvertise = advertise
discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts)
if err != nil {
return fmt.Errorf("discovery initialization failed (%v)", err)
}
daemon.discoveryWatcher = discoveryWatcher
return nil
}
func isBridgeNetworkDisabled(conf *config.Config) bool {
return conf.BridgeConfig.Iface == config.DisableNetworkBridge
}
func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
options := []nwconfig.Option{}
if dconfig == nil {
return options, nil
}
options = append(options, nwconfig.OptionExperimental(dconfig.Experimental))
options = append(options, nwconfig.OptionDataDir(dconfig.Root))
options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))
dd := runconfig.DefaultDaemonNetworkMode()
dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
options = append(options, nwconfig.OptionDefaultNetwork(dn))
if strings.TrimSpace(dconfig.ClusterStore) != "" {
kv := strings.Split(dconfig.ClusterStore, "://")
if len(kv) != 2 {
return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
}
options = append(options, nwconfig.OptionKVProvider(kv[0]))
options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
}
if len(dconfig.ClusterOpts) > 0 {
options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
}
if daemon.discoveryWatcher != nil {
options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
}
if dconfig.ClusterAdvertise != "" {
options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
}
options = append(options, nwconfig.OptionLabels(dconfig.Labels))
options = append(options, driverOptions(dconfig)...)
if len(dconfig.NetworkConfig.DefaultAddressPools.Value()) > 0 {
options = append(options, nwconfig.OptionDefaultAddressPoolConfig(dconfig.NetworkConfig.DefaultAddressPools.Value()))
}
if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
}
if pg != nil {
options = append(options, nwconfig.OptionPluginGetter(pg))
}
options = append(options, nwconfig.OptionNetworkControlPlaneMTU(dconfig.NetworkControlPlaneMTU))
return options, nil
}
// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
return daemon.cluster
}
// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
daemon.cluster = cluster
}
func (daemon *Daemon) pluginShutdown() {
manager := daemon.pluginManager
// Check for a valid manager object. In error conditions, daemon init can fail
// and shutdown called, before plugin manager is initialized.
if manager != nil {
manager.Shutdown()
}
}
// PluginManager returns current pluginManager associated with the daemon
func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
return daemon.pluginManager
}
// PluginGetter returns current pluginStore associated with the daemon
func (daemon *Daemon) PluginGetter() *plugin.Store {
return daemon.PluginStore
}
// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *config.Config) error {
// get the canonical path to the Docker root directory
var realRoot string
if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
realRoot = config.Root
} else {
realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
if err != nil {
return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
}
}
idMapping, err := setupRemappedRoot(config)
if err != nil {
return err
}
return setupDaemonRoot(config, realRoot, idMapping.RootPair())
}
// checkpointAndSave grabs a container lock to safely call container.CheckpointTo
func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
container.Lock()
defer container.Unlock()
if err := container.CheckpointTo(daemon.containersReplica); err != nil {
return fmt.Errorf("Error saving container state: %v", err)
}
return nil
}
// because the CLI sends a -1 when it wants to unset the swappiness value
// we need to clear it on the server side
func fixMemorySwappiness(resources *containertypes.Resources) {
if resources.MemorySwappiness != nil && *resources.MemorySwappiness == -1 {
resources.MemorySwappiness = nil
}
}
// GetAttachmentStore returns current attachment store associated with the daemon
func (daemon *Daemon) GetAttachmentStore() *network.AttachmentStore {
return &daemon.attachmentStore
}
// IdentityMapping returns uid/gid mapping or a SID (in the case of Windows) for the builder
func (daemon *Daemon) IdentityMapping() *idtools.IdentityMapping {
return daemon.idMapping
}
// ImageService returns the Daemon's ImageService
func (daemon *Daemon) ImageService() *images.ImageService {
return daemon.imageService
}
// BuilderBackend returns the backend used by builder
func (daemon *Daemon) BuilderBackend() builder.Backend {
return struct {
*Daemon
*images.ImageService
}{daemon, daemon.imageService}
}
| ["\"DOCKER_DRIVER\"", "\"DOCKER_TMPDIR\""] | [] | ["DOCKER_DRIVER", "DOCKER_TMPDIR"] | [] | ["DOCKER_DRIVER", "DOCKER_TMPDIR"] | go | 2 | 0 | |
s3-config-validator/src/test/binary/binary_suite_test.go
|
package binary_test
import (
"fmt"
"github.com/onsi/gomega/gexec"
"io/ioutil"
"os"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestBinary(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Binary Suite")
}
var (
binary string
awsAccessKey string
awsSecretKey string
validUnversionedConfigFile *os.File
validVersionedConfigFile *os.File
)
var _ = BeforeSuite(func() {
var err error
binary, err = gexec.Build("github.com/cloudfoundry-incubator/bosh-backup-and-restore/s3-config-validator/src/cmd")
Expect(err).NotTo(HaveOccurred())
checkRequiredEnvs([]string{
"AWS_ACCESS_KEY",
"AWS_SECRET_KEY",
})
awsAccessKey = os.Getenv("AWS_ACCESS_KEY")
awsSecretKey = os.Getenv("AWS_SECRET_KEY")
validVersionedConfigFile = createVersionedConfigFile("bbr-s3-validator-versioned-bucket", awsAccessKey, awsSecretKey, "eu-west-1")
validUnversionedConfigFile = createUnversionedConfigFile("bbr-s3-validator-e2e-all-permissions", awsAccessKey, awsSecretKey, "eu-west-1", "eu-west-1")
})
var _ = AfterSuite(func() {
err := os.Remove(validUnversionedConfigFile.Name())
if err != nil {
Fail(err.Error())
}
err = os.Remove(validVersionedConfigFile.Name())
if err != nil {
Fail(err.Error())
}
})
func checkRequiredEnvs(envs []string) {
for _, env := range envs {
_, present := os.LookupEnv(env)
if !present {
fmt.Fprintf(os.Stderr, "Environment Variable %s must be set", env)
os.Exit(1)
}
}
}
func createUnversionedConfigFile(bucketName, awsAccessKey, awsSecretKey, liveRegion, backupRegion string) *os.File {
configFile, err := ioutil.TempFile("/tmp", "bbr_s3_validator_e2e")
Expect(err).NotTo(HaveOccurred())
fileContents := fmt.Sprintf(`
{
"test-resource": {
"aws_access_key_id": "%[2]s",
"aws_secret_access_key": "%[3]s",
"endpoint": "",
"name": "%[1]s",
"region": "%[4]s",
"backup": {
"name": "%[1]s",
"region": "%[5]s"
}
}
}
`, bucketName, awsAccessKey, awsSecretKey, liveRegion, backupRegion)
_, err = configFile.WriteString(fileContents)
Expect(err).NotTo(HaveOccurred())
return configFile
}
func createVersionedConfigFile(bucketName, awsAccessKey, awsSecretKey, liveRegion string) *os.File {
configFile, err := ioutil.TempFile("/tmp", "bbr_s3_validator_e2e")
Expect(err).NotTo(HaveOccurred())
fileContents := fmt.Sprintf(`
{
"test-resource": {
"aws_access_key_id": "%[2]s",
"aws_secret_access_key": "%[3]s",
"endpoint": "",
"name": "%[1]s",
"region": "%[4]s"
}
}
`, bucketName, awsAccessKey, awsSecretKey, liveRegion)
_, err = configFile.WriteString(fileContents)
Expect(err).NotTo(HaveOccurred())
return configFile
}
|
[
"\"AWS_ACCESS_KEY\"",
"\"AWS_SECRET_KEY\""
] |
[] |
[
"AWS_SECRET_KEY",
"AWS_ACCESS_KEY"
] |
[]
|
["AWS_SECRET_KEY", "AWS_ACCESS_KEY"]
|
go
| 2 | 0 | |
tools/spn-archivist/internal/archive_test.go
|
// Copyright 2016 Spn Development Foundation and contributors. Licensed
// under the Apache License, Version 2.0. See the COPYING file at the root
// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
package archivist
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"fmt"
"io/ioutil"
"math/big"
"os"
"testing"
"github.com/spn/go/xdr"
"github.com/stretchr/testify/assert"
)
func GetTestS3Archive() *Archive {
mx := big.NewInt(0xffffffff)
r, e := rand.Int(rand.Reader, mx)
if e != nil {
panic(e)
}
return MustConnect(fmt.Sprintf("s3://history-stg.spn.org/dev/archivist/test-%s", r),
ConnectOptions{S3Region: "eu-west-1"})
}
func GetTestMockArchive() *Archive {
return MustConnect("mock://test", ConnectOptions{})
}
var tmpdirs []string
func GetTestFileArchive() *Archive {
d, e := ioutil.TempDir("/tmp", "archivist")
if e != nil {
panic(e)
}
if tmpdirs == nil {
tmpdirs = []string{d}
} else {
tmpdirs = append(tmpdirs, d)
}
return MustConnect("file://"+d, ConnectOptions{})
}
func cleanup() {
for _, d := range tmpdirs {
os.RemoveAll(d)
}
}
func GetTestArchive() *Archive {
ty := os.Getenv("ARCHIVIST_TEST_TYPE")
if ty == "file" {
return GetTestFileArchive()
} else if ty == "s3" {
return GetTestS3Archive()
} else {
return GetTestMockArchive()
}
}
func (arch *Archive) AddRandomBucket() (Hash, error) {
var h Hash
buf := make([]byte, 1024)
_, e := rand.Read(buf)
if e != nil {
return h, e
}
h = sha256.Sum256(buf)
pth := BucketPath(h)
e = arch.backend.PutFile(pth, ioutil.NopCloser(bytes.NewReader(buf)))
return h, e
}
func (arch *Archive) AddRandomCheckpointFile(cat string, chk uint32) error {
buf := make([]byte, 1024)
_, e := rand.Read(buf)
if e != nil {
return e
}
pth := CategoryCheckpointPath(cat, chk)
return arch.backend.PutFile(pth, ioutil.NopCloser(bytes.NewReader(buf)))
}
func (arch *Archive) AddRandomCheckpoint(chk uint32) error {
opts := &CommandOptions{Force: true}
for _, cat := range Categories() {
if cat == "history" {
var has HistoryArchiveState
has.CurrentLedger = chk
for i := 0; i < NumLevels; i++ {
curr, e := arch.AddRandomBucket()
if e != nil {
return e
}
snap, e := arch.AddRandomBucket()
if e != nil {
return e
}
next, e := arch.AddRandomBucket()
if e != nil {
return e
}
has.CurrentBuckets[i].Curr = curr.String()
has.CurrentBuckets[i].Snap = snap.String()
has.CurrentBuckets[i].Next.Output = next.String()
}
arch.PutCheckpointHAS(chk, has, opts)
arch.PutRootHAS(has, opts)
} else {
arch.AddRandomCheckpointFile(cat, chk)
}
}
return nil
}
func (arch *Archive) PopulateRandomRange(rng Range) error {
for chk := range rng.Checkpoints() {
if e := arch.AddRandomCheckpoint(chk); e != nil {
return e
}
}
return nil
}
func testRange() Range {
return Range{Low: 63, High: 0x3bf}
}
func testOptions() *CommandOptions {
return &CommandOptions{Range: testRange(), Concurrency: 16}
}
func GetRandomPopulatedArchive() *Archive {
a := GetTestArchive()
a.PopulateRandomRange(testRange())
return a
}
func TestScan(t *testing.T) {
defer cleanup()
opts := testOptions()
GetRandomPopulatedArchive().Scan(opts)
}
func countMissing(arch *Archive, opts *CommandOptions) int {
n := 0
arch.Scan(opts)
for _, missing := range arch.CheckCheckpointFilesMissing(opts) {
n += len(missing)
}
n += len(arch.CheckBucketsMissing())
return n
}
func TestMirror(t *testing.T) {
defer cleanup()
opts := testOptions()
src := GetRandomPopulatedArchive()
dst := GetTestArchive()
Mirror(src, dst, opts)
assert.Equal(t, 0, countMissing(dst, opts))
}
func copyFile(category string, checkpoint uint32, src *Archive, dst *Archive) {
pth := CategoryCheckpointPath(category, checkpoint)
rdr, err := src.backend.GetFile(pth)
if err != nil {
panic(err)
}
if err = dst.backend.PutFile(pth, rdr); err != nil {
panic(err)
}
}
func TestMirrorThenRepair(t *testing.T) {
defer cleanup()
opts := testOptions()
src := GetRandomPopulatedArchive()
dst := GetTestArchive()
Mirror(src, dst, opts)
assert.Equal(t, 0, countMissing(dst, opts))
bad := opts.Range.Low + uint32(opts.Range.Size()/2)
src.AddRandomCheckpoint(bad)
copyFile("history", bad, src, dst)
assert.NotEqual(t, 0, countMissing(dst, opts))
Repair(src, dst, opts)
assert.Equal(t, 0, countMissing(dst, opts))
}
func TestDryRunNoRepair(t *testing.T) {
defer cleanup()
opts := testOptions()
src := GetRandomPopulatedArchive()
dst := GetTestArchive()
Mirror(src, dst, opts)
assert.Equal(t, 0, countMissing(dst, opts))
bad := opts.Range.Low + uint32(opts.Range.Size()/2)
src.AddRandomCheckpoint(bad)
copyFile("history", bad, src, dst)
assert.NotEqual(t, 0, countMissing(dst, opts))
opts.DryRun = true
Repair(src, dst, opts)
assert.NotEqual(t, 0, countMissing(dst, opts))
}
func TestXdrDecode(t *testing.T) {
xdrbytes := []byte{
0, 0, 0, 0, // entry type 0, liveentry
0, 32, 223, 100, // lastmodified 2154340
0, 0, 0, 0, // entry type 0, account
0, 0, 0, 0, // key type 0
23, 140, 68, 253, // ed25519 key (32 bytes)
184, 162, 186, 195,
118, 239, 158, 210,
100, 241, 174, 254,
108, 110, 165, 140,
75, 76, 83, 141,
104, 212, 227, 80,
1, 214, 157, 7,
0, 0, 0, 29, // 64bit balance: 125339976000
46, 216, 65, 64,
0, 0, 129, 170, // 64bit seqnum: 142567144423475
0, 0, 0, 51,
0, 0, 0, 1, // numsubentries: 1
0, 0, 0, 1, // inflationdest type, populated
0, 0, 0, 0, // key type 0
87, 240, 19, 71, // ed25519 key (32 bytes)
52, 91, 9, 62,
213, 239, 178, 85,
161, 119, 108, 251,
168, 90, 76, 116,
12, 48, 134, 248,
115, 255, 117, 50,
19, 18, 170, 203,
0, 0, 0, 0, // flags
0, 0, 0, 19, // homedomain: 19 bytes + 1 null padding
99, 101, 110, 116, // "centaurus.xcoins.de"
97, 117, 114, 117,
115, 46, 120, 99,
111, 105, 110, 115,
46, 100, 101, 0,
1, 0, 0, 0, // thresholds
0, 0, 0, 0, // signers (null)
0, 0, 0, 0, // entry.account.ext.v: 0
0, 0, 0, 0, // entry.ext.v: 0
}
assert.Equal(t, len(xdrbytes), 152)
var tmp xdr.BucketEntry
n, err := xdr.Unmarshal(bytes.NewReader(xdrbytes[:]), &tmp)
fmt.Printf("Decoded %d bytes\n", n)
if err != nil {
panic(err)
}
assert.Equal(t, len(xdrbytes), n)
var out bytes.Buffer
n, err = xdr.Marshal(&out, &tmp)
fmt.Printf("Encoded %d bytes\n", n)
if err != nil {
panic(err)
}
assert.Equal(t, out.Len(), n)
assert.Equal(t, out.Bytes(), xdrbytes)
}
|
[
"\"ARCHIVIST_TEST_TYPE\""
] |
[] |
[
"ARCHIVIST_TEST_TYPE"
] |
[]
|
["ARCHIVIST_TEST_TYPE"]
|
go
| 1 | 0 | |
fos/lib/pyglet/lib.py
|
# ----------------------------------------------------------------------------
# fos.lib.pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of fos.lib.pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Functions for loading dynamic libraries.
These extend and correct ctypes functions.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import os
import re
import sys
import ctypes
import ctypes.util
import fos.lib.pyglet
_debug_lib = fos.lib.pyglet.options['debug_lib']
_debug_trace = fos.lib.pyglet.options['debug_trace']
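# When the 'debug_trace' option is set, loaded libraries are wrapped in
# _TraceLibrary, and every function looked up on them is returned as a
# _TraceFunction proxy that forwards calls and attribute access to the
# underlying ctypes function.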
class _TraceFunction(object):
def __init__(self, func):
self.__dict__['_func'] = func
def __str__(self):
return self._func.__name__
def __call__(self, *args, **kwargs):
return self._func(*args, **kwargs)
def __getattr__(self, name):
return getattr(self._func, name)
def __setattr__(self, name, value):
setattr(self._func, name, value)
class _TraceLibrary(object):
def __init__(self, library):
self._library = library
print library
def __getattr__(self, name):
func = getattr(self._library, name)
f = _TraceFunction(func)
return f
class LibraryLoader(object):
def load_library(self, *names, **kwargs):
'''Find and load a library.
More than one name can be specified, they will be tried in order.
Platform-specific library names (given as kwargs) are tried first.
Raises ImportError if library is not found.
'''
if 'framework' in kwargs and self.platform == 'darwin':
return self.load_framework(kwargs['framework'])
platform_names = kwargs.get(self.platform, [])
if type(platform_names) in (str, unicode):
platform_names = [platform_names]
elif type(platform_names) is tuple:
platform_names = list(platform_names)
if self.platform == 'linux2':
platform_names.extend(['lib%s.so' % n for n in names])
platform_names.extend(names)
for name in platform_names:
try:
lib = ctypes.cdll.LoadLibrary(name)
if _debug_lib:
print name
if _debug_trace:
lib = _TraceLibrary(lib)
return lib
except OSError:
path = self.find_library(name)
if path:
try:
lib = ctypes.cdll.LoadLibrary(path)
if _debug_lib:
print path
if _debug_trace:
lib = _TraceLibrary(lib)
return lib
except OSError:
pass
raise ImportError('Library "%s" not found.' % names[0])
find_library = lambda self, name: ctypes.util.find_library(name)
platform = sys.platform
if platform == 'cygwin':
platform = 'win32'
def load_framework(self, path):
raise RuntimeError("Can't load framework on this platform.")
class MachOLibraryLoader(LibraryLoader):
def __init__(self):
if 'LD_LIBRARY_PATH' in os.environ:
self.ld_library_path = os.environ['LD_LIBRARY_PATH'].split(':')
else:
self.ld_library_path = []
if 'DYLD_LIBRARY_PATH' in os.environ:
self.dyld_library_path = os.environ['DYLD_LIBRARY_PATH'].split(':')
else:
self.dyld_library_path = []
if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:
self.dyld_fallback_library_path = \
os.environ['DYLD_FALLBACK_LIBRARY_PATH'].split(':')
else:
self.dyld_fallback_library_path = [
os.path.expanduser('~/lib'),
'/usr/local/lib',
'/usr/lib']
def find_library(self, path):
'''Implements the dylib search as specified in Apple documentation:
http://developer.apple.com/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryUsageGuidelines.html
Before commencing the standard search, the method first checks
the bundle's ``Frameworks`` directory if the application is running
within a bundle (OS X .app).
'''
libname = os.path.basename(path)
search_path = []
if hasattr(sys, 'frozen') and sys.frozen == 'macosx_app':
search_path.append(os.path.join(
os.environ['RESOURCEPATH'],
'..',
'Frameworks',
libname))
if '/' in path:
search_path.extend(
[os.path.join(p, libname) \
for p in self.dyld_library_path])
search_path.append(path)
search_path.extend(
[os.path.join(p, libname) \
for p in self.dyld_fallback_library_path])
else:
search_path.extend(
[os.path.join(p, libname) \
for p in self.ld_library_path])
search_path.extend(
[os.path.join(p, libname) \
for p in self.dyld_library_path])
search_path.append(path)
search_path.extend(
[os.path.join(p, libname) \
for p in self.dyld_fallback_library_path])
for path in search_path:
if os.path.exists(path):
return path
return None
def find_framework(self, path):
'''Implement runtime framework search as described by:
http://developer.apple.com/documentation/MacOSX/Conceptual/BPFrameworks/Concepts/FrameworkBinding.html
'''
# e.g. path == '/System/Library/Frameworks/OpenGL.framework'
# name == 'OpenGL'
# return '/System/Library/Frameworks/OpenGL.framework/OpenGL'
name = os.path.splitext(os.path.split(path)[1])[0]
realpath = os.path.join(path, name)
if os.path.exists(realpath):
return realpath
for dir in ('/Library/Frameworks',
'/System/Library/Frameworks'):
realpath = os.path.join(dir, '%s.framework' % name, name)
if os.path.exists(realpath):
return realpath
return None
def load_framework(self, path):
realpath = self.find_framework(path)
if realpath:
lib = ctypes.cdll.LoadLibrary(realpath)
if _debug_lib:
print realpath
if _debug_trace:
lib = _TraceLibrary(lib)
return lib
raise ImportError("Can't find framework %s." % path)
class LinuxLibraryLoader(LibraryLoader):
_ld_so_cache = None
def _create_ld_so_cache(self):
# Recreate search path followed by ld.so. This is going to be
# slow to build, and incorrect (ld.so uses ld.so.cache, which may
# not be up-to-date). Used only as fallback for distros without
# /sbin/ldconfig.
#
# We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.
directories = []
try:
directories.extend(os.environ['LD_LIBRARY_PATH'].split(':'))
except KeyError:
pass
try:
directories.extend([dir.strip() for dir in open('/etc/ld.so.conf')])
except IOError:
pass
directories.extend(['/lib', '/usr/lib'])
cache = {}
lib_re = re.compile(r'lib(.*)\.so')
for dir in directories:
try:
for file in os.listdir(dir):
if '.so' not in file:
continue
# Index by filename
path = os.path.join(dir, file)
if file not in cache:
cache[file] = path
# Index by library name
match = lib_re.match(file)
if match:
library = match.group(1)
if library not in cache:
cache[library] = path
except OSError:
pass
self._ld_so_cache = cache
def find_library(self, path):
# ctypes tries ldconfig, gcc and objdump. If none of these are
# present, we implement the ld-linux.so search path as described in
# the man page.
result = ctypes.util.find_library(path)
if result:
return result
if self._ld_so_cache is None:
self._create_ld_so_cache()
return self._ld_so_cache.get(path)
if sys.platform == 'darwin':
loader = MachOLibraryLoader()
elif sys.platform == 'linux2':
loader = LinuxLibraryLoader()
else:
loader = LibraryLoader()
load_library = loader.load_library
|
[] |
[] |
[
"DYLD_FALLBACK_LIBRARY_PATH",
"RESOURCEPATH",
"LD_LIBRARY_PATH",
"DYLD_LIBRARY_PATH"
] |
[]
|
["DYLD_FALLBACK_LIBRARY_PATH", "RESOURCEPATH", "LD_LIBRARY_PATH", "DYLD_LIBRARY_PATH"]
|
python
| 4 | 0 | |
src/cmd/cgo/out.go
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"debug/elf"
"debug/macho"
"debug/pe"
"fmt"
"go/ast"
"go/printer"
"go/token"
"internal/xcoff"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"sort"
"strings"
)
var (
conf = printer.Config{Mode: printer.SourcePos, Tabwidth: 8}
noSourceConf = printer.Config{Tabwidth: 8}
)
// writeDefs creates output files to be compiled by gc and gcc.
func (p *Package) writeDefs() {
var fgo2, fc io.Writer
f := creat(*objDir + "_cgo_gotypes.go")
defer f.Close()
fgo2 = f
if *gccgo {
f := creat(*objDir + "_cgo_defun.c")
defer f.Close()
fc = f
}
fm := creat(*objDir + "_cgo_main.c")
var gccgoInit bytes.Buffer
fflg := creat(*objDir + "_cgo_flags")
for k, v := range p.CgoFlags {
fmt.Fprintf(fflg, "_CGO_%s=%s\n", k, strings.Join(v, " "))
if k == "LDFLAGS" && !*gccgo {
for _, arg := range v {
fmt.Fprintf(fgo2, "//go:cgo_ldflag %q\n", arg)
}
}
}
fflg.Close()
// Write C main file for using gcc to resolve imports.
fmt.Fprintf(fm, "int main() { return 0; }\n")
if *importRuntimeCgo {
fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int, __SIZE_TYPE__), void *a, int c, __SIZE_TYPE__ ctxt) { }\n")
fmt.Fprintf(fm, "__SIZE_TYPE__ _cgo_wait_runtime_init_done() { return 0; }\n")
fmt.Fprintf(fm, "void _cgo_release_context(__SIZE_TYPE__ ctxt) { }\n")
fmt.Fprintf(fm, "char* _cgo_topofstack(void) { return (char*)0; }\n")
} else {
// If we're not importing runtime/cgo, we *are* runtime/cgo,
// which provides these functions. We just need a prototype.
fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int, __SIZE_TYPE__), void *a, int c, __SIZE_TYPE__ ctxt);\n")
fmt.Fprintf(fm, "__SIZE_TYPE__ _cgo_wait_runtime_init_done();\n")
fmt.Fprintf(fm, "void _cgo_release_context(__SIZE_TYPE__);\n")
}
fmt.Fprintf(fm, "void _cgo_allocate(void *a, int c) { }\n")
fmt.Fprintf(fm, "void _cgo_panic(void *a, int c) { }\n")
fmt.Fprintf(fm, "void _cgo_reginit(void) { }\n")
// Write second Go output: definitions of _C_xxx.
// In a separate file so that the import of "unsafe" does not
// pollute the original file.
fmt.Fprintf(fgo2, "// Code generated by cmd/cgo; DO NOT EDIT.\n\n")
fmt.Fprintf(fgo2, "package %s\n\n", p.PackageName)
fmt.Fprintf(fgo2, "import \"unsafe\"\n\n")
if !*gccgo && *importRuntimeCgo {
fmt.Fprintf(fgo2, "import _ \"runtime/cgo\"\n\n")
}
if *importSyscall {
fmt.Fprintf(fgo2, "import \"syscall\"\n\n")
fmt.Fprintf(fgo2, "var _ syscall.Errno\n")
}
fmt.Fprintf(fgo2, "func _Cgo_ptr(ptr unsafe.Pointer) unsafe.Pointer { return ptr }\n\n")
if !*gccgo {
fmt.Fprintf(fgo2, "//go:linkname _Cgo_always_false runtime.cgoAlwaysFalse\n")
fmt.Fprintf(fgo2, "var _Cgo_always_false bool\n")
fmt.Fprintf(fgo2, "//go:linkname _Cgo_use runtime.cgoUse\n")
fmt.Fprintf(fgo2, "func _Cgo_use(interface{})\n")
}
typedefNames := make([]string, 0, len(typedef))
for name := range typedef {
typedefNames = append(typedefNames, name)
}
sort.Strings(typedefNames)
for _, name := range typedefNames {
def := typedef[name]
fmt.Fprintf(fgo2, "type %s ", name)
// We don't have source info for these types, so write them out without source info.
// Otherwise types would look like:
//
// type _Ctype_struct_cb struct {
// //line :1
// on_test *[0]byte
// //line :1
// }
//
// Which is not useful. Moreover we never override source info,
// so subsequent source code uses the same source info.
// Moreover, empty file name makes compile emit no source debug info at all.
var buf bytes.Buffer
noSourceConf.Fprint(&buf, fset, def.Go)
if bytes.HasPrefix(buf.Bytes(), []byte("_Ctype_")) {
// This typedef is of the form `typedef a b` and should be an alias.
fmt.Fprintf(fgo2, "= ")
}
fmt.Fprintf(fgo2, "%s", buf.Bytes())
fmt.Fprintf(fgo2, "\n\n")
}
if *gccgo {
fmt.Fprintf(fgo2, "type _Ctype_void byte\n")
} else {
fmt.Fprintf(fgo2, "type _Ctype_void [0]byte\n")
}
if *gccgo {
fmt.Fprint(fgo2, gccgoGoProlog)
fmt.Fprint(fc, p.cPrologGccgo())
} else {
fmt.Fprint(fgo2, goProlog)
}
if fc != nil {
fmt.Fprintf(fc, "#line 1 \"cgo-generated-wrappers\"\n")
}
if fm != nil {
fmt.Fprintf(fm, "#line 1 \"cgo-generated-wrappers\"\n")
}
gccgoSymbolPrefix := p.gccgoSymbolPrefix()
cVars := make(map[string]bool)
for _, key := range nameKeys(p.Name) {
n := p.Name[key]
if !n.IsVar() {
continue
}
if !cVars[n.C] {
if *gccgo {
fmt.Fprintf(fc, "extern byte *%s;\n", n.C)
} else {
fmt.Fprintf(fm, "extern char %s[];\n", n.C)
fmt.Fprintf(fm, "void *_cgohack_%s = %s;\n\n", n.C, n.C)
fmt.Fprintf(fgo2, "//go:linkname __cgo_%s %s\n", n.C, n.C)
fmt.Fprintf(fgo2, "//go:cgo_import_static %s\n", n.C)
fmt.Fprintf(fgo2, "var __cgo_%s byte\n", n.C)
}
cVars[n.C] = true
}
var node ast.Node
if n.Kind == "var" {
node = &ast.StarExpr{X: n.Type.Go}
} else if n.Kind == "fpvar" {
node = n.Type.Go
} else {
panic(fmt.Errorf("invalid var kind %q", n.Kind))
}
if *gccgo {
fmt.Fprintf(fc, `extern void *%s __asm__("%s.%s");`, n.Mangle, gccgoSymbolPrefix, n.Mangle)
fmt.Fprintf(&gccgoInit, "\t%s = &%s;\n", n.Mangle, n.C)
fmt.Fprintf(fc, "\n")
}
fmt.Fprintf(fgo2, "var %s ", n.Mangle)
conf.Fprint(fgo2, fset, node)
if !*gccgo {
fmt.Fprintf(fgo2, " = (")
conf.Fprint(fgo2, fset, node)
fmt.Fprintf(fgo2, ")(unsafe.Pointer(&__cgo_%s))", n.C)
}
fmt.Fprintf(fgo2, "\n")
}
if *gccgo {
fmt.Fprintf(fc, "\n")
}
for _, key := range nameKeys(p.Name) {
n := p.Name[key]
if n.Const != "" {
fmt.Fprintf(fgo2, "const %s = %s\n", n.Mangle, n.Const)
}
}
fmt.Fprintf(fgo2, "\n")
callsMalloc := false
for _, key := range nameKeys(p.Name) {
n := p.Name[key]
if n.FuncType != nil {
p.writeDefsFunc(fgo2, n, &callsMalloc)
}
}
fgcc := creat(*objDir + "_cgo_export.c")
fgcch := creat(*objDir + "_cgo_export.h")
if *gccgo {
p.writeGccgoExports(fgo2, fm, fgcc, fgcch)
} else {
p.writeExports(fgo2, fm, fgcc, fgcch)
}
if callsMalloc && !*gccgo {
fmt.Fprint(fgo2, strings.Replace(cMallocDefGo, "PREFIX", cPrefix, -1))
fmt.Fprint(fgcc, strings.Replace(strings.Replace(cMallocDefC, "PREFIX", cPrefix, -1), "PACKED", p.packedAttribute(), -1))
}
if err := fgcc.Close(); err != nil {
fatalf("%s", err)
}
if err := fgcch.Close(); err != nil {
fatalf("%s", err)
}
if *exportHeader != "" && len(p.ExpFunc) > 0 {
fexp := creat(*exportHeader)
fgcch, err := os.Open(*objDir + "_cgo_export.h")
if err != nil {
fatalf("%s", err)
}
_, err = io.Copy(fexp, fgcch)
if err != nil {
fatalf("%s", err)
}
if err = fexp.Close(); err != nil {
fatalf("%s", err)
}
}
init := gccgoInit.String()
if init != "" {
// The init function does nothing but simple
// assignments, so it won't use much stack space, so
// it's OK to not split the stack. Splitting the stack
// can run into a bug in clang (as of 2018-11-09):
// this is a leaf function, and when clang sees a leaf
// function it won't emit the split stack prologue for
// the function. However, if this function refers to a
// non-split-stack function, which will happen if the
// cgo code refers to a C function not compiled with
// -fsplit-stack, then the linker will think that it
// needs to adjust the split stack prologue, but there
// won't be one. Marking the function explicitly
// no_split_stack works around this problem by telling
// the linker that it's OK if there is no split stack
// prologue.
fmt.Fprintln(fc, "static void init(void) __attribute__ ((constructor, no_split_stack));")
fmt.Fprintln(fc, "static void init(void) {")
fmt.Fprint(fc, init)
fmt.Fprintln(fc, "}")
}
}
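// dynimport reads the imported symbols and libraries of the object file obj
// (ELF, Mach-O, PE or XCOFF) and writes the corresponding
// //go:cgo_import_dynamic directives to *dynout, or to standard output.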
func dynimport(obj string) {
stdout := os.Stdout
if *dynout != "" {
f, err := os.Create(*dynout)
if err != nil {
fatalf("%s", err)
}
stdout = f
}
fmt.Fprintf(stdout, "package %s\n", *dynpackage)
if f, err := elf.Open(obj); err == nil {
if *dynlinker {
// Emit the cgo_dynamic_linker line.
if sec := f.Section(".interp"); sec != nil {
if data, err := sec.Data(); err == nil && len(data) > 1 {
// skip trailing \0 in data
fmt.Fprintf(stdout, "//go:cgo_dynamic_linker %q\n", string(data[:len(data)-1]))
}
}
}
sym, _ := f.ImportedSymbols()
for _, s := range sym {
targ := s.Name
if s.Version != "" {
targ += "#" + s.Version
}
fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", s.Name, targ, s.Library)
}
lib, _ := f.ImportedLibraries()
for _, l := range lib {
fmt.Fprintf(stdout, "//go:cgo_import_dynamic _ _ %q\n", l)
}
return
}
if f, err := macho.Open(obj); err == nil {
sym, _ := f.ImportedSymbols()
for _, s := range sym {
if len(s) > 0 && s[0] == '_' {
s = s[1:]
}
fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", s, s, "")
}
lib, _ := f.ImportedLibraries()
for _, l := range lib {
fmt.Fprintf(stdout, "//go:cgo_import_dynamic _ _ %q\n", l)
}
return
}
if f, err := pe.Open(obj); err == nil {
sym, _ := f.ImportedSymbols()
for _, s := range sym {
ss := strings.Split(s, ":")
name := strings.Split(ss[0], "@")[0]
fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", name, ss[0], strings.ToLower(ss[1]))
}
return
}
if f, err := xcoff.Open(obj); err == nil {
sym, err := f.ImportedSymbols()
if err != nil {
fatalf("cannot load imported symbols from XCOFF file %s: %v", obj, err)
}
for _, s := range sym {
fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", s.Name, s.Name, s.Library)
}
lib, err := f.ImportedLibraries()
if err != nil {
fatalf("cannot load imported libraries from XCOFF file %s: %v", obj, err)
}
for _, l := range lib {
fmt.Fprintf(stdout, "//go:cgo_import_dynamic _ _ %q\n", l)
}
return
}
fatalf("cannot parse %s as ELF, Mach-O, PE or XCOFF", obj)
}
// Construct a gcc struct matching the gc argument frame.
// Assumes that in gcc, char is 1 byte, short 2 bytes, int 4 bytes, long long 8 bytes.
// These assumptions are checked by the gccProlog.
// Also assumes that gc convention is to word-align the
// input and output parameters.
func (p *Package) structType(n *Name) (string, int64) {
var buf bytes.Buffer
fmt.Fprint(&buf, "struct {\n")
off := int64(0)
for i, t := range n.FuncType.Params {
if off%t.Align != 0 {
pad := t.Align - off%t.Align
fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad)
off += pad
}
c := t.Typedef
if c == "" {
c = t.C.String()
}
fmt.Fprintf(&buf, "\t\t%s p%d;\n", c, i)
off += t.Size
}
if off%p.PtrSize != 0 {
pad := p.PtrSize - off%p.PtrSize
fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad)
off += pad
}
if t := n.FuncType.Result; t != nil {
if off%t.Align != 0 {
pad := t.Align - off%t.Align
fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad)
off += pad
}
fmt.Fprintf(&buf, "\t\t%s r;\n", t.C)
off += t.Size
}
if off%p.PtrSize != 0 {
pad := p.PtrSize - off%p.PtrSize
fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad)
off += pad
}
if off == 0 {
fmt.Fprintf(&buf, "\t\tchar unused;\n") // avoid empty struct
}
fmt.Fprintf(&buf, "\t}")
return buf.String(), off
}
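// writeDefsFunc writes the Go-side definition for the C function n: either a
// gccgo-style hook or a gc wrapper that calls into gcc through
// _cgo_runtime_cgocall, setting *callsMalloc if the builtin uses _cgo_cmalloc.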
func (p *Package) writeDefsFunc(fgo2 io.Writer, n *Name, callsMalloc *bool) {
name := n.Go
gtype := n.FuncType.Go
void := gtype.Results == nil || len(gtype.Results.List) == 0
if n.AddError {
// Add "error" to return type list.
// Type list is known to be 0 or 1 element - it's a C function.
err := &ast.Field{Type: ast.NewIdent("error")}
l := gtype.Results.List
if len(l) == 0 {
l = []*ast.Field{err}
} else {
l = []*ast.Field{l[0], err}
}
t := new(ast.FuncType)
*t = *gtype
t.Results = &ast.FieldList{List: l}
gtype = t
}
// Go func declaration.
d := &ast.FuncDecl{
Name: ast.NewIdent(n.Mangle),
Type: gtype,
}
// Builtins defined in the C prolog.
inProlog := builtinDefs[name] != ""
cname := fmt.Sprintf("_cgo%s%s", cPrefix, n.Mangle)
paramnames := []string(nil)
if d.Type.Params != nil {
for i, param := range d.Type.Params.List {
paramName := fmt.Sprintf("p%d", i)
param.Names = []*ast.Ident{ast.NewIdent(paramName)}
paramnames = append(paramnames, paramName)
}
}
if *gccgo {
// Gccgo style hooks.
fmt.Fprint(fgo2, "\n")
conf.Fprint(fgo2, fset, d)
fmt.Fprint(fgo2, " {\n")
if !inProlog {
fmt.Fprint(fgo2, "\tdefer syscall.CgocallDone()\n")
fmt.Fprint(fgo2, "\tsyscall.Cgocall()\n")
}
if n.AddError {
fmt.Fprint(fgo2, "\tsyscall.SetErrno(0)\n")
}
fmt.Fprint(fgo2, "\t")
if !void {
fmt.Fprint(fgo2, "r := ")
}
fmt.Fprintf(fgo2, "%s(%s)\n", cname, strings.Join(paramnames, ", "))
if n.AddError {
fmt.Fprint(fgo2, "\te := syscall.GetErrno()\n")
fmt.Fprint(fgo2, "\tif e != 0 {\n")
fmt.Fprint(fgo2, "\t\treturn ")
if !void {
fmt.Fprint(fgo2, "r, ")
}
fmt.Fprint(fgo2, "e\n")
fmt.Fprint(fgo2, "\t}\n")
fmt.Fprint(fgo2, "\treturn ")
if !void {
fmt.Fprint(fgo2, "r, ")
}
fmt.Fprint(fgo2, "nil\n")
} else if !void {
fmt.Fprint(fgo2, "\treturn r\n")
}
fmt.Fprint(fgo2, "}\n")
// declare the C function.
fmt.Fprintf(fgo2, "//extern %s\n", cname)
d.Name = ast.NewIdent(cname)
if n.AddError {
l := d.Type.Results.List
d.Type.Results.List = l[:len(l)-1]
}
conf.Fprint(fgo2, fset, d)
fmt.Fprint(fgo2, "\n")
return
}
if inProlog {
fmt.Fprint(fgo2, builtinDefs[name])
if strings.Contains(builtinDefs[name], "_cgo_cmalloc") {
*callsMalloc = true
}
return
}
// Wrapper calls into gcc, passing a pointer to the argument frame.
fmt.Fprintf(fgo2, "//go:cgo_import_static %s\n", cname)
fmt.Fprintf(fgo2, "//go:linkname __cgofn_%s %s\n", cname, cname)
fmt.Fprintf(fgo2, "var __cgofn_%s byte\n", cname)
fmt.Fprintf(fgo2, "var %s = unsafe.Pointer(&__cgofn_%s)\n", cname, cname)
nret := 0
if !void {
d.Type.Results.List[0].Names = []*ast.Ident{ast.NewIdent("r1")}
nret = 1
}
if n.AddError {
d.Type.Results.List[nret].Names = []*ast.Ident{ast.NewIdent("r2")}
}
fmt.Fprint(fgo2, "\n")
fmt.Fprint(fgo2, "//go:cgo_unsafe_args\n")
conf.Fprint(fgo2, fset, d)
fmt.Fprint(fgo2, " {\n")
// NOTE: Using uintptr to hide from escape analysis.
arg := "0"
if len(paramnames) > 0 {
arg = "uintptr(unsafe.Pointer(&p0))"
} else if !void {
arg = "uintptr(unsafe.Pointer(&r1))"
}
prefix := ""
if n.AddError {
prefix = "errno := "
}
fmt.Fprintf(fgo2, "\t%s_cgo_runtime_cgocall(%s, %s)\n", prefix, cname, arg)
if n.AddError {
fmt.Fprintf(fgo2, "\tif errno != 0 { r2 = syscall.Errno(errno) }\n")
}
fmt.Fprintf(fgo2, "\tif _Cgo_always_false {\n")
if d.Type.Params != nil {
for i := range d.Type.Params.List {
fmt.Fprintf(fgo2, "\t\t_Cgo_use(p%d)\n", i)
}
}
fmt.Fprintf(fgo2, "\t}\n")
fmt.Fprintf(fgo2, "\treturn\n")
fmt.Fprintf(fgo2, "}\n")
}
// writeOutput creates stubs for a specific source file to be compiled by gc
func (p *Package) writeOutput(f *File, srcfile string) {
base := srcfile
if strings.HasSuffix(base, ".go") {
base = base[0 : len(base)-3]
}
base = filepath.Base(base)
fgo1 := creat(*objDir + base + ".cgo1.go")
fgcc := creat(*objDir + base + ".cgo2.c")
p.GoFiles = append(p.GoFiles, base+".cgo1.go")
p.GccFiles = append(p.GccFiles, base+".cgo2.c")
// Write Go output: Go input with rewrites of C.xxx to _C_xxx.
fmt.Fprintf(fgo1, "// Code generated by cmd/cgo; DO NOT EDIT.\n\n")
fmt.Fprintf(fgo1, "//line %s:1:1\n", srcfile)
fgo1.Write(f.Edit.Bytes())
// While we process the vars and funcs, also write gcc output.
// Gcc output starts with the preamble.
fmt.Fprintf(fgcc, "%s\n", builtinProlog)
fmt.Fprintf(fgcc, "%s\n", f.Preamble)
fmt.Fprintf(fgcc, "%s\n", gccProlog)
fmt.Fprintf(fgcc, "%s\n", tsanProlog)
fmt.Fprintf(fgcc, "%s\n", msanProlog)
for _, key := range nameKeys(f.Name) {
n := f.Name[key]
if n.FuncType != nil {
p.writeOutputFunc(fgcc, n)
}
}
fgo1.Close()
fgcc.Close()
}
// fixGo converts the internal Name.Go field into the name we should show
// to users in error messages. There's only one for now: on input we rewrite
// C.malloc into C._CMalloc, so change it back here.
func fixGo(name string) string {
if name == "_CMalloc" {
return "malloc"
}
return name
}
var isBuiltin = map[string]bool{
"_Cfunc_CString": true,
"_Cfunc_CBytes": true,
"_Cfunc_GoString": true,
"_Cfunc_GoStringN": true,
"_Cfunc_GoBytes": true,
"_Cfunc__CMalloc": true,
}
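// writeOutputFunc writes the gcc-compiled wrapper for the C function n,
// which unpacks the argument struct passed from Go and calls the real
// C function or macro.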
func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) {
name := n.Mangle
if isBuiltin[name] || p.Written[name] {
// The builtins are already defined in the C prolog, and we don't
// want to duplicate function definitions we've already done.
return
}
p.Written[name] = true
if *gccgo {
p.writeGccgoOutputFunc(fgcc, n)
return
}
ctype, _ := p.structType(n)
// Gcc wrapper unpacks the C argument struct
// and calls the actual C function.
fmt.Fprintf(fgcc, "CGO_NO_SANITIZE_THREAD\n")
if n.AddError {
fmt.Fprintf(fgcc, "int\n")
} else {
fmt.Fprintf(fgcc, "void\n")
}
fmt.Fprintf(fgcc, "_cgo%s%s(void *v)\n", cPrefix, n.Mangle)
fmt.Fprintf(fgcc, "{\n")
if n.AddError {
fmt.Fprintf(fgcc, "\tint _cgo_errno;\n")
}
// We're trying to write a gcc struct that matches gc's layout.
// Use packed attribute to force no padding in this struct in case
// gcc has different packing requirements.
fmt.Fprintf(fgcc, "\t%s %v *_cgo_a = v;\n", ctype, p.packedAttribute())
if n.FuncType.Result != nil {
// Save the stack top for use below.
fmt.Fprintf(fgcc, "\tchar *_cgo_stktop = _cgo_topofstack();\n")
}
tr := n.FuncType.Result
if tr != nil {
fmt.Fprintf(fgcc, "\t__typeof__(_cgo_a->r) _cgo_r;\n")
}
fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
if n.AddError {
fmt.Fprintf(fgcc, "\terrno = 0;\n")
}
fmt.Fprintf(fgcc, "\t")
if tr != nil {
fmt.Fprintf(fgcc, "_cgo_r = ")
if c := tr.C.String(); c[len(c)-1] == '*' {
fmt.Fprint(fgcc, "(__typeof__(_cgo_a->r)) ")
}
}
if n.Kind == "macro" {
fmt.Fprintf(fgcc, "%s;\n", n.C)
} else {
fmt.Fprintf(fgcc, "%s(", n.C)
for i := range n.FuncType.Params {
if i > 0 {
fmt.Fprintf(fgcc, ", ")
}
fmt.Fprintf(fgcc, "_cgo_a->p%d", i)
}
fmt.Fprintf(fgcc, ");\n")
}
if n.AddError {
fmt.Fprintf(fgcc, "\t_cgo_errno = errno;\n")
}
fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
if n.FuncType.Result != nil {
// The cgo call may have caused a stack copy (via a callback).
// Adjust the return value pointer appropriately.
fmt.Fprintf(fgcc, "\t_cgo_a = (void*)((char*)_cgo_a + (_cgo_topofstack() - _cgo_stktop));\n")
// Save the return value.
fmt.Fprintf(fgcc, "\t_cgo_a->r = _cgo_r;\n")
// The return value is on the Go stack. If we are using msan,
// and if the C value is partially or completely uninitialized,
// the assignment will mark the Go stack as uninitialized.
// The Go compiler does not update msan for changes to the
// stack. It is possible that the stack will remain
// uninitialized, and then later be used in a way that is
// visible to msan, possibly leading to a false positive.
// Mark the stack space as written, to avoid this problem.
// See issue 26209.
fmt.Fprintf(fgcc, "\t_cgo_msan_write(&_cgo_a->r, sizeof(_cgo_a->r));\n")
}
if n.AddError {
fmt.Fprintf(fgcc, "\treturn _cgo_errno;\n")
}
fmt.Fprintf(fgcc, "}\n")
fmt.Fprintf(fgcc, "\n")
}
// Write out a wrapper for a function when using gccgo. This is a
// simple wrapper that just calls the real function. We only need a
// wrapper to support static functions in the prologue--without a
// wrapper, we can't refer to the function, since the reference is in
// a different file.
func (p *Package) writeGccgoOutputFunc(fgcc *os.File, n *Name) {
fmt.Fprintf(fgcc, "CGO_NO_SANITIZE_THREAD\n")
if t := n.FuncType.Result; t != nil {
fmt.Fprintf(fgcc, "%s\n", t.C.String())
} else {
fmt.Fprintf(fgcc, "void\n")
}
fmt.Fprintf(fgcc, "_cgo%s%s(", cPrefix, n.Mangle)
for i, t := range n.FuncType.Params {
if i > 0 {
fmt.Fprintf(fgcc, ", ")
}
c := t.Typedef
if c == "" {
c = t.C.String()
}
fmt.Fprintf(fgcc, "%s p%d", c, i)
}
fmt.Fprintf(fgcc, ")\n")
fmt.Fprintf(fgcc, "{\n")
if t := n.FuncType.Result; t != nil {
fmt.Fprintf(fgcc, "\t%s _cgo_r;\n", t.C.String())
}
fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
fmt.Fprintf(fgcc, "\t")
if t := n.FuncType.Result; t != nil {
fmt.Fprintf(fgcc, "_cgo_r = ")
// Cast to void* to avoid warnings due to omitted qualifiers.
if c := t.C.String(); c[len(c)-1] == '*' {
fmt.Fprintf(fgcc, "(void*)")
}
}
if n.Kind == "macro" {
fmt.Fprintf(fgcc, "%s;\n", n.C)
} else {
fmt.Fprintf(fgcc, "%s(", n.C)
for i := range n.FuncType.Params {
if i > 0 {
fmt.Fprintf(fgcc, ", ")
}
fmt.Fprintf(fgcc, "p%d", i)
}
fmt.Fprintf(fgcc, ");\n")
}
fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
if t := n.FuncType.Result; t != nil {
fmt.Fprintf(fgcc, "\treturn ")
// Cast to void* to avoid warnings due to omitted qualifiers
// and explicit incompatible struct types.
if c := t.C.String(); c[len(c)-1] == '*' {
fmt.Fprintf(fgcc, "(void*)")
}
fmt.Fprintf(fgcc, "_cgo_r;\n")
}
fmt.Fprintf(fgcc, "}\n")
fmt.Fprintf(fgcc, "\n")
}
// packedAttribute returns host compiler struct attribute that will be
// used to match gc's struct layout. For example, on 386 Windows,
// gcc wants to 8-align int64s, but gc does not.
// Use __gcc_struct__ to work around https://gcc.gnu.org/PR52991 on x86,
// and https://golang.org/issue/5603.
func (p *Package) packedAttribute() string {
s := "__attribute__((__packed__"
if !p.GccIsClang && (goarch == "amd64" || goarch == "386") {
s += ", __gcc_struct__"
}
return s + "))"
}
// Write out the various stubs we need to support functions exported
// from Go so that they are callable from C.
func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
p.writeExportHeader(fgcch)
fmt.Fprintf(fgcc, "/* Code generated by cmd/cgo; DO NOT EDIT. */\n\n")
fmt.Fprintf(fgcc, "#include <stdlib.h>\n")
fmt.Fprintf(fgcc, "#include \"_cgo_export.h\"\n\n")
// We use packed structs, but they are always aligned.
fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Wpragmas\"\n")
fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Waddress-of-packed-member\"\n")
fmt.Fprintf(fgcc, "extern void crosscall2(void (*fn)(void *, int, __SIZE_TYPE__), void *, int, __SIZE_TYPE__);\n")
fmt.Fprintf(fgcc, "extern __SIZE_TYPE__ _cgo_wait_runtime_init_done();\n")
fmt.Fprintf(fgcc, "extern void _cgo_release_context(__SIZE_TYPE__);\n\n")
fmt.Fprintf(fgcc, "extern char* _cgo_topofstack(void);")
fmt.Fprintf(fgcc, "%s\n", tsanProlog)
fmt.Fprintf(fgcc, "%s\n", msanProlog)
for _, exp := range p.ExpFunc {
fn := exp.Func
// Construct a gcc struct matching the gc argument and
// result frame. The gcc struct will be compiled with
// __attribute__((packed)) so all padding must be accounted
// for explicitly.
ctype := "struct {\n"
off := int64(0)
npad := 0
if fn.Recv != nil {
t := p.cgoType(fn.Recv.List[0].Type)
ctype += fmt.Sprintf("\t\t%s recv;\n", t.C)
off += t.Size
}
fntype := fn.Type
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
t := p.cgoType(atype)
if off%t.Align != 0 {
pad := t.Align - off%t.Align
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
ctype += fmt.Sprintf("\t\t%s p%d;\n", t.C, i)
off += t.Size
})
if off%p.PtrSize != 0 {
pad := p.PtrSize - off%p.PtrSize
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
t := p.cgoType(atype)
if off%t.Align != 0 {
pad := t.Align - off%t.Align
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
ctype += fmt.Sprintf("\t\t%s r%d;\n", t.C, i)
off += t.Size
})
if off%p.PtrSize != 0 {
pad := p.PtrSize - off%p.PtrSize
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
if ctype == "struct {\n" {
ctype += "\t\tchar unused;\n" // avoid empty struct
}
ctype += "\t}"
// Get the return type of the wrapper function
// compiled by gcc.
gccResult := ""
if fntype.Results == nil || len(fntype.Results.List) == 0 {
gccResult = "void"
} else if len(fntype.Results.List) == 1 && len(fntype.Results.List[0].Names) <= 1 {
gccResult = p.cgoType(fntype.Results.List[0].Type).C.String()
} else {
fmt.Fprintf(fgcch, "\n/* Return type for %s */\n", exp.ExpName)
fmt.Fprintf(fgcch, "struct %s_return {\n", exp.ExpName)
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
fmt.Fprintf(fgcch, "\t%s r%d;", p.cgoType(atype).C, i)
if len(aname) > 0 {
fmt.Fprintf(fgcch, " /* %s */", aname)
}
fmt.Fprint(fgcch, "\n")
})
fmt.Fprintf(fgcch, "};\n")
gccResult = "struct " + exp.ExpName + "_return"
}
// Build the wrapper function compiled by gcc.
s := fmt.Sprintf("%s %s(", gccResult, exp.ExpName)
if fn.Recv != nil {
s += p.cgoType(fn.Recv.List[0].Type).C.String()
s += " recv"
}
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if i > 0 || fn.Recv != nil {
s += ", "
}
s += fmt.Sprintf("%s p%d", p.cgoType(atype).C, i)
})
s += ")"
if len(exp.Doc) > 0 {
fmt.Fprintf(fgcch, "\n%s", exp.Doc)
}
fmt.Fprintf(fgcch, "\nextern %s;\n", s)
fmt.Fprintf(fgcc, "extern void _cgoexp%s_%s(void *, int, __SIZE_TYPE__);\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgcc, "\nCGO_NO_SANITIZE_THREAD")
fmt.Fprintf(fgcc, "\n%s\n", s)
fmt.Fprintf(fgcc, "{\n")
fmt.Fprintf(fgcc, "\t__SIZE_TYPE__ _cgo_ctxt = _cgo_wait_runtime_init_done();\n")
fmt.Fprintf(fgcc, "\t%s %v a;\n", ctype, p.packedAttribute())
if gccResult != "void" && (len(fntype.Results.List) > 1 || len(fntype.Results.List[0].Names) > 1) {
fmt.Fprintf(fgcc, "\t%s r;\n", gccResult)
}
if fn.Recv != nil {
fmt.Fprintf(fgcc, "\ta.recv = recv;\n")
}
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
fmt.Fprintf(fgcc, "\ta.p%d = p%d;\n", i, i)
})
fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
fmt.Fprintf(fgcc, "\tcrosscall2(_cgoexp%s_%s, &a, %d, _cgo_ctxt);\n", cPrefix, exp.ExpName, off)
fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
fmt.Fprintf(fgcc, "\t_cgo_release_context(_cgo_ctxt);\n")
if gccResult != "void" {
if len(fntype.Results.List) == 1 && len(fntype.Results.List[0].Names) <= 1 {
fmt.Fprintf(fgcc, "\treturn a.r0;\n")
} else {
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
fmt.Fprintf(fgcc, "\tr.r%d = a.r%d;\n", i, i)
})
fmt.Fprintf(fgcc, "\treturn r;\n")
}
}
fmt.Fprintf(fgcc, "}\n")
// Build the wrapper function compiled by cmd/compile.
goname := "_cgoexpwrap" + cPrefix + "_"
if fn.Recv != nil {
goname += fn.Recv.List[0].Names[0].Name + "_"
}
goname += exp.Func.Name.Name
fmt.Fprintf(fgo2, "//go:cgo_export_dynamic %s\n", exp.ExpName)
fmt.Fprintf(fgo2, "//go:linkname _cgoexp%s_%s _cgoexp%s_%s\n", cPrefix, exp.ExpName, cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "//go:cgo_export_static _cgoexp%s_%s\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "//go:nosplit\n") // no split stack, so no use of m or g
fmt.Fprintf(fgo2, "//go:norace\n") // must not have race detector calls inserted
fmt.Fprintf(fgo2, "func _cgoexp%s_%s(a unsafe.Pointer, n int32, ctxt uintptr) {\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "\tfn := %s\n", goname)
// The indirect here is converting from a Go function pointer to a C function pointer.
fmt.Fprintf(fgo2, "\t_cgo_runtime_cgocallback(**(**unsafe.Pointer)(unsafe.Pointer(&fn)), a, uintptr(n), ctxt);\n")
fmt.Fprintf(fgo2, "}\n")
fmt.Fprintf(fm, "int _cgoexp%s_%s;\n", cPrefix, exp.ExpName)
// This code uses printer.Fprint, not conf.Fprint,
// because we don't want //line comments in the middle
// of the function types.
fmt.Fprintf(fgo2, "\n")
fmt.Fprintf(fgo2, "func %s(", goname)
comma := false
if fn.Recv != nil {
fmt.Fprintf(fgo2, "recv ")
printer.Fprint(fgo2, fset, fn.Recv.List[0].Type)
comma = true
}
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if comma {
fmt.Fprintf(fgo2, ", ")
}
fmt.Fprintf(fgo2, "p%d ", i)
printer.Fprint(fgo2, fset, atype)
comma = true
})
fmt.Fprintf(fgo2, ")")
if gccResult != "void" {
fmt.Fprint(fgo2, " (")
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
if i > 0 {
fmt.Fprint(fgo2, ", ")
}
fmt.Fprintf(fgo2, "r%d ", i)
printer.Fprint(fgo2, fset, atype)
})
fmt.Fprint(fgo2, ")")
}
fmt.Fprint(fgo2, " {\n")
if gccResult == "void" {
fmt.Fprint(fgo2, "\t")
} else {
// Verify that any results don't contain any
// Go pointers.
addedDefer := false
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
if !p.hasPointer(nil, atype, false) {
return
}
if !addedDefer {
fmt.Fprint(fgo2, "\tdefer func() {\n")
addedDefer = true
}
fmt.Fprintf(fgo2, "\t\t_cgoCheckResult(r%d)\n", i)
})
if addedDefer {
fmt.Fprint(fgo2, "\t}()\n")
}
fmt.Fprint(fgo2, "\treturn ")
}
if fn.Recv != nil {
fmt.Fprintf(fgo2, "recv.")
}
fmt.Fprintf(fgo2, "%s(", exp.Func.Name)
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if i > 0 {
fmt.Fprint(fgo2, ", ")
}
fmt.Fprintf(fgo2, "p%d", i)
})
fmt.Fprint(fgo2, ")\n")
fmt.Fprint(fgo2, "}\n")
}
fmt.Fprintf(fgcch, "%s", gccExportHeaderEpilog)
}
// Write out the C header allowing C code to call exported gccgo functions.
func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
gccgoSymbolPrefix := p.gccgoSymbolPrefix()
p.writeExportHeader(fgcch)
fmt.Fprintf(fgcc, "/* Code generated by cmd/cgo; DO NOT EDIT. */\n\n")
fmt.Fprintf(fgcc, "#include \"_cgo_export.h\"\n")
fmt.Fprintf(fgcc, "%s\n", gccgoExportFileProlog)
fmt.Fprintf(fgcc, "%s\n", tsanProlog)
fmt.Fprintf(fgcc, "%s\n", msanProlog)
for _, exp := range p.ExpFunc {
fn := exp.Func
fntype := fn.Type
cdeclBuf := new(bytes.Buffer)
resultCount := 0
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) { resultCount++ })
switch resultCount {
case 0:
fmt.Fprintf(cdeclBuf, "void")
case 1:
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
t := p.cgoType(atype)
fmt.Fprintf(cdeclBuf, "%s", t.C)
})
default:
// Declare a result struct.
fmt.Fprintf(fgcch, "\n/* Return type for %s */\n", exp.ExpName)
fmt.Fprintf(fgcch, "struct %s_return {\n", exp.ExpName)
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
t := p.cgoType(atype)
fmt.Fprintf(fgcch, "\t%s r%d;", t.C, i)
if len(aname) > 0 {
fmt.Fprintf(fgcch, " /* %s */", aname)
}
fmt.Fprint(fgcch, "\n")
})
fmt.Fprintf(fgcch, "};\n")
fmt.Fprintf(cdeclBuf, "struct %s_return", exp.ExpName)
}
cRet := cdeclBuf.String()
cdeclBuf = new(bytes.Buffer)
fmt.Fprintf(cdeclBuf, "(")
if fn.Recv != nil {
fmt.Fprintf(cdeclBuf, "%s recv", p.cgoType(fn.Recv.List[0].Type).C.String())
}
// Function parameters.
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if i > 0 || fn.Recv != nil {
fmt.Fprintf(cdeclBuf, ", ")
}
t := p.cgoType(atype)
fmt.Fprintf(cdeclBuf, "%s p%d", t.C, i)
})
fmt.Fprintf(cdeclBuf, ")")
cParams := cdeclBuf.String()
if len(exp.Doc) > 0 {
fmt.Fprintf(fgcch, "\n%s", exp.Doc)
}
fmt.Fprintf(fgcch, "extern %s %s%s;\n", cRet, exp.ExpName, cParams)
// We need to use a name that will be exported by the
// Go code; otherwise gccgo will make it static and we
// will not be able to link against it from the C
// code.
goName := "Cgoexp_" + exp.ExpName
fmt.Fprintf(fgcc, `extern %s %s %s __asm__("%s.%s");`, cRet, goName, cParams, gccgoSymbolPrefix, goName)
fmt.Fprint(fgcc, "\n")
fmt.Fprint(fgcc, "\nCGO_NO_SANITIZE_THREAD\n")
fmt.Fprintf(fgcc, "%s %s %s {\n", cRet, exp.ExpName, cParams)
if resultCount > 0 {
fmt.Fprintf(fgcc, "\t%s r;\n", cRet)
}
fmt.Fprintf(fgcc, "\tif(_cgo_wait_runtime_init_done)\n")
fmt.Fprintf(fgcc, "\t\t_cgo_wait_runtime_init_done();\n")
fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
fmt.Fprint(fgcc, "\t")
if resultCount > 0 {
fmt.Fprint(fgcc, "r = ")
}
fmt.Fprintf(fgcc, "%s(", goName)
if fn.Recv != nil {
fmt.Fprint(fgcc, "recv")
}
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if i > 0 || fn.Recv != nil {
fmt.Fprintf(fgcc, ", ")
}
fmt.Fprintf(fgcc, "p%d", i)
})
fmt.Fprint(fgcc, ");\n")
fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
if resultCount > 0 {
fmt.Fprint(fgcc, "\treturn r;\n")
}
fmt.Fprint(fgcc, "}\n")
// Dummy declaration for _cgo_main.c
fmt.Fprintf(fm, `char %s[1] __asm__("%s.%s");`, goName, gccgoSymbolPrefix, goName)
fmt.Fprint(fm, "\n")
// For gccgo we use a wrapper function in Go, in order
// to call CgocallBack and CgocallBackDone.
// This code uses printer.Fprint, not conf.Fprint,
// because we don't want //line comments in the middle
// of the function types.
fmt.Fprint(fgo2, "\n")
fmt.Fprintf(fgo2, "func %s(", goName)
if fn.Recv != nil {
fmt.Fprint(fgo2, "recv ")
printer.Fprint(fgo2, fset, fn.Recv.List[0].Type)
}
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if i > 0 || fn.Recv != nil {
fmt.Fprintf(fgo2, ", ")
}
fmt.Fprintf(fgo2, "p%d ", i)
printer.Fprint(fgo2, fset, atype)
})
fmt.Fprintf(fgo2, ")")
if resultCount > 0 {
fmt.Fprintf(fgo2, " (")
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
if i > 0 {
fmt.Fprint(fgo2, ", ")
}
printer.Fprint(fgo2, fset, atype)
})
fmt.Fprint(fgo2, ")")
}
fmt.Fprint(fgo2, " {\n")
fmt.Fprint(fgo2, "\tsyscall.CgocallBack()\n")
fmt.Fprint(fgo2, "\tdefer syscall.CgocallBackDone()\n")
fmt.Fprint(fgo2, "\t")
if resultCount > 0 {
fmt.Fprint(fgo2, "return ")
}
if fn.Recv != nil {
fmt.Fprint(fgo2, "recv.")
}
fmt.Fprintf(fgo2, "%s(", exp.Func.Name)
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if i > 0 {
fmt.Fprint(fgo2, ", ")
}
fmt.Fprintf(fgo2, "p%d", i)
})
fmt.Fprint(fgo2, ")\n")
fmt.Fprint(fgo2, "}\n")
}
fmt.Fprintf(fgcch, "%s", gccExportHeaderEpilog)
}
// writeExportHeader writes out the start of the _cgo_export.h file.
func (p *Package) writeExportHeader(fgcch io.Writer) {
fmt.Fprintf(fgcch, "/* Code generated by cmd/cgo; DO NOT EDIT. */\n\n")
pkg := *importPath
if pkg == "" {
pkg = p.PackagePath
}
fmt.Fprintf(fgcch, "/* package %s */\n\n", pkg)
fmt.Fprintf(fgcch, "%s\n", builtinExportProlog)
// Remove absolute paths from #line comments in the preamble.
// They aren't useful for people using the header file,
// and they mean that the header files change based on the
// exact location of GOPATH.
re := regexp.MustCompile(`(?m)^(#line\s+[0-9]+\s+")[^"]*[/\\]([^"]*")`)
preamble := re.ReplaceAllString(p.Preamble, "$1$2")
fmt.Fprintf(fgcch, "/* Start of preamble from import \"C\" comments. */\n\n")
fmt.Fprintf(fgcch, "%s\n", preamble)
fmt.Fprintf(fgcch, "\n/* End of preamble from import \"C\" comments. */\n\n")
fmt.Fprintf(fgcch, "%s\n", p.gccExportHeaderProlog())
}
// gccgoUsesNewMangling reports whether gccgo uses the new collision-free
// packagepath mangling scheme (see determineGccgoManglingScheme for more
// info).
func gccgoUsesNewMangling() bool {
if !gccgoMangleCheckDone {
gccgoNewmanglingInEffect = determineGccgoManglingScheme()
gccgoMangleCheckDone = true
}
return gccgoNewmanglingInEffect
}
const mangleCheckCode = `
package läufer
func Run(x int) int {
return 1
}
`
// determineGccgoManglingScheme performs a runtime test to see which
// flavor of packagepath mangling gccgo is using. Older versions of
// gccgo use a simple mangling scheme where there can be collisions
// between packages whose paths are different but mangle to the same
// string. More recent versions of gccgo use a new mangler that avoids
// these collisions. Return value is whether gccgo uses the new mangling.
func determineGccgoManglingScheme() bool {
// Emit a small Go file for gccgo to compile.
filepat := "*_gccgo_manglecheck.go"
var f *os.File
var err error
if f, err = ioutil.TempFile(*objDir, filepat); err != nil {
fatalf("%v", err)
}
gofilename := f.Name()
defer os.Remove(gofilename)
if err = ioutil.WriteFile(gofilename, []byte(mangleCheckCode), 0666); err != nil {
fatalf("%v", err)
}
// Compile with gccgo, capturing generated assembly.
gccgocmd := os.Getenv("GCCGO")
if gccgocmd == "" {
gpath, gerr := exec.LookPath("gccgo")
if gerr != nil {
fatalf("unable to locate gccgo: %v", gerr)
}
gccgocmd = gpath
}
cmd := exec.Command(gccgocmd, "-S", "-o", "-", gofilename)
buf, cerr := cmd.CombinedOutput()
if cerr != nil {
fatalf("%s", cerr)
}
// New mangling: expect go.l..u00e4ufer.Run
// Old mangling: expect go.l__ufer.Run
return regexp.MustCompile(`go\.l\.\.u00e4ufer\.Run`).Match(buf)
}
// gccgoPkgpathToSymbolNew converts a package path to a gccgo-style
// package symbol.
func gccgoPkgpathToSymbolNew(ppath string) string {
bsl := []byte{}
changed := false
for _, c := range []byte(ppath) {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z',
'0' <= c && c <= '9', c == '_', c == '.':
bsl = append(bsl, c)
default:
changed = true
encbytes := []byte(fmt.Sprintf("..z%02x", c))
bsl = append(bsl, encbytes...)
}
}
if !changed {
return ppath
}
return string(bsl)
}
// gccgoPkgpathToSymbolOld converts a package path to a gccgo-style
// package symbol using the older mangling scheme.
func gccgoPkgpathToSymbolOld(ppath string) string {
clean := func(r rune) rune {
switch {
case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z',
'0' <= r && r <= '9':
return r
}
return '_'
}
return strings.Map(clean, ppath)
}
// gccgoPkgpathToSymbol converts a package path to a mangled packagepath
// symbol.
func gccgoPkgpathToSymbol(ppath string) string {
if gccgoUsesNewMangling() {
return gccgoPkgpathToSymbolNew(ppath)
} else {
return gccgoPkgpathToSymbolOld(ppath)
}
}
// Return the package prefix when using gccgo.
func (p *Package) gccgoSymbolPrefix() string {
if !*gccgo {
return ""
}
if *gccgopkgpath != "" {
return gccgoPkgpathToSymbol(*gccgopkgpath)
}
if *gccgoprefix == "" && p.PackageName == "main" {
return "main"
}
prefix := gccgoPkgpathToSymbol(*gccgoprefix)
if prefix == "" {
prefix = "go"
}
return prefix + "." + p.PackageName
}
// Call a function for each entry in an ast.FieldList, passing the
// index into the list, the name if any, and the type.
func forFieldList(fl *ast.FieldList, fn func(int, string, ast.Expr)) {
if fl == nil {
return
}
i := 0
for _, r := range fl.List {
if r.Names == nil {
fn(i, "", r.Type)
i++
} else {
for _, n := range r.Names {
fn(i, n.Name, r.Type)
i++
}
}
}
}
func c(repr string, args ...interface{}) *TypeRepr {
return &TypeRepr{repr, args}
}
// Map predeclared Go types to Type.
var goTypes = map[string]*Type{
"bool": {Size: 1, Align: 1, C: c("GoUint8")},
"byte": {Size: 1, Align: 1, C: c("GoUint8")},
"int": {Size: 0, Align: 0, C: c("GoInt")},
"uint": {Size: 0, Align: 0, C: c("GoUint")},
"rune": {Size: 4, Align: 4, C: c("GoInt32")},
"int8": {Size: 1, Align: 1, C: c("GoInt8")},
"uint8": {Size: 1, Align: 1, C: c("GoUint8")},
"int16": {Size: 2, Align: 2, C: c("GoInt16")},
"uint16": {Size: 2, Align: 2, C: c("GoUint16")},
"int32": {Size: 4, Align: 4, C: c("GoInt32")},
"uint32": {Size: 4, Align: 4, C: c("GoUint32")},
"int64": {Size: 8, Align: 8, C: c("GoInt64")},
"uint64": {Size: 8, Align: 8, C: c("GoUint64")},
"float32": {Size: 4, Align: 4, C: c("GoFloat32")},
"float64": {Size: 8, Align: 8, C: c("GoFloat64")},
"complex64": {Size: 8, Align: 4, C: c("GoComplex64")},
"complex128": {Size: 16, Align: 8, C: c("GoComplex128")},
}
// Map an ast type to a Type.
func (p *Package) cgoType(e ast.Expr) *Type {
switch t := e.(type) {
case *ast.StarExpr:
x := p.cgoType(t.X)
return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("%s*", x.C)}
case *ast.ArrayType:
if t.Len == nil {
// Slice: pointer, len, cap.
return &Type{Size: p.PtrSize * 3, Align: p.PtrSize, C: c("GoSlice")}
}
// Non-slice array types are not supported.
case *ast.StructType:
// Not supported.
case *ast.FuncType:
return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("void*")}
case *ast.InterfaceType:
return &Type{Size: 2 * p.PtrSize, Align: p.PtrSize, C: c("GoInterface")}
case *ast.MapType:
return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("GoMap")}
case *ast.ChanType:
return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("GoChan")}
case *ast.Ident:
// Look up the type in the top level declarations.
// TODO: Handle types defined within a function.
for _, d := range p.Decl {
gd, ok := d.(*ast.GenDecl)
if !ok || gd.Tok != token.TYPE {
continue
}
for _, spec := range gd.Specs {
ts, ok := spec.(*ast.TypeSpec)
if !ok {
continue
}
if ts.Name.Name == t.Name {
return p.cgoType(ts.Type)
}
}
}
if def := typedef[t.Name]; def != nil {
return def
}
if t.Name == "uintptr" {
return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("GoUintptr")}
}
if t.Name == "string" {
// The string data is 1 pointer + 1 (pointer-sized) int.
return &Type{Size: 2 * p.PtrSize, Align: p.PtrSize, C: c("GoString")}
}
if t.Name == "error" {
return &Type{Size: 2 * p.PtrSize, Align: p.PtrSize, C: c("GoInterface")}
}
if r, ok := goTypes[t.Name]; ok {
if r.Size == 0 { // int or uint
rr := new(Type)
*rr = *r
rr.Size = p.IntSize
rr.Align = p.IntSize
r = rr
}
if r.Align > p.PtrSize {
r.Align = p.PtrSize
}
return r
}
error_(e.Pos(), "unrecognized Go type %s", t.Name)
return &Type{Size: 4, Align: 4, C: c("int")}
case *ast.SelectorExpr:
id, ok := t.X.(*ast.Ident)
if ok && id.Name == "unsafe" && t.Sel.Name == "Pointer" {
return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("void*")}
}
}
error_(e.Pos(), "Go type not supported in export: %s", gofmt(e))
return &Type{Size: 4, Align: 4, C: c("int")}
}
const gccProlog = `
#line 1 "cgo-gcc-prolog"
/*
If x and y are not equal, the type will be invalid
(have a negative array count) and an inscrutable error will come
out of the compiler and hopefully mention "name".
*/
#define __cgo_compile_assert_eq(x, y, name) typedef char name[(x-y)*(x-y)*-2+1];
/* Check at compile time that the sizes we use match our expectations. */
#define __cgo_size_assert(t, n) __cgo_compile_assert_eq(sizeof(t), n, _cgo_sizeof_##t##_is_not_##n)
__cgo_size_assert(char, 1)
__cgo_size_assert(short, 2)
__cgo_size_assert(int, 4)
typedef long long __cgo_long_long;
__cgo_size_assert(__cgo_long_long, 8)
__cgo_size_assert(float, 4)
__cgo_size_assert(double, 8)
extern char* _cgo_topofstack(void);
/* We use packed structs, but they are always aligned. */
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
#include <errno.h>
#include <string.h>
`
// Prologue defining TSAN functions in C.
const noTsanProlog = `
#define CGO_NO_SANITIZE_THREAD
#define _cgo_tsan_acquire()
#define _cgo_tsan_release()
`
// This must match the TSAN code in runtime/cgo/libcgo.h.
// This is used when the code is built with the C/C++ Thread SANitizer,
// which is not the same as the Go race detector.
// __tsan_acquire tells TSAN that we are acquiring a lock on a variable,
// in this case _cgo_sync. __tsan_release releases the lock.
// (There is no actual lock, we are just telling TSAN that there is.)
//
// When we call from Go to C we call _cgo_tsan_acquire.
// When the C function returns we call _cgo_tsan_release.
// Similarly, when C calls back into Go we call _cgo_tsan_release
// and then call _cgo_tsan_acquire when we return to C.
// These calls tell TSAN that there is a serialization point at the C call.
//
// This is necessary because TSAN, which is a C/C++ tool, can not see
// the synchronization in the Go code. Without these calls, when
// multiple goroutines call into C code, TSAN does not understand
// that the calls are properly synchronized on the Go side.
//
// To be clear, if the calls are not properly synchronized on the Go side,
// we will be hiding races. But when using TSAN on mixed Go C/C++ code
// it is more important to avoid false positives, which reduce confidence
// in the tool, than to avoid false negatives.
const yesTsanProlog = `
#line 1 "cgo-tsan-prolog"
#define CGO_NO_SANITIZE_THREAD __attribute__ ((no_sanitize_thread))
long long _cgo_sync __attribute__ ((common));
extern void __tsan_acquire(void*);
extern void __tsan_release(void*);
__attribute__ ((unused))
static void _cgo_tsan_acquire() {
__tsan_acquire(&_cgo_sync);
}
__attribute__ ((unused))
static void _cgo_tsan_release() {
__tsan_release(&_cgo_sync);
}
`
// Set to yesTsanProlog if we see -fsanitize=thread in the flags for gcc.
var tsanProlog = noTsanProlog
// noMsanProlog is a prologue defining an MSAN function in C.
// This is used when not compiling with -fsanitize=memory.
const noMsanProlog = `
#define _cgo_msan_write(addr, sz)
`
// yesMsanProlog is a prologue defining an MSAN function in C.
// This is used when compiling with -fsanitize=memory.
// See the comment above where _cgo_msan_write is called.
const yesMsanProlog = `
extern void __msan_unpoison(const volatile void *, size_t);
#define _cgo_msan_write(addr, sz) __msan_unpoison((addr), (sz))
`
// msanProlog is set to yesMsanProlog if we see -fsanitize=memory in the flags
// for the C compiler.
var msanProlog = noMsanProlog
const builtinProlog = `
#line 1 "cgo-builtin-prolog"
#include <stddef.h> /* for ptrdiff_t and size_t below */
/* Define intgo when compiling with GCC. */
typedef ptrdiff_t intgo;
#define GO_CGO_GOSTRING_TYPEDEF
typedef struct { const char *p; intgo n; } _GoString_;
typedef struct { char *p; intgo n; intgo c; } _GoBytes_;
_GoString_ GoString(char *p);
_GoString_ GoStringN(char *p, int l);
_GoBytes_ GoBytes(void *p, int n);
char *CString(_GoString_);
void *CBytes(_GoBytes_);
void *_CMalloc(size_t);
__attribute__ ((unused))
static size_t _GoStringLen(_GoString_ s) { return (size_t)s.n; }
__attribute__ ((unused))
static const char *_GoStringPtr(_GoString_ s) { return s.p; }
`
const goProlog = `
//go:linkname _cgo_runtime_cgocall runtime.cgocall
func _cgo_runtime_cgocall(unsafe.Pointer, uintptr) int32
//go:linkname _cgo_runtime_cgocallback runtime.cgocallback
func _cgo_runtime_cgocallback(unsafe.Pointer, unsafe.Pointer, uintptr, uintptr)
//go:linkname _cgoCheckPointer runtime.cgoCheckPointer
func _cgoCheckPointer(interface{}, ...interface{})
//go:linkname _cgoCheckResult runtime.cgoCheckResult
func _cgoCheckResult(interface{})
`
const gccgoGoProlog = `
func _cgoCheckPointer(interface{}, ...interface{})
func _cgoCheckResult(interface{})
`
const goStringDef = `
//go:linkname _cgo_runtime_gostring runtime.gostring
func _cgo_runtime_gostring(*_Ctype_char) string
func _Cfunc_GoString(p *_Ctype_char) string {
return _cgo_runtime_gostring(p)
}
`
const goStringNDef = `
//go:linkname _cgo_runtime_gostringn runtime.gostringn
func _cgo_runtime_gostringn(*_Ctype_char, int) string
func _Cfunc_GoStringN(p *_Ctype_char, l _Ctype_int) string {
return _cgo_runtime_gostringn(p, int(l))
}
`
const goBytesDef = `
//go:linkname _cgo_runtime_gobytes runtime.gobytes
func _cgo_runtime_gobytes(unsafe.Pointer, int) []byte
func _Cfunc_GoBytes(p unsafe.Pointer, l _Ctype_int) []byte {
return _cgo_runtime_gobytes(p, int(l))
}
`
const cStringDef = `
func _Cfunc_CString(s string) *_Ctype_char {
p := _cgo_cmalloc(uint64(len(s)+1))
pp := (*[1<<30]byte)(p)
copy(pp[:], s)
pp[len(s)] = 0
return (*_Ctype_char)(p)
}
`
const cBytesDef = `
func _Cfunc_CBytes(b []byte) unsafe.Pointer {
p := _cgo_cmalloc(uint64(len(b)))
pp := (*[1<<30]byte)(p)
copy(pp[:], b)
return p
}
`
const cMallocDef = `
func _Cfunc__CMalloc(n _Ctype_size_t) unsafe.Pointer {
return _cgo_cmalloc(uint64(n))
}
`
var builtinDefs = map[string]string{
"GoString": goStringDef,
"GoStringN": goStringNDef,
"GoBytes": goBytesDef,
"CString": cStringDef,
"CBytes": cBytesDef,
"_CMalloc": cMallocDef,
}
// Definitions for C.malloc in Go and in C. We define it ourselves
// since we call it from functions we define, such as C.CString.
// Also, we have historically ensured that C.malloc does not return
// nil even for an allocation of 0.
const cMallocDefGo = `
//go:cgo_import_static _cgoPREFIX_Cfunc__Cmalloc
//go:linkname __cgofn__cgoPREFIX_Cfunc__Cmalloc _cgoPREFIX_Cfunc__Cmalloc
var __cgofn__cgoPREFIX_Cfunc__Cmalloc byte
var _cgoPREFIX_Cfunc__Cmalloc = unsafe.Pointer(&__cgofn__cgoPREFIX_Cfunc__Cmalloc)
//go:linkname runtime_throw runtime.throw
func runtime_throw(string)
//go:cgo_unsafe_args
func _cgo_cmalloc(p0 uint64) (r1 unsafe.Pointer) {
_cgo_runtime_cgocall(_cgoPREFIX_Cfunc__Cmalloc, uintptr(unsafe.Pointer(&p0)))
if r1 == nil {
runtime_throw("runtime: C malloc failed")
}
return
}
`
// cMallocDefC defines the C version of C.malloc for the gc compiler.
// It is defined here because C.CString and friends need a definition.
// We define it by hand, rather than simply inventing a reference to
// C.malloc, because <stdlib.h> may not have been included.
// This is approximately what writeOutputFunc would generate, but
// skips the cgo_topofstack code (which is only needed if the C code
// calls back into Go). This also avoids returning nil for an
// allocation of 0 bytes.
const cMallocDefC = `
CGO_NO_SANITIZE_THREAD
void _cgoPREFIX_Cfunc__Cmalloc(void *v) {
struct {
unsigned long long p0;
void *r1;
} PACKED *a = v;
void *ret;
_cgo_tsan_acquire();
ret = malloc(a->p0);
if (ret == 0 && a->p0 == 0) {
ret = malloc(1);
}
a->r1 = ret;
_cgo_tsan_release();
}
`
func (p *Package) cPrologGccgo() string {
return strings.Replace(strings.Replace(cPrologGccgo, "PREFIX", cPrefix, -1),
"GCCGOSYMBOLPREF", p.gccgoSymbolPrefix(), -1)
}
const cPrologGccgo = `
#line 1 "cgo-c-prolog-gccgo"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
typedef unsigned char byte;
typedef intptr_t intgo;
struct __go_string {
const unsigned char *__data;
intgo __length;
};
typedef struct __go_open_array {
void* __values;
intgo __count;
intgo __capacity;
} Slice;
struct __go_string __go_byte_array_to_string(const void* p, intgo len);
struct __go_open_array __go_string_to_byte_array (struct __go_string str);
const char *_cgoPREFIX_Cfunc_CString(struct __go_string s) {
char *p = malloc(s.__length+1);
memmove(p, s.__data, s.__length);
p[s.__length] = 0;
return p;
}
void *_cgoPREFIX_Cfunc_CBytes(struct __go_open_array b) {
char *p = malloc(b.__count);
memmove(p, b.__values, b.__count);
return p;
}
struct __go_string _cgoPREFIX_Cfunc_GoString(char *p) {
intgo len = (p != NULL) ? strlen(p) : 0;
return __go_byte_array_to_string(p, len);
}
struct __go_string _cgoPREFIX_Cfunc_GoStringN(char *p, int32_t n) {
return __go_byte_array_to_string(p, n);
}
Slice _cgoPREFIX_Cfunc_GoBytes(char *p, int32_t n) {
struct __go_string s = { (const unsigned char *)p, n };
return __go_string_to_byte_array(s);
}
extern void runtime_throw(const char *);
void *_cgoPREFIX_Cfunc__CMalloc(size_t n) {
void *p = malloc(n);
if(p == NULL && n == 0)
p = malloc(1);
if(p == NULL)
runtime_throw("runtime: C malloc failed");
return p;
}
struct __go_type_descriptor;
typedef struct __go_empty_interface {
const struct __go_type_descriptor *__type_descriptor;
void *__object;
} Eface;
extern void runtimeCgoCheckPointer(Eface, Slice)
__asm__("runtime.cgoCheckPointer")
__attribute__((weak));
extern void localCgoCheckPointer(Eface, Slice)
__asm__("GCCGOSYMBOLPREF._cgoCheckPointer");
void localCgoCheckPointer(Eface ptr, Slice args) {
if(runtimeCgoCheckPointer) {
runtimeCgoCheckPointer(ptr, args);
}
}
extern void runtimeCgoCheckResult(Eface)
__asm__("runtime.cgoCheckResult")
__attribute__((weak));
extern void localCgoCheckResult(Eface)
__asm__("GCCGOSYMBOLPREF._cgoCheckResult");
void localCgoCheckResult(Eface val) {
if(runtimeCgoCheckResult) {
runtimeCgoCheckResult(val);
}
}
`
// builtinExportProlog is a shorter version of builtinProlog,
// to be put into the _cgo_export.h file.
// For historical reasons we can't use builtinProlog in _cgo_export.h,
// because _cgo_export.h defines GoString as a struct while builtinProlog
// defines it as a function. We don't change this to avoid unnecessarily
// breaking existing code.
// The test of GO_CGO_GOSTRING_TYPEDEF avoids a duplicate definition
// error if a Go file with a cgo comment #include's the export header
// generated by a different package.
const builtinExportProlog = `
#line 1 "cgo-builtin-export-prolog"
#include <stddef.h> /* for ptrdiff_t below */
#ifndef GO_CGO_EXPORT_PROLOGUE_H
#define GO_CGO_EXPORT_PROLOGUE_H
#ifndef GO_CGO_GOSTRING_TYPEDEF
typedef struct { const char *p; ptrdiff_t n; } _GoString_;
#endif
#endif
`
func (p *Package) gccExportHeaderProlog() string {
return strings.Replace(gccExportHeaderProlog, "GOINTBITS", fmt.Sprint(8*p.IntSize), -1)
}
// gccExportHeaderProlog is written to the exported header, after the
// import "C" comment preamble but before the generated declarations
// of exported functions. This permits the generated declarations to
// use the type names that appear in goTypes, above.
//
// The test of GO_CGO_GOSTRING_TYPEDEF avoids a duplicate definition
// error if a Go file with a cgo comment #include's the export header
// generated by a different package. Unfortunately GoString means two
// different things: in this prolog it means a C name for the Go type,
// while in the prolog written into the start of the C code generated
// from a cgo-using Go file it means the C.GoString function. There is
// no way to resolve this conflict, but it also doesn't make much
// difference, as Go code never wants to refer to the latter meaning.
const gccExportHeaderProlog = `
/* Start of boilerplate cgo prologue. */
#line 1 "cgo-gcc-export-header-prolog"
#ifndef GO_CGO_PROLOGUE_H
#define GO_CGO_PROLOGUE_H
typedef signed char GoInt8;
typedef unsigned char GoUint8;
typedef short GoInt16;
typedef unsigned short GoUint16;
typedef int GoInt32;
typedef unsigned int GoUint32;
typedef long long GoInt64;
typedef unsigned long long GoUint64;
typedef GoIntGOINTBITS GoInt;
typedef GoUintGOINTBITS GoUint;
typedef __SIZE_TYPE__ GoUintptr;
typedef float GoFloat32;
typedef double GoFloat64;
typedef float _Complex GoComplex64;
typedef double _Complex GoComplex128;
/*
static assertion to make sure the file is being used on architecture
at least with matching size of GoInt.
*/
typedef char _check_for_GOINTBITS_bit_pointer_matching_GoInt[sizeof(void*)==GOINTBITS/8 ? 1:-1];
#ifndef GO_CGO_GOSTRING_TYPEDEF
typedef _GoString_ GoString;
#endif
typedef void *GoMap;
typedef void *GoChan;
typedef struct { void *t; void *v; } GoInterface;
typedef struct { void *data; GoInt len; GoInt cap; } GoSlice;
#endif
/* End of boilerplate cgo prologue. */
#ifdef __cplusplus
extern "C" {
#endif
`
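// Illustrative note (added for this document, not part of the original source):
// the typedefs in gccExportHeaderProlog are what the declarations of exported
// functions are written in terms of. For a hypothetical exported function
//
//	//export Sum
//	func Sum(a, b int) int { return a + b }
//
// the generated _cgo_export.h would declare it roughly as
//
//	extern GoInt Sum(GoInt a, GoInt b);
//
// using the GoInt typedef defined above.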
// gccExportHeaderEpilog goes at the end of the generated header file.
const gccExportHeaderEpilog = `
#ifdef __cplusplus
}
#endif
`
// gccgoExportFileProlog is written to the _cgo_export.c file when
// using gccgo.
// We use weak declarations, and test the addresses, so that this code
// works with older versions of gccgo.
const gccgoExportFileProlog = `
#line 1 "cgo-gccgo-export-file-prolog"
extern _Bool runtime_iscgo __attribute__ ((weak));
static void GoInit(void) __attribute__ ((constructor));
static void GoInit(void) {
if(&runtime_iscgo)
runtime_iscgo = 1;
}
extern __SIZE_TYPE__ _cgo_wait_runtime_init_done() __attribute__ ((weak));
`
|
[
"\"GCCGO\""
] |
[] |
[
"GCCGO"
] |
[]
|
["GCCGO"]
|
go
| 1 | 0 | |
hookman/hooks.py
|
import inspect
import shutil
from pathlib import Path
from typing import Callable, List, Optional, Sequence
from zipfile import ZipFile
from hookman import hookman_utils
from hookman.exceptions import InvalidDestinationPathError, PluginAlreadyInstalledError
from hookman.plugin_config import PluginInfo
class HookSpecs:
"""
A class that holds the specification of the hooks. Currently the following specifications are available:
:kwparam str project_name:
This field will be used to identify the project and to name the hook functions. This is usually a project name
in a user-friendly format, such as "My Project".
:kwparam str version:
The current version of the spec, when a new hook is created or modified this version should be changed.
:kwparam str pyd_name:
Base name of the shared library for the bindings for the HookCaller class. If None, no bindings will be
generated.
:kwparam List[function] hooks:
A list with the hooks available for the project, each hook is a python function with type annotations.
:kwparam List[str] extra_includes:
Extra #include directives that will be added to the generated HookCaller.hpp file.
"""
def __init__(
self,
*,
project_name: str,
version: str,
pyd_name: str = None,
hooks: List[Callable],
extra_includes: List[str] = (),
) -> None:
for hook in hooks:
self._check_hook_arguments(hook)
self.project_name = project_name
self.version = version
self.pyd_name = pyd_name
self.hooks = hooks
self.extra_includes = list(extra_includes)
def _check_hook_arguments(self, hook: Callable):
"""
Check if the arguments of the hook are valid.
If an error is found, a TypeError exception will be raised.
"""
hook_args = inspect.getfullargspec(hook)
if not hook_args.args:
raise TypeError("It's not possible to create a hook without argument")
annotate_args = {
arg: hook_args.annotations[arg] for arg in hook_args.annotations if arg != "return"
}
if len(annotate_args) != len(hook_args.args):
raise TypeError("All hooks arguments must have the type informed")
if not inspect.getdoc(hook):
raise TypeError("All hooks must have documentation")
class HookMan:
"""
Main class of HookMan; it holds all the information related to the plugins
"""
def __init__(self, *, specs: HookSpecs, plugin_dirs: List[Path]):
self.specs = specs
self.plugins_dirs = plugin_dirs
self.hooks_available = {
f"{hook.__name__.lower()}": f"{specs.project_name.lower()}_v{specs.version}_{hook.__name__.lower()}"
for hook in specs.hooks
}
def install_plugin(self, plugin_file_path: Path, dest_path: Path) -> str:
"""
Extract the content of the zip file into dest_path.
If the installation succeeds, the name of the installed plugin is returned.
The following checks are executed to validate the consistency of the inputs:
1. The destination path must be one of the paths informed during the initialization of HookMan (plugins_dirs field).
2. The plugins_dirs cannot have two plugins with the same name.
:plugin_file_path: The path to the ``.hmplugin`` file.
:dest_path: The destination where the plugin should be placed.
"""
plugin_file_zip = ZipFile(plugin_file_path)
PluginInfo.validate_plugin_file(plugin_file_zip=plugin_file_zip)
if dest_path not in self.plugins_dirs:
raise InvalidDestinationPathError(
f"Invalid destination path, {dest_path} is not one of "
f"the paths that were informed when the HookMan "
f"object was initialized: {self.plugins_dirs}."
)
yaml_content = plugin_file_zip.open("assets/plugin.yaml").read().decode("utf-8")
plugin_id = PluginInfo._load_yaml_file(yaml_content)["id"]
plugins_dirs = [x for x in dest_path.iterdir() if x.is_dir()]
if plugin_id in [x.name for x in plugins_dirs]:
raise PluginAlreadyInstalledError("Plugin already installed")
plugin_destination_folder = dest_path / plugin_id
plugin_destination_folder.mkdir(parents=True)
plugin_file_zip.extractall(plugin_destination_folder)
return plugin_id
def remove_plugin(self, caption: str):
"""
This method receives the name of the plugin as input and completely removes the plugin from ``plugins_dirs``.
:caption: Name of the plugin to be removed
"""
for plugin in self.get_plugins_available():
if plugin.id == caption:
shutil.rmtree(plugin.yaml_location.parents[1])
break
def get_plugins_available(
self, ignored_plugins: Sequence[str] = ()
) -> Optional[List[PluginInfo]]:
"""
Return a list of :ref:`plugin-info-api-section` that are available on ``plugins_dirs``
Optionally you can pass a list of plugins that should be ignored.
When informed, the `ignored_plugins` must be a list with the names of the plugins (same as shared_lib_name)
instead of the plugin caption.
The :ref:`plugin-info-api-section` is an object that holds all information related to the plugin.
"""
plugin_config_files = hookman_utils.find_config_files(self.plugins_dirs)
plugins_available = [
PluginInfo(plugin_file, self.hooks_available) for plugin_file in plugin_config_files
]
return [
plugin_info
for plugin_info in plugins_available
if plugin_info.id not in ignored_plugins
]
def get_hook_caller(self, ignored_plugins: Sequence[str] = ()):
"""
Return a HookCaller class that holds all references for the functions implemented
on the plugins.
When informed, the `ignored_plugins` must be a list with the names of the plugins (same as shared_lib_name)
instead of the plugin caption.
"""
_hookman = __import__(self.specs.pyd_name)
hook_caller = _hookman.HookCaller()
for plugin in self.get_plugins_available(ignored_plugins):
hook_caller.load_impls_from_library(str(plugin.shared_lib_path))
return hook_caller
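# --- Illustrative sketch (added for this document, not part of the original sources) ---
# Hypothetical end-to-end usage of HookMan: install a packaged plugin and obtain a
# HookCaller bound to every implementation found in the configured plugin directory.
def _example_usage(specs: "HookSpecs") -> None:
    plugins_dir = Path("/tmp/acme_plugins")  # hypothetical plugin directory
    hm = HookMan(specs=specs, plugin_dirs=[plugins_dir])
    plugin_id = hm.install_plugin(
        plugin_file_path=Path("/tmp/acme_plugin.hmplugin"),  # hypothetical package
        dest_path=plugins_dir,
    )
    print("installed:", plugin_id, [p.id for p in hm.get_plugins_available()])
    # Requires the compiled bindings named by specs.pyd_name to be importable.
    hook_caller = hm.get_hook_caller()
    print(hook_caller)
# ----------------------------------------------------------------------------------------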
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import fileinput
import glob
import os
import re
import shutil
import socket
import subprocess
import sys
import time
import pwd
from ambari_commons import OSCheck, OSConst
from ambari_commons.logging_utils import get_silent, get_verbose, print_error_msg, print_info_msg, print_warning_msg
from ambari_commons.exceptions import NonFatalException, FatalException
from ambari_commons.os_utils import copy_files, find_in_path, is_root, remove_file, run_os_command
from ambari_server.dbConfiguration import DBMSConfig, USERNAME_PATTERN, SETUP_DB_CONNECT_ATTEMPTS, \
SETUP_DB_CONNECT_TIMEOUT, STORAGE_TYPE_LOCAL, DEFAULT_USERNAME, DEFAULT_PASSWORD
from ambari_server.serverConfiguration import encrypt_password, store_password_file, \
get_ambari_properties, get_resources_location, get_value_from_properties, configDefaults, \
OS_FAMILY, AMBARI_PROPERTIES_FILE, RESOURCES_DIR_PROPERTY, \
JDBC_DATABASE_PROPERTY, JDBC_DATABASE_NAME_PROPERTY, JDBC_POSTGRES_SCHEMA_PROPERTY, \
JDBC_HOSTNAME_PROPERTY, JDBC_PORT_PROPERTY, \
JDBC_USER_NAME_PROPERTY, JDBC_PASSWORD_PROPERTY, JDBC_PASSWORD_FILENAME, \
JDBC_DRIVER_PROPERTY, JDBC_URL_PROPERTY, \
JDBC_RCA_USER_NAME_PROPERTY, JDBC_RCA_PASSWORD_ALIAS, JDBC_RCA_PASSWORD_FILE_PROPERTY, \
JDBC_RCA_DRIVER_PROPERTY, JDBC_RCA_URL_PROPERTY, \
PERSISTENCE_TYPE_PROPERTY, JDBC_CONNECTION_POOL_TYPE, JDBC_CONNECTION_POOL_ACQUISITION_SIZE, \
JDBC_CONNECTION_POOL_IDLE_TEST_INTERVAL, JDBC_CONNECTION_POOL_MAX_AGE, JDBC_CONNECTION_POOL_MAX_IDLE_TIME, \
JDBC_CONNECTION_POOL_MAX_IDLE_TIME_EXCESS, JDBC_SQLA_SERVER_NAME, LOCAL_DATABASE_ADMIN_PROPERTY
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_server.userInput import get_YN_input, get_validated_string_input, read_password
from ambari_server.utils import get_postgre_hba_dir, get_postgre_running_status
from ambari_server.ambariPath import AmbariPath
from resource_management.core import sudo
ORACLE_DB_ID_TYPES = ["Service Name", "SID"]
ORACLE_SNAME_PATTERN = "jdbc:oracle:thin:@.+:.+:.+"
JDBC_PROPERTIES_PREFIX = "server.jdbc.properties."
class LinuxDBMSConfig(DBMSConfig):
def __init__(self, options, properties, storage_type):
super(LinuxDBMSConfig, self).__init__(options, properties, storage_type)
#Init the database configuration data here, if any
self.dbms_full_name = ""
# The values from options supersede the values from properties
self.database_host = DBMSConfig._init_member_with_prop_default(options, "database_host",
properties, JDBC_HOSTNAME_PROPERTY, "localhost")
#self.database_port is set in the subclasses
self.database_name = DBMSConfig._init_member_with_prop_default(options, "database_name",
properties, JDBC_DATABASE_NAME_PROPERTY, configDefaults.DEFAULT_DB_NAME)
self.database_username = DBMSConfig._init_member_with_prop_default(options, "database_username",
properties, JDBC_USER_NAME_PROPERTY, DEFAULT_USERNAME)
self.local_admin_user = DBMSConfig._init_member_with_prop_default(options, "local_admin_user",
properties, LOCAL_DATABASE_ADMIN_PROPERTY, "postgres")
self.database_password = getattr(options, "database_password", "")
if not self.database_password:
self.database_password = DBMSConfig._read_password_from_properties(properties, options)
self.database_url_pattern = ""
self.database_url_pattern_alt = ""
self.database_storage_name = ""
self.sid_or_sname = "sid"
self.init_script_file = ""
self.drop_tables_script_file = ""
self.client_tool_usage_pattern = ""
self.jdbc_extra_params = []
def _prompt_db_properties(self):
if self.must_set_database_options:
if self.persistence_type != STORAGE_TYPE_LOCAL:
self.database_host = get_validated_string_input(
"Hostname (" + self.database_host + "): ",
self.database_host,
"^[a-zA-Z0-9.\-]*$",
"Invalid hostname.",
False
)
self.database_port = get_validated_string_input(
"Port (" + self.database_port + "): ",
self.database_port,
"^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$",
"Invalid port.",
False
)
if self.persistence_type == STORAGE_TYPE_LOCAL:
self.local_admin_user = get_validated_string_input(
"Database admin user ("+ self.local_admin_user + "): ",
self.local_admin_user,
".+",
"Invalid username.",
False
)
if not self._configure_database_name():
return False
# Username is common for Oracle/MySQL/MSSQL/Postgres
self.database_username = get_validated_string_input(
'Username (' + self.database_username + '): ',
self.database_username,
USERNAME_PATTERN,
"Invalid characters in username. Start with _ or alpha "
"followed by alphanumeric or _ or - characters",
False
)
self.database_password = LinuxDBMSConfig._configure_database_password(True, self.database_password)
self._display_db_properties()
return True
# Supporting remote server for all the DB types. Supporting local server only for PostgreSQL.
def _setup_remote_server(self, args, options):
self._store_remote_properties(args, options)
def _setup_remote_database(self):
properties = get_ambari_properties()
if properties == -1:
err = 'Error getting ambari properties'
print_error_msg(err)
raise FatalException(-1, err)
if self.ensure_jdbc_driver_installed(properties):
print 'Configuring remote database connection properties...'
retcode = self._setup_remote_db()
if retcode == -1:
err = "Remote database setup aborted."
raise NonFatalException(err)
if not retcode == 0:
err = 'Error while configuring connection properties. Exiting'
raise FatalException(retcode, err)
def _reset_remote_database(self):
client_usage_cmd_drop = self._get_remote_script_line(self.drop_tables_script_file)
client_usage_cmd_init = self._get_remote_script_line(self.init_script_file)
print_warning_msg('To reset Ambari Server schema ' +
'you must run the following DDL against the database to '
+ 'drop the schema:' + os.linesep + client_usage_cmd_drop
+ os.linesep + 'Then you must run the following DDL ' +
'against the database to create the schema: ' + os.linesep +
client_usage_cmd_init + os.linesep)
def _get_default_driver_path(self, properties):
return os.path.join(configDefaults.JAVA_SHARE_PATH, self.driver_file_name)
def _install_jdbc_driver(self, properties, files_list):
if type(files_list) is not int:
print 'Copying JDBC drivers to server resources...'
resources_dir = get_resources_location(properties)
db_name = self.dbms_full_name.lower()
symlink_name = db_name + "-jdbc-driver.jar"
jdbc_symlink = os.path.join(resources_dir, symlink_name)
db_default_driver_path = self._get_default_driver_path(properties)
if os.path.lexists(jdbc_symlink):
os.remove(jdbc_symlink)
copy_status = copy_files(files_list, resources_dir)
if not copy_status == 0:
raise FatalException(-1, "Failed to copy JDBC drivers to server resources")
if db_default_driver_path in files_list:
os.symlink(os.path.join(resources_dir, self.driver_file_name), jdbc_symlink)
else:
if files_list == -1:
return False
return True
def _configure_database_name(self):
return True
def _get_remote_script_line(self, scriptFile):
return None
@staticmethod
def _configure_database_password(showDefault=True, defaultPassword=DEFAULT_PASSWORD):
passwordDefault = defaultPassword
if showDefault:
passwordPrompt = 'Enter Database Password (' + passwordDefault + '): '
else:
passwordPrompt = 'Enter Database Password: '
passwordPattern = "^[a-zA-Z0-9_-]*$"
passwordDescr = "Invalid characters in password. Use only alphanumeric or " \
"_ or - characters"
password = read_password(passwordDefault, passwordPattern, passwordPrompt,
passwordDescr)
return password
@staticmethod
def _get_validated_db_name(database_storage_name, database_name):
return get_validated_string_input(
database_storage_name + " name ("
+ database_name + "): ",
database_name,
".*",
"Invalid " + database_storage_name.lower() + " name.",
False
)
def _display_db_properties(self):
print_info_msg('Using database options: {database},{host},{port},{schema},{user},{password}'.format(
database=self.dbms,
host=self.database_host,
port=self.database_port,
schema=self.database_name,
user=self.database_username,
password=self.database_password
))
#Check if the required jdbc drivers are present
@staticmethod
def _find_jdbc_driver(jdbc_pattern):
drivers = []
drivers.extend(glob.glob(configDefaults.JAVA_SHARE_PATH + os.sep + jdbc_pattern))
if drivers:
return drivers
return -1
def _extract_client_tarball(self, properties):
pass
def _get_native_libs(self, properties):
return None
# Let the console user initialize the remote database schema
def _setup_remote_db(self):
setup_msg = "Before starting Ambari Server, you must run the following DDL " \
"against the database to create the schema: {0}".format(self.init_script_file)
print_warning_msg(setup_msg)
proceed = get_YN_input("Proceed with configuring remote database connection properties [y/n] (y)? ", True)
retCode = 0 if proceed else -1
return retCode
def _store_password_property(self, properties, property_name, options):
properties.process_pair(property_name,
store_password_file(self.database_password, JDBC_PASSWORD_FILENAME))
if self.isSecure:
encrypted_password = encrypt_password(JDBC_RCA_PASSWORD_ALIAS, self.database_password, options)
if encrypted_password != self.database_password:
properties.process_pair(property_name, encrypted_password)
def _get_database_hostname(self):
# fully qualify the hostname to make sure all the other hosts can connect
# to the jdbc hostname since it's passed on to the agents for RCA
jdbc_hostname = self.database_host
if (self.database_host == "localhost"):
jdbc_hostname = socket.getfqdn()
return jdbc_hostname
def _get_jdbc_connection_string(self):
jdbc_hostname = self._get_database_hostname()
connectionStringFormat = self.database_url_pattern
if self.sid_or_sname == "sid":
connectionStringFormat = self.database_url_pattern_alt
return connectionStringFormat.format(jdbc_hostname, self.database_port, self.database_name)
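# Illustrative sketch (added for this document, not part of the original sources):
# the connection string is simply the class-level pattern filled with host, port
# and database name; e.g. for the PostgreSQL pattern with hypothetical values:
#   "jdbc:postgresql://{0}:{1}/{2}".format("db.example.com", "5432", "ambari")
#   == "jdbc:postgresql://db.example.com:5432/ambari"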
# Store set of properties for remote database connection
def _store_remote_properties(self, properties, options):
properties.process_pair(PERSISTENCE_TYPE_PROPERTY, self.persistence_type)
properties.process_pair(JDBC_DATABASE_PROPERTY, self.dbms)
properties.process_pair(JDBC_HOSTNAME_PROPERTY, self.database_host)
properties.process_pair(JDBC_PORT_PROPERTY, self.database_port)
properties.process_pair(JDBC_DATABASE_NAME_PROPERTY, self.database_name)
properties.process_pair(JDBC_DRIVER_PROPERTY, self.driver_class_name)
connection_string = self._get_jdbc_connection_string()
properties.process_pair(JDBC_URL_PROPERTY, connection_string)
properties.process_pair(JDBC_USER_NAME_PROPERTY, self.database_username)
self._store_password_property(properties, JDBC_PASSWORD_PROPERTY, options)
# save any other defined properties to pass to JDBC
for pair in self.jdbc_extra_params:
properties.process_pair(JDBC_PROPERTIES_PREFIX + pair[0], pair[1])
properties.process_pair(JDBC_RCA_DRIVER_PROPERTY, self.driver_class_name)
properties.process_pair(JDBC_RCA_URL_PROPERTY, connection_string)
properties.process_pair(JDBC_RCA_USER_NAME_PROPERTY, self.database_username)
self._store_password_property(properties, JDBC_RCA_PASSWORD_FILE_PROPERTY, options)
# connection pooling (internal JPA by default)
properties.process_pair(JDBC_CONNECTION_POOL_TYPE, "internal")
# PostgreSQL configuration and setup
class PGConfig(LinuxDBMSConfig):
# PostgreSQL settings
SETUP_DB_CMD = [AMBARI_SUDO_BINARY, 'su', 'postgres', '-',
'--command=psql -f {0} -v username=\'"{1}"\' -v password="\'{2}\'" -v dbname="{3}"']
EXECUTE_SCRIPT_AS_USER = [AMBARI_SUDO_BINARY, "bash", "-c", 'export PGPASSWORD={0} && psql -U {1} -f {2}']
EXECUTE_QUERY_AS_POSTGRES_FOR_DB_SILENT = [AMBARI_SUDO_BINARY, 'su', 'postgres', '-', '--command=psql -qAt -c "{0}" {1}']
EXECUTE_QUERY_AS_POSTGRES_FOR_DB = [AMBARI_SUDO_BINARY, 'su', 'postgres', '-', '--command=psql -c "{0}" {1}']
PG_ERROR_BLOCKED = "is being accessed by other users"
PG_STATUS_RUNNING = None
SERVICE_CMD = "/usr/bin/env service"
PG_SERVICE_NAME = "postgresql"
PG_HBA_DIR = None
PG_ST_CMD = "%s %s status" % (SERVICE_CMD, PG_SERVICE_NAME)
if os.path.isfile("/usr/bin/postgresql-setup"):
PG_INITDB_CMD = "/usr/bin/postgresql-setup initdb"
else:
PG_INITDB_CMD = "%s %s initdb" % (SERVICE_CMD, PG_SERVICE_NAME)
PG_START_CMD = AMBARI_SUDO_BINARY + " %s %s start" % (SERVICE_CMD, PG_SERVICE_NAME)
PG_RESTART_CMD = AMBARI_SUDO_BINARY + " %s %s restart" % (SERVICE_CMD, PG_SERVICE_NAME)
PG_HBA_RELOAD_CMD = AMBARI_SUDO_BINARY + " %s %s reload" % (SERVICE_CMD, PG_SERVICE_NAME)
PG_HBA_CONF_FILE = None
PG_HBA_CONF_FILE_BACKUP = None
POSTGRESQL_CONF_FILE = None
POSTGRES_EMBEDDED_INIT_FILE = AmbariPath.get("/var/lib/ambari-server/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql")
POSTGRES_EMBEDDED_DROP_FILE = AmbariPath.get("/var/lib/ambari-server/resources/Ambari-DDL-Postgres-EMBEDDED-DROP.sql")
POSTGRES_INIT_FILE = AmbariPath.get("/var/lib/ambari-server/resources/Ambari-DDL-Postgres-CREATE.sql")
POSTGRES_DROP_FILE = AmbariPath.get("/var/lib/ambari-server/resources/Ambari-DDL-Postgres-DROP.sql")
def __init__(self, options, properties, storage_type):
super(PGConfig, self).__init__(options, properties, storage_type)
#Init the database configuration data here, if any
self.dbms = "postgres"
self.dbms_full_name = "PostgreSQL"
self.driver_class_name = "org.postgresql.Driver"
self.driver_file_name = "postgresql-jdbc.jar"
self.database_storage_name = "Database"
# PostgreSQL seems to require additional schema coordinates
self.postgres_schema = DBMSConfig._init_member_with_prop_default(options, "postgres_schema",
properties, JDBC_POSTGRES_SCHEMA_PROPERTY, self.database_name)
self.database_port = DBMSConfig._init_member_with_prop_default(options, "database_port",
properties, JDBC_PORT_PROPERTY, "5432")
self.database_url_pattern = "jdbc:postgresql://{0}:{1}/{2}"
self.database_url_pattern_alt = "jdbc:postgresql://{0}:{1}/{2}"
self.JDBC_DRIVER_INSTALL_MSG = 'Before starting Ambari Server, ' \
'you must copy the {0} JDBC driver JAR file to {1} and set property "server.jdbc.driver.path=[path/to/custom_jdbc_driver]" in ambari.properties.'.format(
self.dbms_full_name, configDefaults.JAVA_SHARE_PATH)
self._is_user_changed = False
if self.persistence_type == STORAGE_TYPE_LOCAL:
PGConfig.PG_STATUS_RUNNING = get_postgre_running_status()
PGConfig.PG_HBA_DIR = get_postgre_hba_dir(OS_FAMILY)
PGConfig.PG_HBA_CONF_FILE = os.path.join(PGConfig.PG_HBA_DIR, "pg_hba.conf")
PGConfig.PG_HBA_CONF_FILE_BACKUP = os.path.join(PGConfig.PG_HBA_DIR, "pg_hba_bak.conf.old")
PGConfig.POSTGRESQL_CONF_FILE = os.path.join(PGConfig.PG_HBA_DIR, "postgresql.conf")
postgres_init_file_default = PGConfig.POSTGRES_EMBEDDED_INIT_FILE
postgres_drop_file_default = PGConfig.POSTGRES_EMBEDDED_DROP_FILE
else:
postgres_init_file_default = PGConfig.POSTGRES_INIT_FILE
postgres_drop_file_default = PGConfig.POSTGRES_DROP_FILE
self.init_script_file = DBMSConfig._init_member_with_default(options, "init_script_file",
postgres_init_file_default)
self.drop_tables_script_file = DBMSConfig._init_member_with_default(options, "drop_script_file",
postgres_drop_file_default)
self.client_tool_usage_pattern = 'su -postgres --command=psql -f {0} -v username=\'"{1}"\' -v password="\'{2}\'"'
#
# Public methods
#
def ensure_dbms_is_running(self, options, properties, scmStatus=None):
if self._is_local_database():
if is_root():
(pg_status, retcode, out, err) = PGConfig._check_postgre_up()
if not retcode == 0:
err = 'Unable to start PostgreSQL server. Status {0}. {1}. Exiting'.format(pg_status, err)
raise FatalException(retcode, err)
else:
print "Unable to check PostgreSQL server status when starting " \
"without root privileges."
print "Please do not forget to start PostgreSQL server."
#
# Private implementation
#
# Supporting remote server for all the DB types. Supporting local server only for PostgreSQL.
def _setup_local_server(self, properties, options):
# check if jdbc user is changed
self._is_user_changed = PGConfig._is_jdbc_user_changed(self.database_username)
print 'Default properties detected. Using built-in database.'
self._store_local_properties(properties, options)
def _create_postgres_lock_directory(self):
postgres_user_uid = None
try:
postgres_user_uid = pwd.getpwnam("postgres").pw_uid
except KeyError:
print "WARNING: Unable to create /var/run/postgresql directory, because user [postgres] doesn't exist. Potentially," \
" postgresql service start can be failed."
return
try:
if not os.path.isdir("/var/run/postgresql"):
os.mkdir("/var/run/postgresql")
except Exception as e:
print "WARNING: Unable to create /var/run/postgresql directory. Potentially," \
" postgresql service start can be failed."
print "Unexpected error: " + str(e)
return
if postgres_user_uid:
os.chown("/var/run/postgresql", postgres_user_uid, -1)
def _setup_local_database(self):
print 'Checking PostgreSQL...'
(pg_status, retcode, out, err) = PGConfig._check_postgre_up()
if not retcode == 0:
err = 'Unable to start PostgreSQL server. Exiting'
raise FatalException(retcode, err)
print 'Configuring local database...'
if self._is_user_changed:
#remove backup for pg_hba in order to reconfigure postgres
remove_file(PGConfig.PG_HBA_CONF_FILE_BACKUP)
print 'Configuring PostgreSQL...'
retcode, out, err = self._configure_postgres()
if not retcode == 0:
err = 'Unable to configure PostgreSQL server. Exiting'
raise FatalException(retcode, err)
retcode, out, err = self._setup_db()
if not retcode == 0:
err = 'Running database init script failed. Exiting.'
raise FatalException(retcode, err)
def _reset_local_database(self):
#force reset if silent option provided
if get_silent():
default = "yes"
else:
default = "no"
# Run automatic reset only for embedded DB
okToRun = get_YN_input("Confirm server reset [yes/no]({0})? ".format(default), get_silent())
if not okToRun:
err = "Ambari Server 'reset' cancelled"
raise FatalException(1, err)
print "Resetting the Server database..."
dbname = self.database_name
filename = self.drop_tables_script_file
username = self.database_username
password = self.database_password
command = PGConfig.SETUP_DB_CMD[:]
command[2] = self.local_admin_user
command[-1] = command[-1].format(filename, username, password, dbname)
drop_retcode, drop_outdata, drop_errdata = run_os_command(command)
if not drop_retcode == 0:
raise FatalException(1, drop_errdata)
if drop_errdata and PGConfig.PG_ERROR_BLOCKED in drop_errdata:
raise FatalException(1, "Database is in use. Please, make sure all connections to the database are closed")
if drop_errdata and get_verbose():
print_warning_msg(drop_errdata)
print_info_msg("About to run database setup")
retcode, outdata, errdata = self._setup_db()
if errdata and get_verbose():
print_warning_msg(errdata)
if (errdata and 'ERROR' in errdata.upper()) or (drop_errdata and 'ERROR' in drop_errdata.upper()):
err = "Non critical error in DDL"
if not get_verbose():
err += ", use --verbose for more information"
raise NonFatalException(err)
def _reset_remote_database(self):
super(PGConfig, self)._reset_remote_database()
raise NonFatalException("Please set DB password to PGPASSWORD env variable before running DDL`s!")
def _is_jdbc_driver_installed(self, properties):
return 0
def _configure_database_name(self):
self.database_name = LinuxDBMSConfig._get_validated_db_name(self.database_storage_name, self.database_name)
self.postgres_schema = PGConfig._get_validated_db_schema(self.postgres_schema)
return True
def _get_remote_script_line(self, scriptFile):
os.environ["PGPASSWORD"] = self.database_password
return "psql -h {0} -p {1} -d {2} -U {3} -f {4} -v username='{3}'".format(
self.database_host,
self.database_port,
self.database_name,
self.database_username,
scriptFile
)
@staticmethod
def _get_validated_db_schema(postgres_schema):
return get_validated_string_input(
"Postgres schema (" + postgres_schema + "): ",
postgres_schema,
"^[a-zA-Z0-9_\-]*$",
"Invalid schema name.",
False, allowEmpty=True
)
# Check if jdbc user is changed
@staticmethod
def _is_jdbc_user_changed(database_username):
properties = get_ambari_properties()
if properties == -1:
print_error_msg("Error getting ambari properties")
return None
previous_user = get_value_from_properties(properties, JDBC_USER_NAME_PROPERTY, "")
if previous_user and database_username:
if previous_user != database_username:
return True
else:
return False
return None
# Store local database connection properties
def _store_local_properties(self, properties, options):
properties.removeProp(JDBC_DATABASE_PROPERTY)
properties.removeProp(JDBC_DATABASE_NAME_PROPERTY)
properties.removeProp(JDBC_POSTGRES_SCHEMA_PROPERTY)
properties.removeProp(JDBC_HOSTNAME_PROPERTY)
properties.removeProp(JDBC_RCA_DRIVER_PROPERTY)
properties.removeProp(JDBC_RCA_URL_PROPERTY)
properties.removeProp(JDBC_PORT_PROPERTY)
properties.removeProp(JDBC_DRIVER_PROPERTY)
properties.removeProp(JDBC_URL_PROPERTY)
# Store the properties
properties.process_pair(PERSISTENCE_TYPE_PROPERTY, self.persistence_type)
properties.process_pair(JDBC_DATABASE_PROPERTY, self.dbms)
properties.process_pair(JDBC_DATABASE_NAME_PROPERTY, self.database_name)
properties.process_pair(JDBC_POSTGRES_SCHEMA_PROPERTY, self.postgres_schema)
properties.process_pair(JDBC_USER_NAME_PROPERTY, self.database_username)
# connection pooling (internal JPA by default)
properties.process_pair(JDBC_CONNECTION_POOL_TYPE, "internal")
properties.process_pair(LOCAL_DATABASE_ADMIN_PROPERTY, self.local_admin_user)
self._store_password_property(properties, JDBC_PASSWORD_PROPERTY, options)
@staticmethod
def _get_postgre_status():
retcode, out, err = run_os_command(PGConfig.PG_ST_CMD)
# on RHEL and SUSE PG_ST_CMD returns RC 0 for running and 3 for stopped
if retcode == 0:
if out.strip() == "Running clusters:":
pg_status = "stopped"
else:
pg_status = PGConfig.PG_STATUS_RUNNING
else:
if retcode == 3:
pg_status = "stopped"
else:
pg_status = None
return pg_status, retcode, out, err
@staticmethod
def _check_postgre_up():
pg_status, retcode, out, err = PGConfig._get_postgre_status()
if pg_status == PGConfig.PG_STATUS_RUNNING:
print_info_msg("PostgreSQL is running")
return pg_status, 0, out, err
else:
# run initdb only on non-Ubuntu systems, as Ubuntu does not have the initdb cmd.
if not OSCheck.is_ubuntu_family():
print "Running initdb: This may take up to a minute."
retcode, out, err = run_os_command(PGConfig.PG_INITDB_CMD)
if retcode == 0:
print out
print "About to start PostgreSQL"
try:
process = subprocess.Popen(PGConfig.PG_START_CMD.split(' '),
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE
)
if OSCheck.is_suse_family():
time.sleep(20)
result = process.poll()
print_info_msg("Result of postgres start cmd: " + str(result))
if result is None:
process.kill()
pg_status, retcode, out, err = PGConfig._get_postgre_status()
else:
retcode = result
else:
out, err = process.communicate()
retcode = process.returncode
pg_status, retcode, out, err = PGConfig._get_postgre_status()
if pg_status == PGConfig.PG_STATUS_RUNNING:
print_info_msg("Postgres process is running. Returning...")
return pg_status, 0, out, err
except Exception, e:
pg_status, retcode, out, err = PGConfig._get_postgre_status()
if pg_status == PGConfig.PG_STATUS_RUNNING:
return pg_status, 0, out, err
else:
print_error_msg("Postgres start failed. " + str(e))
return pg_status, retcode, out, err
def _setup_db(self):
#password access to ambari-server and mapred
dbname = self.database_name
scriptFile = self.init_script_file
username = self.database_username
password = self.database_password
#setup DB
command = PGConfig.SETUP_DB_CMD[:]
command[2] = self.local_admin_user
command[-1] = command[-1].format(scriptFile, username, password, dbname)
retcode, outdata, errdata = self.run_with_retries(command, "Creating schema and user...")
if retcode == 0:
ddl_command = PGConfig.EXECUTE_SCRIPT_AS_USER[:]
ddl_command[-1] = ddl_command[-1].format(
password,
username,
PGConfig.POSTGRES_INIT_FILE
)
retcode, outdata, errdata = self.run_with_retries(ddl_command, "Creating tables...")
return retcode, outdata, errdata
@staticmethod
def run_with_retries(command, message):
"""
Run given command SETUP_DB_CONNECT_ATTEMPTS times in case of failures
:param command: command to execute
:param message: message to be printed
:return: (code, out, err)
"""
for i in range(SETUP_DB_CONNECT_ATTEMPTS):
print message
retcode, outdata, errdata = run_os_command(command)
if retcode == 0:
print 'done.'
return retcode, outdata, errdata
if (i+1) < SETUP_DB_CONNECT_ATTEMPTS:
print_error_msg("Failed to execute command:" + str(command))
print_error_msg("stderr:" + errdata)
print_error_msg("stdout:" + outdata)
print 'failed to execute queries ...retrying (%d)' % (i+1)
time.sleep(SETUP_DB_CONNECT_TIMEOUT)
return retcode, outdata, errdata
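# Illustrative sketch (added for this document, not part of the original sources):
# a typical call retries the command up to SETUP_DB_CONNECT_ATTEMPTS times,
# sleeping SETUP_DB_CONNECT_TIMEOUT seconds between failed attempts, e.g.:
#   retcode, out, err = PGConfig.run_with_retries(command, "Creating schema and user...")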
@staticmethod
def _configure_pg_hba_ambaridb_users(conf_file, database_username):
conf_file_content_in = sudo.read_file(conf_file)
conf_file_content_out = conf_file_content_in
conf_file_content_out += "\n"
conf_file_content_out += "local all " + database_username + ",mapred md5"
conf_file_content_out += "\n"
conf_file_content_out += "host all " + database_username + ",mapred 0.0.0.0/0 md5"
conf_file_content_out += "\n"
conf_file_content_out += "host all " + database_username + ",mapred ::/0 md5"
conf_file_content_out += "\n"
sudo.create_file(conf_file, conf_file_content_out)
retcode, out, err = run_os_command(PGConfig.PG_HBA_RELOAD_CMD)
if not retcode == 0:
raise FatalException(retcode, err)
@staticmethod
def _configure_pg_hba_postgres_user():
postgresString = "all postgres"
pg_hba_conf_file_content_in = sudo.read_file(PGConfig.PG_HBA_CONF_FILE)
pg_hba_conf_file_content_out = re.sub('all\s*all', postgresString, pg_hba_conf_file_content_in)
sudo.create_file(PGConfig.PG_HBA_CONF_FILE, pg_hba_conf_file_content_out)
sudo.chmod(PGConfig.PG_HBA_CONF_FILE, 0644)
@staticmethod
def _configure_postgresql_conf():
listenAddress = "listen_addresses = '*' #"
postgresql_conf_file_in = sudo.read_file(PGConfig.POSTGRESQL_CONF_FILE)
postgresql_conf_file_out = re.sub('#+listen_addresses.*?(#|$)', listenAddress, postgresql_conf_file_in)
sudo.create_file(PGConfig.POSTGRESQL_CONF_FILE, postgresql_conf_file_out)
sudo.chmod(PGConfig.POSTGRESQL_CONF_FILE, 0644)
def _configure_postgres(self):
if os.path.isfile(PGConfig.PG_HBA_CONF_FILE):
if not os.path.isfile(PGConfig.PG_HBA_CONF_FILE_BACKUP):
sudo.copy(PGConfig.PG_HBA_CONF_FILE, PGConfig.PG_HBA_CONF_FILE_BACKUP)
else:
#Postgres has been configured before, must not override backup
print "Backup for pg_hba found, reconfiguration not required"
return 0, "", ""
PGConfig._configure_pg_hba_postgres_user()
PGConfig._configure_pg_hba_ambaridb_users(PGConfig.PG_HBA_CONF_FILE, self.database_username)
sudo.chmod(PGConfig.PG_HBA_CONF_FILE, 0644)
PGConfig._configure_postgresql_conf()
#restart postgresql if already running
pg_status, retcode, out, err = PGConfig._get_postgre_status()
if pg_status == PGConfig.PG_STATUS_RUNNING:
retcode, out, err = PGConfig._restart_postgres()
return retcode, out, err
return 0, "", ""
@staticmethod
def _restart_postgres():
print "Restarting PostgreSQL"
process = subprocess.Popen(PGConfig.PG_RESTART_CMD.split(' '),
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE
)
time.sleep(5)
result = process.poll()
if result is None:
print_info_msg("Killing restart PostgresSQL process")
process.kill()
pg_status, retcode, out, err = PGConfig._get_postgre_status()
# SUSE Linux sets the status of a stopped postgresql process to 'unused'
if pg_status == "unused" or pg_status == "stopped":
print_info_msg("PostgreSQL is stopped. Restarting ...")
retcode, out, err = run_os_command(PGConfig.PG_START_CMD)
return retcode, out, err
return 0, "", ""
def _store_remote_properties(self, properties, options):
super(PGConfig, self)._store_remote_properties(properties, options)
properties.process_pair(JDBC_POSTGRES_SCHEMA_PROPERTY, self.postgres_schema)
def _change_db_files_owner(self):
retcode = 0
if not self._change_tables_owner():
print_error_msg("""Ambari is unable to change ownership of the database tables in {database} to {user}.
This may be because the administrator user ({admin_user}) does not have permission to make the changes.
Make sure that all tables returned by the following SQL are owned by {user}:
"SELECT tablename FROM pg_tables WHERE schemaname = 'ambari';",
"SELECT sequence_name FROM information_schema.sequences WHERE sequence_schema = 'ambari';",
"SELECT table_name FROM information_schema.views WHERE table_schema = 'ambari';
""".format(database=self.database_name, admin_user=self.local_admin_user, user=self.database_username))
continue_ = get_YN_input("Is it safe to continue [yes/no](no)? ", "no")
if continue_ and continue_ != "no":
retcode = 0
else:
print_info_msg('Fixed database objects owner')
return retcode
@staticmethod
def _check_for_psql_error(out, err):
error_messages = [
"psql: FATAL:",
"psql: could not connect to server:"
]
for message in error_messages:
if message in out or message in err:
return True
return False
def _change_tables_owner(self):
"""
Changes owner for local postgres database tables.
:return: True, if owner was changed or already correct
"""
tables = []
get_tables_queries = [
"SELECT tablename FROM pg_tables WHERE schemaname = 'ambari';",
"SELECT sequence_name FROM information_schema.sequences WHERE sequence_schema = 'ambari';",
"SELECT table_name FROM information_schema.views WHERE table_schema = 'ambari';"
]
for query in get_tables_queries:
retcode, stdout, stderr = self._execute_psql_query(query, self.database_name)
if retcode != 0 or self._check_for_psql_error(stdout, stderr):
print_error_msg("Failed to get list of ambari tables. Message from psql:\n"
" stdout:{0}\n"
" stderr:{1}\n".format(stdout, stderr))
return False
for tbl in stdout.splitlines():
tables.append(tbl)
if not tables:
print_error_msg("Failed to get list of ambari tables")
return False
for tbl in tables:
retcode, stdout, stderr = self._execute_psql_query("select u.usename from information_schema.tables t "
"join pg_catalog.pg_class c on (t.table_name = c.relname) "
"join pg_catalog.pg_user u on (c.relowner = u.usesysid) "
"where t.table_schema='ambari' and t.table_name='"+tbl+"';",
self.database_name)
owner = stdout.strip()
if owner != self.database_username:
retcode, stdout, stderr = self._execute_psql_query("ALTER TABLE \"ambari\".\""+tbl+
"\" OWNER TO \""+self.database_username+"\"",
self.database_name, False)
if retcode != 0 or "ALTER TABLE" not in stdout:
print_error_msg("Failed to change owner of table:{0} to user:{1}".format(tbl, owner))
return False
return True
@staticmethod
def _execute_psql_query(query, database, silent=True):
"""
Executes a psql query on the local database as the configured admin user.
:param query: query to execute
:param database: database against which the query is executed
:param silent: if True, only data returned by query will be printed
:return: (code, out, err)
"""
cmd = PGConfig.EXECUTE_QUERY_AS_POSTGRES_FOR_DB_SILENT[:] if silent else PGConfig.EXECUTE_QUERY_AS_POSTGRES_FOR_DB[:]
cmd[-1] = cmd[-1].format(query, database)
return run_os_command(cmd)
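# Illustrative sketch (added for this document, not part of the original sources):
# with silent=True the call below runs `psql -qAt -c "<query>" <database>` as the
# postgres user and returns only the query output, e.g.:
#   retcode, out, err = PGConfig._execute_psql_query(
#       "SELECT tablename FROM pg_tables WHERE schemaname = 'ambari';", "ambari")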
def createPGConfig(options, properties, storage_type, dbId):
return PGConfig(options, properties, storage_type)
class OracleConfig(LinuxDBMSConfig):
def __init__(self, options, properties, storage_type):
super(OracleConfig, self).__init__(options, properties, storage_type)
#Init the database configuration data here, if any
self.dbms = "oracle"
self.dbms_full_name = "Oracle"
self.driver_class_name = "oracle.jdbc.driver.OracleDriver"
self.driver_file_name = "ojdbc6.jar"
self.driver_symlink_name = "oracle-jdbc-driver.jar"
self.database_storage_name = "Service"
if (hasattr(options, 'sid_or_sname') and options.sid_or_sname == "sname") or \
(hasattr(options, 'jdbc_url') and options.jdbc_url and re.match(ORACLE_SNAME_PATTERN, options.jdbc_url)):
print_info_msg("using SERVICE_NAME instead of SID for Oracle")
self.sid_or_sname = "sname"
self.database_port = DBMSConfig._init_member_with_prop_default(options, "database_port",
properties, JDBC_PORT_PROPERTY, "1521")
self.database_url_pattern = "jdbc:oracle:thin:@{0}:{1}/{2}"
self.database_url_pattern_alt = "jdbc:oracle:thin:@{0}:{1}:{2}"
self.JDBC_DRIVER_INSTALL_MSG = 'Before starting Ambari Server, ' \
'you must copy the {0} JDBC driver JAR file to {1} and set property "server.jdbc.driver.path=[path/to/custom_jdbc_driver]" in ambari.properties.'.format(
self.dbms_full_name, configDefaults.JAVA_SHARE_PATH)
self.init_script_file = AmbariPath.get("/var/lib/ambari-server/resources/Ambari-DDL-Oracle-CREATE.sql")
self.drop_tables_script_file = AmbariPath.get("/var/lib/ambari-server/resources/Ambari-DDL-Oracle-DROP.sql")
self.client_tool_usage_pattern = 'sqlplus {1}/{2} < {0}'
self.jdbc_extra_params = [
["oracle.net.CONNECT_TIMEOUT", "2000"], # socket level timeout
["oracle.net.READ_TIMEOUT", "2000"], # socket level timeout
["oracle.jdbc.ReadTimeout", "8000"] # query fetch timeout
]
#
# Private implementation
#
def _reset_remote_database(self):
super(OracleConfig, self)._reset_remote_database()
raise NonFatalException("Please replace '*' symbols with password before running DDL`s!")
def _is_jdbc_driver_installed(self, properties):
return LinuxDBMSConfig._find_jdbc_driver("*ojdbc*.jar")
def _get_default_driver_path(self, properties):
drivers = LinuxDBMSConfig._find_jdbc_driver("*ojdbc*.jar")
if drivers == -1:
return os.path.join(configDefaults.JAVA_SHARE_PATH, self.driver_file_name)
else:
return os.pathsep.join(drivers)
def _configure_database_name(self):
if self.persistence_type != STORAGE_TYPE_LOCAL:
# Oracle uses service name or service id
idType = "1"
idType = get_validated_string_input(
"Select Oracle identifier type:\n1 - " + ORACLE_DB_ID_TYPES[0] +
"\n2 - " + ORACLE_DB_ID_TYPES[1] + "\n(" + idType + "): ",
idType,
"^[12]$",
"Invalid number.",
False
)
if idType == "1":
self.sid_or_sname = "sname"
elif idType == "2":
self.sid_or_sname = "sid"
IDTYPE_INDEX = int(idType) - 1
self.database_name = OracleConfig._get_validated_service_name(self.database_name,
IDTYPE_INDEX)
else:
self.database_name = LinuxDBMSConfig._get_validated_db_name(self.database_storage_name, self.database_name)
return True
def _get_remote_script_line(self, scriptFile):
# Detect the existing sqlplus flavor
try:
find_in_path("sqlplus64")
tool = "sqlplus64"
except:
tool = "sqlplus"
ORACLE_EXEC_ARGS = "{0} -S -L '{1}/{2}@(description=(address=(protocol=TCP)(host={3})(port={4}))(connect_data=({7}={5})))' @{6} {1}"
return ORACLE_EXEC_ARGS.format(
tool,
self.database_username,
self.database_password,
self.database_host,
self.database_port,
self.database_name,
scriptFile,
self.sid_or_sname
)
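# Illustrative sketch (added for this document, not part of the original sources):
# with hypothetical values the rendered command line looks roughly like:
#   sqlplus -S -L 'ambari/secret@(description=(address=(protocol=TCP)(host=dbhost)(port=1521))(connect_data=(sid=XE)))' @Ambari-DDL-Oracle-CREATE.sql ambari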
@staticmethod
def _get_validated_service_name(service_name, index):
return get_validated_string_input(
ORACLE_DB_ID_TYPES[index] + " (" + service_name + "): ",
service_name,
".*",
"Invalid " + ORACLE_DB_ID_TYPES[index] + ".",
False
)
def createOracleConfig(options, properties, storage_type, dbId):
return OracleConfig(options, properties, storage_type)
class MySQLConfig(LinuxDBMSConfig):
def __init__(self, options, properties, storage_type):
super(MySQLConfig, self).__init__(options, properties, storage_type)
#Init the database configuration data here, if any
self.dbms = "mysql"
self.dbms_full_name = "MySQL"
self.driver_class_name = "com.mysql.jdbc.Driver"
self.driver_file_name = "mysql-connector-java.jar"
self.driver_symlink_name = "mysql-jdbc-driver.jar"
self.database_storage_name = "Database"
self.database_port = DBMSConfig._init_member_with_prop_default(options, "database_port",
properties, JDBC_PORT_PROPERTY, "3306")
self.database_url_pattern = "jdbc:mysql://{0}:{1}/{2}"
self.database_url_pattern_alt = "jdbc:mysql://{0}:{1}/{2}"
self.JDBC_DRIVER_INSTALL_MSG = 'Before starting Ambari Server, ' \
'you must copy the {0} JDBC driver JAR file to {1} and set property "server.jdbc.driver.path=[path/to/custom_jdbc_driver]" in ambari.properties.'.format(
self.dbms_full_name, configDefaults.JAVA_SHARE_PATH)
self.init_script_file = AmbariPath.get("/var/lib/ambari-server/resources/Ambari-DDL-MySQL-CREATE.sql")
self.drop_tables_script_file = AmbariPath.get("/var/lib/ambari-server/resources/Ambari-DDL-MySQL-DROP.sql")
self.client_tool_usage_pattern = 'mysql --user={1} --password={2} {3}<{0}'
#
# Private implementation
#
def _reset_remote_database(self):
super(MySQLConfig, self)._reset_remote_database()
raise NonFatalException("Please replace '*' symbols with password before running DDL`s!")
def _is_jdbc_driver_installed(self, properties):
return LinuxDBMSConfig._find_jdbc_driver("*mysql*.jar")
def _configure_database_name(self):
self.database_name = LinuxDBMSConfig._get_validated_db_name(self.database_storage_name, self.database_name)
return True
def _get_remote_script_line(self, scriptFile):
MYSQL_INIT_SCRIPT = AmbariPath.get('/var/lib/ambari-server/resources/Ambari-DDL-MySQL-CREATE.sql')
MYSQL_EXEC_ARGS_WITH_USER_VARS = "mysql --host={0} --port={1} --user={2} --password={3} {4} " \
"-e\"set @schema=\'{4}\'; set @username=\'{2}\'; source {5};\""
MYSQL_EXEC_ARGS_WO_USER_VARS = "mysql --force --host={0} --port={1} --user={2} --password={3} --database={4} < {5} 2> /dev/null"
MYSQL_EXEC_ARGS = MYSQL_EXEC_ARGS_WO_USER_VARS if MYSQL_INIT_SCRIPT == scriptFile else MYSQL_EXEC_ARGS_WITH_USER_VARS
return MYSQL_EXEC_ARGS.format(
self.database_host,
self.database_port,
self.database_username,
self.database_password,
self.database_name,
scriptFile
)
def _store_remote_properties(self, properties, options):
"""
Override the remote properties written for MySQL, inheriting those from the parent first.
:param properties: the properties object to set MySQL specific properties on
:return:
"""
super(MySQLConfig, self)._store_remote_properties(properties, options)
# connection pooling (c3p0 used by MySQL by default)
properties.process_pair(JDBC_CONNECTION_POOL_TYPE, "c3p0")
properties.process_pair(JDBC_CONNECTION_POOL_ACQUISITION_SIZE, "5")
properties.process_pair(JDBC_CONNECTION_POOL_IDLE_TEST_INTERVAL, "7200")
properties.process_pair(JDBC_CONNECTION_POOL_MAX_IDLE_TIME, "14400")
properties.process_pair(JDBC_CONNECTION_POOL_MAX_IDLE_TIME_EXCESS, "0")
properties.process_pair(JDBC_CONNECTION_POOL_MAX_AGE, "0")
def createMySQLConfig(options, properties, storage_type, dbId):
return MySQLConfig(options, properties, storage_type)
class MSSQLConfig(LinuxDBMSConfig):
def __init__(self, options, properties, storage_type):
super(MSSQLConfig, self).__init__(options, properties, storage_type)
#Init the database configuration data here, if any
self.dbms = "mssql"
self.dbms_full_name = "Microsoft SQL Server"
self.driver_class_name = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
self.driver_file_name = "sqljdbc4.jar"
self.driver_symlink_name = "mssql-jdbc-driver.jar"
self.database_storage_name = "Database"
self.database_port = DBMSConfig._init_member_with_prop_default(options, "database_port",
properties, JDBC_PORT_PROPERTY, "1433")
self.database_url_pattern = "jdbc:sqlserver://{0}:{1};databaseName={2}"
self.database_url_pattern_alt = "jdbc:sqlserver://{0}:{1};databaseName={2}"
self.JDBC_DRIVER_INSTALL_MSG = 'Before starting Ambari Server, ' \
'you must copy the {0} JDBC driver JAR file to {1} and set property "server.jdbc.driver.path=[path/to/custom_jdbc_driver]" in ambari.properties.'.format(
self.dbms_full_name, configDefaults.JAVA_SHARE_PATH)
self.init_script_file = AmbariPath.get("/var/lib/ambari-server/resources/Ambari-DDL-SQLServer-CREATE.sql")
self.drop_tables_script_file = AmbariPath.get("/var/lib/ambari-server/resources/Ambari-DDL-SQLServer-DROP.sql")
self.client_tool_usage_pattern = ''
#
# Private implementation
#
def _reset_remote_database(self):
super(MSSQLConfig, self)._reset_remote_database()
raise NonFatalException("Please replace '*' symbols with password before running DDL`s!")
def _is_jdbc_driver_installed(self, properties):
return LinuxDBMSConfig._find_jdbc_driver("*sqljdbc*.jar")
def _get_jdbc_driver_path(self, properties):
super(MSSQLConfig, self)._get_jdbc_driver_path(properties)
def _configure_database_name(self):
self.database_name = LinuxDBMSConfig._get_validated_db_name(self.database_storage_name, self.database_name)
return True
def _get_remote_script_line(self, scriptFile):
return scriptFile
def createMSSQLConfig(options, properties, storage_type, dbId):
return MSSQLConfig(options, properties, storage_type)
class SQLAConfig(LinuxDBMSConfig):
EXTRACT_CMD="tar xzf {0} -C {1}"
def __init__(self, options, properties, storage_type):
super(SQLAConfig, self).__init__(options, properties, storage_type)
#Init the database configuration data here, if any
self.dbms = "sqlanywhere"
self.dbms_full_name = "SQL Anywhere"
self.driver_class_name = "sap.jdbc4.sqlanywhere.IDriver" #TODO sybase.* for v < 17, check requirements
self.driver_file_name = "sajdbc4.jar"
self.server_name = DBMSConfig._init_member_with_prop_default(options, "sqla_server_name", properties,
JDBC_SQLA_SERVER_NAME, "ambari")
self.driver_symlink_name = "sqlanywhere-jdbc-driver.jar"
self.client_tarball_pattern = "*sqla-client-jdbc*.tar.gz"
self.client_folder = "sqla-client-jdbc"
self.database_storage_name = "Database"
self.database_port = DBMSConfig._init_member_with_prop_default(options, "database_port",
properties, JDBC_PORT_PROPERTY, "2638")
self.database_url_pattern = "jdbc:sqlanywhere:eng={0};dbf={1};host={2};port={3}"
self.database_url_pattern_alt = "jdbc:sqlanywhere:eng={0};dbf={1};host={2};port={3}"
self.JDBC_DRIVER_INSTALL_MSG = 'Before starting Ambari Server, ' \
'you must copy the {0} jdbc client tarball to {1} and set property "server.jdbc.driver.path=[path/to/custom_jdbc_driver]" in ambari.properties.'.format(
self.dbms_full_name, configDefaults.SHARE_PATH)
self.init_script_file = AmbariPath.get("/var/lib/ambari-server/resources/Ambari-DDL-SQLAnywhere-CREATE.sql")
self.drop_tables_script_file = AmbariPath.get("/var/lib/ambari-server/resources/Ambari-DDL-SQLAnywhere-DROP.sql")
self.client_tool_usage_pattern = 'stub string'
#
# Private implementation
#
def _get_jdbc_connection_string(self):
jdbc_hostname = self._get_database_hostname()
connectionStringFormat = self.database_url_pattern
return connectionStringFormat.format(self.server_name, self.database_name, jdbc_hostname, self.database_port)
def _reset_remote_database(self):
super(SQLAConfig, self)._reset_remote_database()
raise NonFatalException("Please replace '*' symbols with password before running DDL`s!")
def _is_jdbc_driver_installed(self, properties):
drivers = []
drivers.extend(glob.glob(configDefaults.SHARE_PATH + os.sep + self.client_tarball_pattern))
if drivers:
return drivers
return -1
def _install_jdbc_driver(self, properties, files_list):
return True
def _configure_database_name(self):
self.server_name = get_validated_string_input("Server name (" + str(self.server_name) + "): ",
self.server_name, ".*",
"Invalid server name",
False)
self.database_name = LinuxDBMSConfig._get_validated_db_name(self.database_storage_name, self.database_name)
return True
def _get_remote_script_line(self, scriptFile):
return "stub script line" #TODO not used anymore, investigate if it can be removed
def _store_remote_properties(self, properties, options):
"""
Override the remote properties written for SQL Anywhere, inheriting those from the parent first.
:param properties: the properties object to set SQL Anywhere specific properties on
:return:
"""
super(SQLAConfig, self)._store_remote_properties(properties, options)
properties.process_pair(JDBC_SQLA_SERVER_NAME, self.server_name)
def _extract_client_tarball(self, properties):
files = []
files.extend(glob.glob(configDefaults.SHARE_PATH + os.sep + self.client_tarball_pattern))
if len(files) > 1:
raise FatalException(-1, "More than One SQl Anywhere client tarball detected")
elif len(files) == 0:
raise FatalException(-1, self.JDBC_DRIVER_INSTALL_MSG)
cmd = SQLAConfig.EXTRACT_CMD.format(files[0], get_resources_location(properties))
process = subprocess.Popen(cmd.split(' '),
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE
)
out, err = process.communicate()
retcode = process.returncode
if retcode != 0:
raise FatalException(-1, "Error extracting SQL Anywhere client tarball: " + str(err))
def _get_native_libs(self, properties):
return os.path.join(get_resources_location(properties), self.client_folder, "native", "lib64")
def _get_default_driver_path(self, properties):
return os.path.join(get_resources_location(properties), self.client_folder, "java", self.driver_file_name)
def createSQLAConfig(options, properties, storage_type, dbId):
return SQLAConfig(options, properties, storage_type)
class BDBConfig(LinuxDBMSConfig):
def __init__(self, options, properties, storage_type):
super(BDBConfig, self).__init__(options, properties, storage_type)
#Init the database configuration data here, if any
self.dbms = "bdb"
self.dbms_full_name = "Berkeley DB Jar file"
self.driver_class_name = "com.berkeleydb.Driver"
self.driver_file_name = "je-5.0.73.jar"
self.driver_symlink_name = "bdb-jdbc-driver.jar"
self.database_storage_name = "Database"
self.client_tool_usage_pattern = ''
#
# Private implementation
#
def _is_jdbc_driver_installed(self, properties):
return LinuxDBMSConfig._find_jdbc_driver("*je-*.jar")
def _get_jdbc_driver_path(self, properties):
super(BDBConfig, self)._get_jdbc_driver_path(properties)
def _configure_database_name(self):
self.database_name = LinuxDBMSConfig._get_validated_db_name(self.database_storage_name, self.database_name)
return True
def createBDBConfig(options, properties, storage_type, dbId):
return BDBConfig(options, properties, storage_type)
|
[] |
[] |
[
"PGPASSWORD"
] |
[]
|
["PGPASSWORD"]
|
python
| 1 | 0 | |
LUCE-API/luce_vm/luce_django/django_binary_db_field/lucehome/settings.py
|
"""
Django settings for lucehome project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# '/vagrant/luce_django/luce'
# Folder from which manage.py runserver is called
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')%8jl5ba3h8jxwab#*x2v1l$c=f05^ac-btpt6*=htjwib()w4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'datastore',
'accounts',
'search',
'django_extensions',
]
AUTH_USER_MODEL = 'accounts.User' # changes the built-in user model
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# Re-direct to home view after logout
LOGOUT_REDIRECT_URL = '/'
ROOT_URLCONF = 'lucehome.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR + '/lucehome/templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lucehome.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# Use sqlite by default (only one VM required, less resources)
# Can switch to Postgresql via environment variable (see code at the very bottom)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Postgresql Database:
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'lucedb',
# 'USER': 'vagrant',
# 'PASSWORD': 'luce',
# 'HOST': '192.168.72.3',
# 'PORT': '5432',
# }
# }
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# ==== SETUP STATIC FILE DIRECTORIES ====
# Simulate a CDN locally:
# This path is outside django project, usually a CDN like AWS S3
LOCAL_STATIC_CDN_PATH = os.path.join(os.path.dirname(BASE_DIR), 'luce_static_files/static_cdn_local')
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = os.path.join(LOCAL_STATIC_CDN_PATH, 'static')
STATIC_URL = '/static/'
# This is where files are uploaded to
MEDIA_ROOT = os.path.join(LOCAL_STATIC_CDN_PATH, 'media')
MEDIA_URL = '/media/'
# These files live inside django project
# Local file changes take place here, then at some point they are uploaded to CDN
STATICFILES_DIRS = [BASE_DIR + '/lucehome/static_files/']
# ==== SWITCH TO PSQL ====
# export DJANGO_USE_PSQL=true
# Override variables in this settings file if DJANGO_USE_PSQL env variable is set
if os.environ.get('DJANGO_USE_PSQL') is not None:
from lucehome.settings_psql import *
|
[] |
[] |
[
"DJANGO_USE_PSQL"
] |
[]
|
["DJANGO_USE_PSQL"]
|
python
| 1 | 0 | |
train/train.py
|
import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import torch
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
def model_fn(model_dir):
print("Loading model...")
# First, load the parameters used to create the model.
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print("model_info: {}".format(model_info))
# Determine the device and construct the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])
# Load the stored model parameters.
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
# Load the saved word_dict.
word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
with open(word_dict_path, 'rb') as f:
model.word_dict = pickle.load(f)
model.to(device).eval()
print("Done loading model.")
return model
def _get_train_data_loader(batch_size, training_dir):
print("Get train data loader.")
train_data = pd.read_csv(os.path.join(training_dir, "train.csv"), header=None, names=None)
train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()
train_X = torch.from_numpy(train_data.drop([0], axis=1).values).long()
train_ds = torch.utils.data.TensorDataset(train_X, train_y)
return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)
def train(model, train_loader, epochs, optimizer, loss_fn, device):
for epoch in range(1, epochs + 1):
model.train()
total_losses = 0
for batch in train_loader:
batch_x, batch_y = batch
batch_x = batch_x.to(device)
batch_y = batch_y.to(device)
# set the gradient
optimizer.zero_grad()
output = model(batch_x)
loss = loss_fn(output, batch_y)
loss.backward() # update losses
optimizer.step() # update parameters in backpropagation
total_losses += loss.data.item() # update total loss
print("Epoch: {}, BCELoss: {}".format(epoch, total_losses / len(train_loader)))
if __name__ == '__main__':
# All of the model parameters and training parameters are sent as arguments when the script
# is executed. Here we set up an argument parser to easily access the parameters.
parser = argparse.ArgumentParser()
# Training Parameters
parser.add_argument('--batch-size', type=int, default=512, metavar='N',
help='input batch size for training (default: 512)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# Model Parameters
parser.add_argument('--embedding_dim', type=int, default=32, metavar='N',
help='size of the word embeddings (default: 32)')
parser.add_argument('--hidden_dim', type=int, default=100, metavar='N',
help='size of the hidden dimension (default: 100)')
parser.add_argument('--vocab_size', type=int, default=5000, metavar='N',
help='size of the vocabulary (default: 5000)')
# SageMaker Parameters
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device {}.".format(device))
torch.manual_seed(args.seed)
# Load the training data.
train_loader = _get_train_data_loader(args.batch_size, args.data_dir)
# Build the model.
model = LSTMClassifier(args.embedding_dim, args.hidden_dim, args.vocab_size).to(device)
with open(os.path.join(args.data_dir, "word_dict.pkl"), "rb") as f:
model.word_dict = pickle.load(f)
print("Model loaded with embedding_dim {}, hidden_dim {}, vocab_size {}.".format(
args.embedding_dim, args.hidden_dim, args.vocab_size
))
# Train the model.
optimizer = optim.Adam(model.parameters())
loss_fn = torch.nn.BCELoss()
train(model, train_loader, args.epochs, optimizer, loss_fn, device)
# Save the parameters used to construct the model
model_info_path = os.path.join(args.model_dir, 'model_info.pth')
with open(model_info_path, 'wb') as f:
model_info = {
'embedding_dim': args.embedding_dim,
'hidden_dim': args.hidden_dim,
'vocab_size': args.vocab_size,
}
torch.save(model_info, f)
# Save the word_dict
word_dict_path = os.path.join(args.model_dir, 'word_dict.pkl')
with open(word_dict_path, 'wb') as f:
pickle.dump(model.word_dict, f)
# Save the model parameters
model_path = os.path.join(args.model_dir, 'model.pth')
with open(model_path, 'wb') as f:
torch.save(model.cpu().state_dict(), f)
|
[] |
[] |
[
"SM_MODEL_DIR",
"SM_NUM_GPUS",
"SM_CURRENT_HOST",
"SM_CHANNEL_TRAINING",
"SM_HOSTS"
] |
[]
|
["SM_MODEL_DIR", "SM_NUM_GPUS", "SM_CURRENT_HOST", "SM_CHANNEL_TRAINING", "SM_HOSTS"]
|
python
| 5 | 0 | |
config.py
|
import os
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
DEBUG = False
class DevelopmentConfig(Config):
DEVELOPMENT = True
# FLASK_DEBUG = os.environ['FLASK_DEBUG']
|
[] |
[] |
[
"SECRET_KEY",
"DATABASE_URL",
"FLASK_DEBUG"
] |
[]
|
["SECRET_KEY", "DATABASE_URL", "FLASK_DEBUG"]
|
python
| 3 | 0 | |
src/compas_rhino/install.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import itertools
import os
import sys
import compas_rhino
import compas._os
import compas.plugins
__all__ = ['install']
def install(version=None, packages=None):
"""Install COMPAS for Rhino.
Parameters
----------
version : {'5.0', '6.0', '7.0'}, optional
The version number of Rhino.
Default is ``'6.0'``.
packages : list of str, optional
List of packages to install or None to use default package list.
Default is ``['compas', 'compas_rhino', 'compas_ghpython']``.
Examples
--------
.. code-block:: python
import compas_rhino
compas_rhino.install('6.0')
.. code-block:: bash
python -m compas_rhino.install -v 6.0
"""
if version not in ('5.0', '6.0', '7.0'):
version = '6.0'
packages = _filter_installable_packages(version, packages)
ipylib_path = compas_rhino._get_ironpython_lib_path(version)
scripts_path = compas_rhino._get_scripts_path(version)
print('Installing COMPAS packages to Rhino {0} scripts folder:'.format(version))
print('Location scripts folder: {}'.format(scripts_path))
print()
results = []
symlinks_to_install = []
symlinks_to_uninstall = []
exit_code = 0
for package in packages:
package_path = compas_rhino._get_package_path(importlib.import_module(package))
symlink_path = os.path.join(scripts_path, package)
symlinks_to_install.append(dict(name=package, source_path=package_path, link=symlink_path))
symlinks_to_uninstall.append(dict(name=package, link=symlink_path))
# Handle legacy install location
legacy_path = os.path.join(ipylib_path, package)
if os.path.exists(legacy_path):
symlinks_to_uninstall.append(dict(name=package, link=legacy_path))
# First uninstall existing copies of packages requested for installation
symlinks = [link['link'] for link in symlinks_to_uninstall]
uninstall_results = compas._os.remove_symlinks(symlinks)
for uninstall_data, success in zip(symlinks_to_uninstall, uninstall_results):
if not success:
results.append((uninstall_data['name'], 'ERROR: Cannot remove symlink, try to run as administrator.'))
# Handle legacy bootstrapper
if not compas_rhino._try_remove_bootstrapper(ipylib_path):
results.append(('compas_bootstrapper', 'ERROR: Cannot remove legacy compas_bootstrapper, try to run as administrator.'))
# Ready to start installing
symlinks = [(link['source_path'], link['link']) for link in symlinks_to_install]
install_results = compas._os.create_symlinks(symlinks)
for install_data, success in zip(symlinks_to_install, install_results):
result = 'OK' if success else 'ERROR: Cannot create symlink, try to run as administrator.'
results.append((install_data['name'], result))
if not all(install_results):
exit_code = -1
if exit_code == -1:
results.append(('compas_bootstrapper', 'WARNING: One or more packages failed, will not install bootstrapper, try uninstalling first'))
else:
try:
_update_bootstrapper(scripts_path, packages)
results.append(('compas_bootstrapper', 'OK'))
except: # noqa: E722
results.append(('compas_bootstrapper', 'ERROR: Could not create compas_bootstrapper to auto-determine Python environment'))
for package, status in results:
print(' {} {}'.format(package.ljust(20), status))
if status != 'OK':
exit_code = -1
print('\nCompleted.')
if exit_code != 0:
sys.exit(exit_code)
@compas.plugins.plugin(category='install', pluggable_name='installable_rhino_packages', tryfirst=True)
def default_installable_rhino_packages():
# While this list could obviously be hard-coded, I think
# eating our own dogfood and using plugins to define this, just like
# any other extension/plugin would be is a better way to ensure consistent behavior.
return ['compas', 'compas_rhino']
@compas.plugins.pluggable(category='install', selector='collect_all')
def installable_rhino_packages():
"""Provide a list of packages to make available inside Rhino.
Extensions providing Rhino or Grasshopper features
can implement this pluggable interface to automatically
have their packages made available inside Rhino when
COMPAS is installed into it.
Examples
--------
>>> import compas.plugins
>>> @compas.plugins.plugin(category='install')
... def installable_rhino_packages():
... return ['compas_fab']
Returns
-------
:obj:`list` of :obj:`str`
List of package names to make available inside Rhino.
"""
pass
def _update_bootstrapper(install_path, packages):
# Take either the CONDA environment directory or the current Python executable's directory
python_directory = os.environ.get('CONDA_PREFIX', None) or os.path.dirname(sys.executable)
environment_name = os.environ.get('CONDA_DEFAULT_ENV', '')
conda_exe = os.environ.get('CONDA_EXE', '')
compas_bootstrapper = compas_rhino._get_bootstrapper_path(install_path)
bootstrapper_data = compas_rhino._get_bootstrapper_data(compas_bootstrapper)
installed_packages = bootstrapper_data.get('INSTALLED_PACKAGES', [])
installed_packages = list(set(installed_packages + list(packages)))
with open(compas_bootstrapper, 'w') as f:
f.write('ENVIRONMENT_NAME = r"{}"\n'.format(environment_name))
f.write('PYTHON_DIRECTORY = r"{}"\n'.format(python_directory))
f.write('CONDA_EXE = r"{}"\n'.format(conda_exe))
f.write('INSTALLED_PACKAGES = {}'.format(repr(installed_packages)))
def _filter_installable_packages(version, packages):
ghpython_incompatible = False
if compas._os.system == 'darwin' and version == '5.0':
ghpython_incompatible = True
if not packages:
# Flatten list of results (resulting from collect_all pluggable)
packages = list(itertools.chain.from_iterable(installable_rhino_packages()))
elif 'compas_ghpython' in packages and ghpython_incompatible:
print('Skipping installation of compas_ghpython since it\'s not supported for Rhino 5 for Mac')
if ghpython_incompatible:
packages.remove('compas_ghpython')
return packages
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--version', choices=['5.0', '6.0', '7.0'], default='6.0', help="The version of Rhino to install the packages in.")
parser.add_argument('-p', '--packages', nargs='+', help="The packages to install.")
args = parser.parse_args()
install(version=args.version, packages=args.packages)
|
[] |
[] |
[
"CONDA_EXE",
"CONDA_DEFAULT_ENV",
"CONDA_PREFIX"
] |
[]
|
["CONDA_EXE", "CONDA_DEFAULT_ENV", "CONDA_PREFIX"]
|
python
| 3 | 0 | |
build_scripts/CompileCurl-Linux.py
|
import os
from subprocess import call
import sys
import re
import multiprocessing as mp
import string
import urllib
import shutil
version = "7.61.1"
def get_curl_filename(ver):
return "curl-" + ver + ".tar.gz"
def get_curl_link(ver):
link = "https://curl.haxx.se/download/" + get_curl_filename(ver)
# print(link)
return link
def download_file(filelink, target):
try:
testfile = urllib.URLopener()
try:
os.remove(target)
print("Found file " + target + ", which is now deleted.")
except:
pass
testfile.retrieve(filelink, target)
return True
except:
return False
def download_curl():
curl_version_found = False
filename_ = ""
for ver_suffix in list(reversed(string.ascii_lowercase))+[""]:
version_str = version + ver_suffix
if(download_file(get_curl_link(version_str), get_curl_filename(version_str))):
curl_version_found = True
filename_ = get_curl_filename(version_str)
print("Found latest Curl version to be " + version_str)
break
if curl_version_found == False:
print("Could not find the latest Curl version. Probably you're not connected to the internet.")
print("If you have already downloaded Curl, put the file name in the first argument of the script.")
return filename_
if len(sys.argv) < 2:
filename = download_curl()
else:
filename = sys.argv[1]
dirname = filename.replace(".tar.gz","")
try:
shutil.rmtree(dirname)
except:
pass
working_dir = os.getcwd()
call("tar -xf " + filename, shell=True) #extract the .tar.gz file
dirname_bin = dirname + "_build"
final_dirname = "curl_build"
try:
shutil.rmtree(dirname_bin)
except:
pass
try:
shutil.rmtree(final_dirname)
except:
pass
#Go back to base dir
os.chdir(working_dir)
################
os.chdir(dirname)
# prepend ccache to the path, necessary since prior steps prepend things to the path
os.environ['PATH'] = '/usr/lib/ccache:' + os.environ['PATH']
call("./configure --disable-shared --prefix=" + os.path.join(working_dir,dirname_bin) + " --with-ssl=" +os.path.join(working_dir,"openssl_build") + " --without-libidn2 --without-librtmp --disable-ldap --without-zlib",shell=True)
call(r"make -j" + str(mp.cpu_count()), shell=True)
call(r"make install", shell=True)
print("Compilation complete.")
#Go back to base dir
os.chdir(working_dir)
################
call(r"ln -s " + dirname_bin + " " + final_dirname,shell=True)
print("")
print("Curl compiled to \"" + os.path.join(working_dir,final_dirname) + "\" with a soft link to \"" + os.path.join(working_dir,dirname_bin) + "\"")
print("")
print("Curl lib path: " + os.path.join(working_dir,final_dirname,"lib"))
print("Curl include path: " + os.path.join(working_dir,final_dirname,"include"))
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
pkg/adapter/runtime.go
|
// +build !remoteclient
package adapter
import (
"bufio"
"context"
"io"
"io/ioutil"
"os"
"text/template"
"github.com/containers/buildah"
"github.com/containers/buildah/imagebuildah"
"github.com/containers/buildah/pkg/formats"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
"k8s.io/api/core/v1"
)
// LocalRuntime describes a typical libpod runtime
type LocalRuntime struct {
*libpod.Runtime
Remote bool
}
// ContainerImage ...
type ContainerImage struct {
*image.Image
}
// Container ...
type Container struct {
*libpod.Container
}
// Pod encapsulates the libpod.Pod structure, helps with remote vs. local
type Pod struct {
*libpod.Pod
}
// Volume ...
type Volume struct {
*libpod.Volume
}
// VolumeFilter is for filtering volumes on the client
type VolumeFilter func(*Volume) bool
// GetRuntimeNoStore returns a LocalRuntime struct with an embedded runtime but
// without a configured storage.
func GetRuntimeNoStore(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
runtime, err := libpodruntime.GetRuntimeNoStore(ctx, c)
if err != nil {
return nil, err
}
return getRuntime(runtime)
}
// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
runtime, err := libpodruntime.GetRuntime(ctx, c)
if err != nil {
return nil, err
}
return getRuntime(runtime)
}
func getRuntime(runtime *libpod.Runtime) (*LocalRuntime, error) {
return &LocalRuntime{
Runtime: runtime,
}, nil
}
// GetImages returns a slice of images in containerimages
func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) {
return r.getImages(false)
}
// GetRWImages returns a slice of read/write images in containerimages
func (r *LocalRuntime) GetRWImages() ([]*ContainerImage, error) {
return r.getImages(true)
}
func (r *LocalRuntime) getImages(rwOnly bool) ([]*ContainerImage, error) {
var containerImages []*ContainerImage
images, err := r.Runtime.ImageRuntime().GetImages()
if err != nil {
return nil, err
}
for _, i := range images {
if rwOnly && i.IsReadOnly() {
continue
}
containerImages = append(containerImages, &ContainerImage{i})
}
return containerImages, nil
}
// NewImageFromLocal returns a ContainerImage representation of an image from local storage
func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) {
img, err := r.Runtime.ImageRuntime().NewFromLocal(name)
if err != nil {
return nil, err
}
return &ContainerImage{img}, nil
}
// LoadFromArchiveReference calls into local storage to load an image from an archive
func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) {
var containerImages []*ContainerImage
imgs, err := r.Runtime.ImageRuntime().LoadFromArchiveReference(ctx, srcRef, signaturePolicyPath, writer)
if err != nil {
return nil, err
}
for _, i := range imgs {
ci := ContainerImage{i}
containerImages = append(containerImages, &ci)
}
return containerImages, nil
}
// New calls into local storage to look for an image in local storage or to pull it
func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, forcePull bool, label *string) (*ContainerImage, error) {
img, err := r.Runtime.ImageRuntime().New(ctx, name, signaturePolicyPath, authfile, writer, dockeroptions, signingoptions, forcePull, label)
if err != nil {
return nil, err
}
return &ContainerImage{img}, nil
}
// RemoveImage calls into local storage and removes an image
func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (string, error) {
return r.Runtime.RemoveImage(ctx, img.Image, force)
}
// PruneImages is wrapper into PruneImages within the image pkg
func (r *LocalRuntime) PruneImages(ctx context.Context, all bool) ([]string, error) {
return r.ImageRuntime().PruneImages(ctx, all)
}
// Export is a wrapper to container export to a tarfile
func (r *LocalRuntime) Export(name string, path string) error {
ctr, err := r.Runtime.LookupContainer(name)
if err != nil {
return errors.Wrapf(err, "error looking up container %q", name)
}
return ctr.Export(path)
}
// Import is a wrapper to import a container image
func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) {
return r.Runtime.Import(ctx, source, reference, changes, history, quiet)
}
// CreateVolume is a wrapper to create volumes
func (r *LocalRuntime) CreateVolume(ctx context.Context, c *cliconfig.VolumeCreateValues, labels, opts map[string]string) (string, error) {
var (
options []libpod.VolumeCreateOption
volName string
)
if len(c.InputArgs) > 0 {
volName = c.InputArgs[0]
options = append(options, libpod.WithVolumeName(volName))
}
if c.Flag("driver").Changed {
options = append(options, libpod.WithVolumeDriver(c.Driver))
}
if len(labels) != 0 {
options = append(options, libpod.WithVolumeLabels(labels))
}
if len(options) != 0 {
options = append(options, libpod.WithVolumeOptions(opts))
}
newVolume, err := r.NewVolume(ctx, options...)
if err != nil {
return "", err
}
return newVolume.Name(), nil
}
// RemoveVolumes is a wrapper to remove volumes
func (r *LocalRuntime) RemoveVolumes(ctx context.Context, c *cliconfig.VolumeRmValues) ([]string, error) {
return r.Runtime.RemoveVolumes(ctx, c.InputArgs, c.All, c.Force)
}
// Push is a wrapper to push an image to a registry
func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions *image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error {
newImage, err := r.ImageRuntime().NewFromLocal(srcName)
if err != nil {
return err
}
return newImage.PushImageToHeuristicDestination(ctx, destination, manifestMIMEType, authfile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, nil)
}
// InspectVolumes returns a slice of volumes based on an arg list or --all
func (r *LocalRuntime) InspectVolumes(ctx context.Context, c *cliconfig.VolumeInspectValues) ([]*Volume, error) {
var (
volumes []*libpod.Volume
err error
)
if c.All {
volumes, err = r.GetAllVolumes()
} else {
for _, v := range c.InputArgs {
vol, err := r.GetVolume(v)
if err != nil {
return nil, err
}
volumes = append(volumes, vol)
}
}
if err != nil {
return nil, err
}
return libpodVolumeToVolume(volumes), nil
}
// Volumes returns a slice of localruntime volumes
func (r *LocalRuntime) Volumes(ctx context.Context) ([]*Volume, error) {
vols, err := r.GetAllVolumes()
if err != nil {
return nil, err
}
return libpodVolumeToVolume(vols), nil
}
// libpodVolumeToVolume converts a slice of libpod volumes to a slice
// of localruntime volumes (same as libpod)
func libpodVolumeToVolume(volumes []*libpod.Volume) []*Volume {
var vols []*Volume
for _, v := range volumes {
newVol := Volume{
v,
}
vols = append(vols, &newVol)
}
return vols
}
// Build is the wrapper to build images
func (r *LocalRuntime) Build(ctx context.Context, c *cliconfig.BuildValues, options imagebuildah.BuildOptions, dockerfiles []string) error {
namespaceOptions, networkPolicy, err := parse.NamespaceOptions(c.PodmanCommand.Command)
if err != nil {
return errors.Wrapf(err, "error parsing namespace-related options")
}
usernsOption, idmappingOptions, err := parse.IDMappingOptions(c.PodmanCommand.Command, options.Isolation)
if err != nil {
return errors.Wrapf(err, "error parsing ID mapping options")
}
namespaceOptions.AddOrReplace(usernsOption...)
systemContext, err := parse.SystemContextFromOptions(c.PodmanCommand.Command)
if err != nil {
return errors.Wrapf(err, "error building system context")
}
authfile := c.Authfile
if len(c.Authfile) == 0 {
authfile = os.Getenv("REGISTRY_AUTH_FILE")
}
systemContext.AuthFilePath = authfile
commonOpts, err := parse.CommonBuildOptions(c.PodmanCommand.Command)
if err != nil {
return err
}
options.NamespaceOptions = namespaceOptions
options.ConfigureNetwork = networkPolicy
options.IDMappingOptions = idmappingOptions
options.CommonBuildOpts = commonOpts
options.SystemContext = systemContext
if c.Flag("runtime").Changed {
options.Runtime = r.GetOCIRuntimePath()
}
if c.Quiet {
options.ReportWriter = ioutil.Discard
}
if rootless.IsRootless() {
options.Isolation = buildah.IsolationOCIRootless
}
return r.Runtime.Build(ctx, options, dockerfiles...)
}
// PruneVolumes is a wrapper function for libpod PruneVolumes
func (r *LocalRuntime) PruneVolumes(ctx context.Context) ([]string, []error) {
return r.Runtime.PruneVolumes(ctx)
}
// SaveImage is a wrapper function for saving an image to the local filesystem
func (r *LocalRuntime) SaveImage(ctx context.Context, c *cliconfig.SaveValues) error {
source := c.InputArgs[0]
additionalTags := c.InputArgs[1:]
newImage, err := r.Runtime.ImageRuntime().NewFromLocal(source)
if err != nil {
return err
}
return newImage.Save(ctx, source, c.Format, c.Output, additionalTags, c.Quiet, c.Compress)
}
// LoadImage is a wrapper function for libpod LoadImage
func (r *LocalRuntime) LoadImage(ctx context.Context, name string, cli *cliconfig.LoadValues) (string, error) {
var (
writer io.Writer
)
if !cli.Quiet {
writer = os.Stderr
}
return r.Runtime.LoadImage(ctx, name, cli.Input, writer, cli.SignaturePolicy)
}
// IsImageNotFound checks if the error indicates that no image was found.
func IsImageNotFound(err error) bool {
return errors.Cause(err) == image.ErrNoSuchImage
}
// HealthCheck is a wrapper to same named function in libpod
func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (string, error) {
output := "unhealthy"
status, err := r.Runtime.HealthCheck(c.InputArgs[0])
if status == libpod.HealthCheckSuccess {
output = "healthy"
}
return output, err
}
// Events is a wrapper to libpod to obtain libpod/podman events
func (r *LocalRuntime) Events(c *cliconfig.EventValues) error {
var (
fromStart bool
eventsError error
)
var tmpl *template.Template
if c.Format != formats.JSONString {
template, err := template.New("events").Parse(c.Format)
if err != nil {
return err
}
tmpl = template
}
if len(c.Since) > 0 || len(c.Until) > 0 {
fromStart = true
}
eventChannel := make(chan *events.Event)
go func() {
readOpts := events.ReadOptions{FromStart: fromStart, Stream: c.Stream, Filters: c.Filter, EventChannel: eventChannel, Since: c.Since, Until: c.Until}
eventsError = r.Runtime.Events(readOpts)
}()
if eventsError != nil {
return eventsError
}
w := bufio.NewWriter(os.Stdout)
for event := range eventChannel {
if c.Format == formats.JSONString {
jsonStr, err := event.ToJSONString()
if err != nil {
return errors.Wrapf(err, "unable to format json")
}
if _, err := w.Write([]byte(jsonStr)); err != nil {
return err
}
} else if len(c.Format) > 0 {
if err := tmpl.Execute(w, event); err != nil {
return err
}
} else {
if _, err := w.Write([]byte(event.ToHumanReadable())); err != nil {
return err
}
}
if _, err := w.Write([]byte("\n")); err != nil {
return err
}
if err := w.Flush(); err != nil {
return err
}
}
return nil
}
// Diff shows the difference in two objects
func (r *LocalRuntime) Diff(c *cliconfig.DiffValues, to string) ([]archive.Change, error) {
return r.Runtime.GetDiff("", to)
}
// GenerateKube creates Kubernetes YAML from containers and pods
func (r *LocalRuntime) GenerateKube(c *cliconfig.GenerateKubeValues) (*v1.Pod, *v1.Service, error) {
return shared.GenerateKube(c.InputArgs[0], c.Service, r.Runtime)
}
// GetPodsByStatus returns a slice of pods filtered by a libpod status
func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*libpod.Pod, error) {
filterFunc := func(p *libpod.Pod) bool {
state, _ := shared.GetPodStatus(p)
for _, status := range statuses {
if state == status {
return true
}
}
return false
}
pods, err := r.Runtime.Pods(filterFunc)
if err != nil {
return nil, err
}
return pods, nil
}
// GetVersion is an alias to satisfy interface{}
func (r *LocalRuntime) GetVersion() (define.Version, error) {
return define.GetVersion()
}
// RemoteEndpoint resolve interface requirement
func (r *LocalRuntime) RemoteEndpoint() (*Endpoint, error) {
return nil, errors.New("RemoteEndpoint() not implemented for local connection")
}
|
[
"\"REGISTRY_AUTH_FILE\""
] |
[] |
[
"REGISTRY_AUTH_FILE"
] |
[]
|
["REGISTRY_AUTH_FILE"]
|
go
| 1 | 0 | |
jvmgo/ch06/classpath/classpath.go
|
package classpath
import (
"path/filepath"
"os"
)
type Classpath struct {
bootClasspath Entry
extClasspath Entry
userClasspath Entry
}
func Parse(jreOption, cpOption string) *Classpath {
cp := &Classpath{}
cp.parseBootAndExtClasspath(jreOption)
cp.parseUserClasspath(cpOption)
return cp
}
func (self *Classpath) ReadClass(className string) ([]byte, Entry, error) {
className = className + ".class"
if data, entry, err := self.bootClasspath.readClass(className); err == nil {
return data, entry, err
}
if data, entry, err := self.extClasspath.readClass(className); err == nil {
return data, entry, err
}
return self.userClasspath.readClass(className)
}
func (self *Classpath) String() string {
return self.userClasspath.String()
}
func (self *Classpath) parseBootAndExtClasspath(jreOption string) {
jreDir := getJreDir(jreOption)
//jre/lib/*
jreLibPath := filepath.Join(jreDir, "lib", "*")
self.bootClasspath = newWildcardEntry(jreLibPath)
//jre/lib/ext/*
jreExtPath := filepath.Join(jreDir, "lib", "ext", "*")
self.extClasspath = newWildcardEntry(jreExtPath)
}
func getJreDir(jreOption string) string {
if jreOption != "" && exists(jreOption) {
return jreOption
}
if exists("./jre") {
return "./jre"
}
if jh := os.Getenv("JAVA_HOME"); jh != "" {
return filepath.Join(jh, "jre")
}
panic("Can not find jre folder")
}
func exists(path string) bool {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
func (self *Classpath) parseUserClasspath(cpOption string) {
if cpOption == "" {
cpOption = "."
}
self.userClasspath = newEntry(cpOption)
}
|
[
"\"JAVA_HOME\""
] |
[] |
[
"JAVA_HOME"
] |
[]
|
["JAVA_HOME"]
|
go
| 1 | 0 | |
src/main/java/io/kubesure/publish/App.java
|
/*
* This Java source file was generated by the Gradle 'init' task.
*/
package io.kubesure.publish;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import java.util.logging.Logger;
import io.grpc.Server;
import io.grpc.ServerBuilder;
import io.grpc.protobuf.services.ProtoReflectionService;
import io.grpc.services.HealthStatusManager;
import io.kubesure.publish.PublisherGrpc.PublisherImplBase;
import io.kubesure.publish.PublisherProtos.Ack;
import io.kubesure.publish.PublisherProtos.Ack.Builder;
import io.kubesure.publish.PublisherProtos.Message;
/**
* App service abstracts the client from the broker implementation and provides a
* protobuf/gRPC interface for publishing messages to the abstracted broker.
* This implementation publishes messages to Kafka.
*/
public class App {
private static final Logger logger = Logger.getLogger(App.class.getName());
private Server server;
public static void main(String[] args) throws IOException, InterruptedException {
final App server = new App();
server.start();
server.blockUntilShutdown();
}
private void start() throws IOException {
int port = 50051;
// server = ServerBuilder.forPort(port).addService(new
// PublisherImpl()).build().start();
ServerBuilder sBuilder = ServerBuilder.forPort(port);
sBuilder.addService(new PublisherImpl());
sBuilder.addService(new HealthStatusManager().getHealthService());
sBuilder.addService(ProtoReflectionService.newInstance());
server = sBuilder.build();
server.start();
logger.info("Server started, listening on " + port);
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
logger.info("*** shutting down gRPC server since JVM is shutting down");
App.this.stop();
logger.info("*** server shut down");
}
});
}
private void stop() {
if (server != null) {
server.shutdown();
}
}
/**
* Await termination on the main thread since the grpc library uses daemon
* threads.
*/
private void blockUntilShutdown() throws InterruptedException {
if (server != null) {
server.awaitTermination();
}
}
// publishes messages to Kafka
static class PublisherImpl extends PublisherImplBase {
@Override
public void publish(Message request, io.grpc.stub.StreamObserver<Ack> responseObserver) {
Builder aBuilder = Ack.newBuilder();
Ack ack;
try {
logger.info("payload : " + request.getPayload());
logger.info("destination : " + request.getDestination());
MessageMetaData metaData = new MessageMetaData();
metaData.setIsAsync(false);
metaData.setMessage(request.getPayload());
metaData.setTopic(request.getDestination());
metaData.setKafkaBrokerUrl(this.getBrokerURL());
KafkaMessage kafka = new KafkaMessage(metaData);
kafka.start();
aBuilder.setOk(true);
aBuilder.setOffset(kafka.getOffset());
ack = aBuilder.build();
responseObserver.onNext(ack);
responseObserver.onCompleted();
} catch (Exception e) {
aBuilder.setOk(false);
ack = aBuilder.build();
responseObserver.onCompleted();
logger.severe(e.getMessage());
}
}
/**
* Deprecated method as the properties file is loaded in KafkaMessage. The broker URL is to be
* loaded from the k8s env in method start() to avoid a file read in the container.
* @return kafka bootstrap hostname:port
* @throws IOException
*
**/
@Deprecated
private String getBrokerURL() throws IOException {
try {
String appConfigLocation = System.getenv("APP_CONFIG_FILE");
logger.info(appConfigLocation);
Properties appProps = new Properties();
InputStream in = null;
if (appConfigLocation != null && appConfigLocation.length() != 0) {
FileReader reader = new FileReader(appConfigLocation);
appProps.load(reader);
} else {
in = this.getClass().getClassLoader().getResourceAsStream("application.properties");
appProps = new Properties();
appProps.load(in);
}
logger.info(appProps.getProperty("KAFKA_SERVER"));
return appProps.getProperty("KAFKA_SERVER") + ":" + appProps.getProperty("KAFKA_SERVER_PORT");
} catch (IOException e) {
logger.severe("error loading properties file from classpath");
e.printStackTrace();
throw e;
}
}
}
}
|
[
"\"APP_CONFIG_FILE\""
] |
[] |
[
"APP_CONFIG_FILE"
] |
[]
|
["APP_CONFIG_FILE"]
|
java
| 1 | 0 | |
bot.py
|
import os
import signal
import asyncio
from dotenv import load_dotenv
load_dotenv()
import discord
BOT_TOKEN = os.getenv('BOT_TOKEN')
from periodic import Periodic
from mcstatus import MinecraftServer
SERVER_IP = os.getenv('SERVER_IP')
mc_server = MinecraftServer.lookup(SERVER_IP)
MAINTENANCE_MOTD = os.getenv('MAINTENANCE_MOTD')
bot = discord.Client()
async def update_status():
try:
mc_status = mc_server.status()
online_ppl = mc_status.players.online
max_ppl = mc_status.players.max
motd = mc_status.description
if MAINTENANCE_MOTD == motd:
status = discord.Status.do_not_disturb
status_msg = 'Under maintenance!'
else:
status = discord.Status.online
status_msg = f'Online: {online_ppl}/{max_ppl} players!'
except Exception as ex:
status = discord.Status.do_not_disturb
status_msg = 'Offline'
await bot.change_presence(
status=status,
activity=discord.Game(status_msg)
)
@bot.event
async def on_ready():
print(f'Logged on as {bot.user}!')
await update_status()
@bot.event
async def on_message(message):
if message.author == bot.user:
return
if message.content.strip() in ['!whodis', '!playing', '!status']:
try:
mc_status = mc_server.status()
mc_query = mc_server.query()
online_ppl = mc_status.players.online
max_ppl = mc_status.players.max
players = mc_query.players.names
motd = mc_status.description['extra'][0]['text']
if MAINTENANCE_MOTD == motd:
main_status = '**under maintenance**!'
else:
main_status = f'**online** with {online_ppl}/{max_ppl} players'
if online_ppl > 0:
main_status += ':\n' + '\n'.join([f'{i + 1}. {player}' for (i, player) in enumerate(players)])
else:
main_status += '!'
except Exception as ex:
main_status = '**offline**!'
await message.channel.send(f'The server is {main_status}')
# Source: https://www.roguelynn.com/words/asyncio-graceful-shutdowns/
async def shutdown(signal, loop):
await cron.stop()
await bot.change_presence(status=discord.Status.invisible)
await bot.close()
print('Logged out!')
tasks = [task for task in asyncio.all_tasks() if task is not asyncio.current_task()]
print(f'Cancelling {len(tasks)} outstanding tasks')
await asyncio.gather(*tasks, return_exceptions=True)
loop.stop()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
cron = Periodic(5, update_status)
signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
for signal in signals:
signal_handler_fn = lambda signal=signal: asyncio.create_task(shutdown(signal, loop))
loop.add_signal_handler(signal, signal_handler_fn)
print('Initializing bot...')
try:
loop.create_task(cron.start())
loop.create_task(bot.start(BOT_TOKEN))
loop.run_forever()
finally:
loop.close()
|
[] |
[] |
[
"MAINTENANCE_MOTD",
"SERVER_IP",
"BOT_TOKEN"
] |
[]
|
["MAINTENANCE_MOTD", "SERVER_IP", "BOT_TOKEN"]
|
python
| 3 | 0 | |
backend/pycon/celery.py
|
import os
from celery import Celery
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pycon.settings.celery")
app = Celery("pycon")
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks(["integrations"])
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
Laboratory Works/Lab_9/hh_back/hh_back/wsgi.py
|
"""
WSGI config for hh_back project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hh_back.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
dygraph/similarity_net/run_classifier.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SimNet Task
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import argparse
import multiprocessing
import sys
defaultencoding = 'utf-8'
if sys.getdefaultencoding() != defaultencoding:
reload(sys)
sys.setdefaultencoding(defaultencoding)
sys.path.append("..")
import paddle
import paddle.fluid as fluid
import numpy as np
import config
import utils
import reader
import nets.paddle_layers as layers
import io
import logging
from utils import ArgConfig
from utils import load_dygraph
from model_check import check_version
from model_check import check_cuda
def train(conf_dict, args):
"""
train process
"""
# Get device
if args.use_cuda:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
# run train
logging.info("start train process ...")
def valid_and_test(pred_list, process, mode):
"""
return auc and acc
"""
pred_list = np.vstack(pred_list)
if mode == "test":
label_list = process.get_test_label()
elif mode == "valid":
label_list = process.get_valid_label()
if args.task_mode == "pairwise":
pred_list = (pred_list + 1) / 2
pred_list = np.hstack(
(np.ones_like(pred_list) - pred_list, pred_list))
metric.reset()
metric.update(pred_list, label_list)
auc = metric.eval()
if args.compute_accuracy:
acc = utils.get_accuracy(pred_list, label_list, args.task_mode,
args.lamda)
return auc, acc
else:
return auc
with fluid.dygraph.guard(place):
# used for continuous evaluation
if args.enable_ce:
SEED = 102
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
# loading vocabulary
vocab = utils.load_vocab(args.vocab_path)
# get vocab size
conf_dict['dict_size'] = len(vocab)
conf_dict['seq_len'] = args.seq_len
# Load network structure dynamically
net = utils.import_class("./nets",
conf_dict["net"]["module_name"],
conf_dict["net"]["class_name"])(conf_dict)
if args.init_checkpoint != "":
model, _ = load_dygraph(args.init_checkpoint)
net.set_dict(model)
# Load loss function dynamically
loss = utils.import_class("./nets/losses",
conf_dict["loss"]["module_name"],
conf_dict["loss"]["class_name"])(conf_dict)
# Load Optimization method
learning_rate = conf_dict["optimizer"]["learning_rate"]
optimizer_name = conf_dict["optimizer"]["class_name"]
if optimizer_name=='SGDOptimizer':
optimizer = fluid.optimizer.SGDOptimizer(learning_rate,parameter_list=net.parameters())
elif optimizer_name=='AdamOptimizer':
beta1 = conf_dict["optimizer"]["beta1"]
beta2 = conf_dict["optimizer"]["beta2"]
epsilon = conf_dict["optimizer"]["epsilon"]
optimizer = fluid.optimizer.AdamOptimizer(
learning_rate,
beta1=beta1,
beta2=beta2,
epsilon=epsilon,
parameter_list=net.parameters())
# load auc method
metric = fluid.metrics.Auc(name="auc")
simnet_process = reader.SimNetProcessor(args, vocab)
# set global step
global_step = 0
ce_info = []
losses = []
start_time = time.time()
train_pyreader = fluid.io.PyReader(capacity=16, return_list=True, use_double_buffer=True)
get_train_examples = simnet_process.get_reader("train",epoch=args.epoch)
train_pyreader.decorate_sample_list_generator(
paddle.batch(get_train_examples, batch_size=args.batch_size),
place)
if args.do_valid:
valid_pyreader = fluid.io.PyReader(capacity=16, return_list=True, use_double_buffer=True)
get_valid_examples = simnet_process.get_reader("valid")
valid_pyreader.decorate_sample_list_generator(
paddle.batch(get_valid_examples, batch_size=args.batch_size),
place)
pred_list = []
if args.task_mode == "pairwise":
for left, pos_right, neg_right in train_pyreader():
left = fluid.layers.reshape(left, shape=[-1, 1])
pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])
neg_right = fluid.layers.reshape(neg_right, shape=[-1, 1])
net.train()
global_step += 1
left_feat, pos_score = net(left, pos_right)
pred = pos_score
_, neg_score = net(left, neg_right)
avg_cost = loss.compute(pos_score, neg_score)
losses.append(np.mean(avg_cost.numpy()))
avg_cost.backward()
optimizer.minimize(avg_cost)
net.clear_gradients()
if args.do_valid and global_step % args.validation_steps == 0:
for left, pos_right in valid_pyreader():
left = fluid.layers.reshape(left, shape=[-1, 1])
pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])
net.eval()
left_feat, pos_score = net(left, pos_right)
pred = pos_score
pred_list += list(pred.numpy())
valid_result = valid_and_test(pred_list, simnet_process, "valid")
if args.compute_accuracy:
valid_auc, valid_acc = valid_result
logging.info(
"global_steps: %d, valid_auc: %f, valid_acc: %f, valid_loss: %f" %
(global_step, valid_auc, valid_acc, np.mean(losses)))
else:
valid_auc = valid_result
logging.info("global_steps: %d, valid_auc: %f, valid_loss: %f" %
(global_step, valid_auc, np.mean(losses)))
if global_step % args.save_steps == 0:
model_save_dir = os.path.join(args.output_dir,
conf_dict["model_path"])
model_path = os.path.join(model_save_dir, str(global_step))
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
fluid.dygraph.save_dygraph(net.state_dict(), model_path)
logging.info("saving infer model in %s" % model_path)
else:
for left, right, label in train_pyreader():
left = fluid.layers.reshape(left, shape=[-1, 1])
right = fluid.layers.reshape(right, shape=[-1, 1])
label = fluid.layers.reshape(label, shape=[-1, 1])
net.train()
global_step += 1
left_feat, pred = net(left, right)
avg_cost = loss.compute(pred, label)
losses.append(np.mean(avg_cost.numpy()))
avg_cost.backward()
optimizer.minimize(avg_cost)
net.clear_gradients()
if args.do_valid and global_step % args.validation_steps == 0:
for left, right in valid_pyreader():
left = fluid.layers.reshape(left, shape=[-1, 1])
right = fluid.layers.reshape(right, shape=[-1, 1])
net.eval()
left_feat, pred = net(left, right)
pred_list += list(pred.numpy())
valid_result = valid_and_test(pred_list, simnet_process, "valid")
if args.compute_accuracy:
valid_auc, valid_acc = valid_result
logging.info(
"global_steps: %d, valid_auc: %f, valid_acc: %f, valid_loss: %f" %
(global_step, valid_auc, valid_acc, np.mean(losses)))
else:
valid_auc = valid_result
logging.info("global_steps: %d, valid_auc: %f, valid_loss: %f" %
(global_step, valid_auc, np.mean(losses)))
if global_step % args.save_steps == 0:
model_save_dir = os.path.join(args.output_dir,
conf_dict["model_path"])
model_path = os.path.join(model_save_dir, str(global_step))
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
fluid.dygraph.save_dygraph(net.state_dict(), model_path)
logging.info("saving infer model in %s" % model_path)
end_time = time.time()
ce_info.append([np.mean(losses), end_time - start_time])
# final save
logging.info("the final step is %s" % global_step)
model_save_dir = os.path.join(args.output_dir,
conf_dict["model_path"])
model_path = os.path.join(model_save_dir, str(global_step))
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
fluid.dygraph.save_dygraph(net.state_dict(), model_path)
logging.info("saving infer model in %s" % model_path)
# used for continuous evaluation
if args.enable_ce:
card_num = get_cards()
ce_loss = 0
ce_time = 0
try:
ce_loss = ce_info[-1][0]
ce_time = ce_info[-1][1]
except:
logging.info("ce info err!")
print("kpis\teach_step_duration_%s_card%s\t%s" %
(args.task_name, card_num, ce_time))
print("kpis\ttrain_loss_%s_card%s\t%f" %
(args.task_name, card_num, ce_loss))
if args.do_test:
# Get Feeder and Reader
test_pyreader = fluid.io.PyReader(capacity=16, return_list=True, use_double_buffer=True)
get_test_examples = simnet_process.get_reader("test")
test_pyreader.decorate_sample_list_generator(
paddle.batch(get_test_examples, batch_size=args.batch_size),
place)
pred_list = []
for left, pos_right in test_pyreader():
left = fluid.layers.reshape(left, shape=[-1, 1])
pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])
net.eval()
left = fluid.layers.reshape(left, shape=[-1, 1])
pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])
left_feat, pos_score = net(left, pos_right)
pred = pos_score
pred_list += list(pred.numpy())
test_result = valid_and_test(pred_list, simnet_process, "test")
if args.compute_accuracy:
test_auc, test_acc = test_result
logging.info("AUC of test is %f, Accuracy of test is %f" %
(test_auc, test_acc))
else:
test_auc = test_result
logging.info("AUC of test is %f" % test_auc)
def test(conf_dict, args):
"""
Evaluation Function
"""
logging.info("start test process ...")
if args.use_cuda:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
vocab = utils.load_vocab(args.vocab_path)
simnet_process = reader.SimNetProcessor(args, vocab)
test_pyreader = fluid.io.PyReader(capacity=16, return_list=True, use_double_buffer=True)
get_test_examples = simnet_process.get_reader("test")
test_pyreader.decorate_sample_list_generator(
paddle.batch(get_test_examples, batch_size=args.batch_size),
place)
conf_dict['dict_size'] = len(vocab)
conf_dict['seq_len'] = args.seq_len
net = utils.import_class("./nets",
conf_dict["net"]["module_name"],
conf_dict["net"]["class_name"])(conf_dict)
model, _ = load_dygraph(args.init_checkpoint)
net.set_dict(model)
metric = fluid.metrics.Auc(name="auc")
pred_list = []
with io.open("predictions.txt", "w", encoding="utf8") as predictions_file:
if args.task_mode == "pairwise":
for left, pos_right in test_pyreader():
left = fluid.layers.reshape(left, shape=[-1, 1])
pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])
left_feat, pos_score = net(left, pos_right)
pred = pos_score
pred_list += list(map(lambda item: float(item[0]), pred.numpy()))
predictions_file.write(u"\n".join(
map(lambda item: str((item[0] + 1) / 2), pred.numpy())) + "\n")
else:
for left, right in test_pyreader():
left = fluid.layers.reshape(left, shape=[-1, 1])
right = fluid.layers.reshape(right, shape=[-1, 1])
left_feat, pred = net(left, right)
pred_list += list(map(lambda item: float(item[0]), pred.numpy()))
predictions_file.write(u"\n".join(
map(lambda item: str(np.argmax(item)), pred.numpy())) + "\n")
if args.task_mode == "pairwise":
pred_list = np.array(pred_list).reshape((-1, 1))
pred_list = (pred_list + 1) / 2
pred_list = np.hstack(
(np.ones_like(pred_list) - pred_list, pred_list))
else:
pred_list = np.array(pred_list)
labels = simnet_process.get_test_label()
metric.update(pred_list, labels)
if args.compute_accuracy:
acc = utils.get_accuracy(pred_list, labels, args.task_mode,
args.lamda)
logging.info("AUC of test is %f, Accuracy of test is %f" %
(metric.eval(), acc))
else:
logging.info("AUC of test is %f" % metric.eval())
if args.verbose_result:
utils.get_result_file(args)
logging.info("test result saved in %s" %
os.path.join(os.getcwd(), args.test_result_path))
def infer(conf_dict, args):
"""
run predict
"""
logging.info("start test process ...")
if args.use_cuda:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
vocab = utils.load_vocab(args.vocab_path)
simnet_process = reader.SimNetProcessor(args, vocab)
get_infer_examples = simnet_process.get_infer_reader
infer_pyreader = fluid.io.PyReader(capacity=16, return_list=True, use_double_buffer=True)
infer_pyreader.decorate_sample_list_generator(
paddle.batch(get_infer_examples, batch_size=args.batch_size),
place)
conf_dict['dict_size'] = len(vocab)
conf_dict['seq_len'] = args.seq_len
net = utils.import_class("./nets",
conf_dict["net"]["module_name"],
conf_dict["net"]["class_name"])(conf_dict)
model, _ = load_dygraph(args.init_checkpoint)
net.set_dict(model)
pred_list = []
if args.task_mode == "pairwise":
for left, pos_right in infer_pyreader():
left = fluid.layers.reshape(left, shape=[-1, 1])
pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])
left_feat, pos_score = net(left, pos_right)
pred = pos_score
pred_list += list(
map(lambda item: str((item[0] + 1) / 2), pred.numpy()))
else:
for left, right in infer_pyreader():
left = fluid.layers.reshape(left, shape=[-1, 1])
right = fluid.layers.reshape(right, shape=[-1, 1])
left_feat, pred = net(left, right)
pred_list += map(lambda item: str(np.argmax(item)), pred.numpy())
with io.open(args.infer_result_path, "w", encoding="utf8") as infer_file:
for _data, _pred in zip(simnet_process.get_infer_data(), pred_list):
infer_file.write(_data + "\t" + _pred + "\n")
logging.info("infer result saved in %s" %
os.path.join(os.getcwd(), args.infer_result_path))
def get_cards():
num = 0
cards = os.environ.get('CUDA_VISIBLE_DEVICES', '')
if cards != '':
num = len(cards.split(","))
return num
if __name__ == "__main__":
args = ArgConfig()
args = args.build_conf()
utils.print_arguments(args)
check_cuda(args.use_cuda)
check_version()
utils.init_log("./log/TextSimilarityNet")
conf_dict = config.SimNetConfig(args)
if args.do_train:
train(conf_dict, args)
elif args.do_test:
test(conf_dict, args)
elif args.do_infer:
infer(conf_dict, args)
else:
raise ValueError(
"one of do_train and do_test and do_infer must be True")
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"github.com/gogo/protobuf/proto"
plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
"github.com/leeola/protoc-gen-twirp_json_typescript/generate"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
func main() {
if err := Main(); err != nil {
log.Fatal().Err(err).Msg("main failed")
}
}
func Main() error {
logfile := os.Getenv("TWIRP_JSON_TYPESCRIPT_LOG_FILE")
if logfile != "" {
logOutput, err := os.OpenFile(logfile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
return fmt.Errorf("OpenFile: %v", err)
}
log.Logger = log.Output(zerolog.ConsoleWriter{Out: logOutput})
} else {
zerolog.SetGlobalLevel(zerolog.Disabled)
}
req, err := unmarshalRequest()
if err != nil {
return fmt.Errorf("unmarshalRequest: %v", err)
}
res, err := generate.Generate(req)
if err != nil {
return fmt.Errorf("Generate: %v", err)
}
if err := marshalResponse(res); err != nil {
return fmt.Errorf("marshalResponse: %v", err)
}
return nil
}
func unmarshalRequest() (plugin.CodeGeneratorRequest, error) {
b, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return plugin.CodeGeneratorRequest{}, fmt.Errorf("ReadAll: %v", err)
}
var req plugin.CodeGeneratorRequest
if err := proto.Unmarshal(b, &req); err != nil {
return plugin.CodeGeneratorRequest{}, fmt.Errorf("proto Unmarshal: %v", err)
}
return req, nil
}
func marshalResponse(res *plugin.CodeGeneratorResponse) error {
b, err := proto.Marshal(res)
if err != nil {
return fmt.Errorf("Marshal: %v", err)
}
if _, err := io.Copy(os.Stdout, bytes.NewReader(b)); err != nil {
return fmt.Errorf("Copy: %v", err)
}
return nil
}
|
[
"\"TWIRP_JSON_TYPESCRIPT_LOG_FILE\""
] |
[] |
[
"TWIRP_JSON_TYPESCRIPT_LOG_FILE"
] |
[]
|
["TWIRP_JSON_TYPESCRIPT_LOG_FILE"]
|
go
| 1 | 0 | |
run/main.go
|
package main
import (
"fmt"
"log"
"net/http"
"os"
"context"
"cloud.google.com/go/logging"
)
// indexHandler answers requests for the root path with a greeting and returns 404 for any other path
func indexHandler(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
fmt.Fprint(w, "Hello World!")
}
// To deploy:
// gcloud builds submit --tag gcr.io/go-logging/run
// gcloud run deploy --image gcr.io/go-logging/run --platform managed
func main() {
ctx := context.Background()
projectID := "go-logging"
// Create a logging client
client, err := logging.NewClient(ctx, projectID)
if err != nil {
log.Fatalf("Failed to create logging client: %V", err)
}
defer client.Close()
// Sets name of log to write to
logger := client.Logger("cloudrun-log")
log.Print("hello world!")
logger.Log(logging.Entry{Payload: "Hello world!"})
logger.StandardLogger(logging.Info).Println("Hello World!")
// Handle routes & ports
http.HandleFunc("/", indexHandler)
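// Cloud Run supplies the serving port through the PORT environment variable;
// fall back to 8080 for local runs.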
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
logger.Log(logging.Entry{Payload: "Listening on port: 8080"})
// Start server
if err := http.ListenAndServe(":"+port, nil); err != nil {
log.Fatal(err)
}
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
app/server_test.go
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package app
import (
"bufio"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"os"
"path"
"strconv"
"strings"
"testing"
"time"
"github.com/getsentry/sentry-go"
"github.com/mattermost/mattermost-server/v5/mlog"
"github.com/mattermost/mattermost-server/v5/config"
"github.com/mattermost/mattermost-server/v5/model"
"github.com/mattermost/mattermost-server/v5/store/storetest"
"github.com/mattermost/mattermost-server/v5/utils/fileutils"
"github.com/stretchr/testify/require"
)
func TestStartServerSuccess(t *testing.T) {
s, err := NewServer()
require.NoError(t, err)
s.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.ListenAddress = ":0" })
serverErr := s.Start()
client := &http.Client{}
checkEndpoint(t, client, "http://localhost:"+strconv.Itoa(s.ListenAddr.Port)+"/", http.StatusNotFound)
s.Shutdown()
require.NoError(t, serverErr)
}
func TestReadReplicaDisabledBasedOnLicense(t *testing.T) {
t.Skip("TODO: fix flaky test")
cfg := model.Config{}
cfg.SetDefaults()
driverName := os.Getenv("MM_SQLSETTINGS_DRIVERNAME")
if driverName == "" {
driverName = model.DATABASE_DRIVER_POSTGRES
}
dsn := ""
if driverName == model.DATABASE_DRIVER_POSTGRES {
dsn = os.Getenv("TEST_DATABASE_POSTGRESQL_DSN")
} else {
dsn = os.Getenv("TEST_DATABASE_MYSQL_DSN")
}
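// MM_SQLSETTINGS_DRIVERNAME selects the SQL driver for this test (defaulting to
// Postgres above), and TEST_DATABASE_POSTGRESQL_DSN / TEST_DATABASE_MYSQL_DSN
// optionally override the DSN produced by storetest.MakeSqlSettings below.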
cfg.SqlSettings = *storetest.MakeSqlSettings(driverName)
if dsn != "" {
cfg.SqlSettings.DataSource = &dsn
}
cfg.SqlSettings.DataSourceReplicas = []string{*cfg.SqlSettings.DataSource}
cfg.SqlSettings.DataSourceSearchReplicas = []string{*cfg.SqlSettings.DataSource}
t.Run("Read Replicas with no License", func(t *testing.T) {
s, err := NewServer(func(server *Server) error {
configStore := config.NewTestMemoryStore()
configStore.Set(&cfg)
server.configStore = configStore
return nil
})
require.NoError(t, err)
defer s.Shutdown()
require.Same(t, s.sqlStore.GetMaster(), s.sqlStore.GetReplica())
require.Len(t, s.Config().SqlSettings.DataSourceReplicas, 1)
})
t.Run("Read Replicas With License", func(t *testing.T) {
s, err := NewServer(func(server *Server) error {
configStore := config.NewTestMemoryStore()
configStore.Set(&cfg)
server.licenseValue.Store(model.NewTestLicense())
return nil
})
require.NoError(t, err)
defer s.Shutdown()
require.NotSame(t, s.sqlStore.GetMaster(), s.sqlStore.GetReplica())
require.Len(t, s.Config().SqlSettings.DataSourceReplicas, 1)
})
t.Run("Search Replicas with no License", func(t *testing.T) {
s, err := NewServer(func(server *Server) error {
configStore := config.NewTestMemoryStore()
configStore.Set(&cfg)
server.configStore = configStore
return nil
})
require.NoError(t, err)
defer s.Shutdown()
require.Same(t, s.sqlStore.GetMaster(), s.sqlStore.GetSearchReplica())
require.Len(t, s.Config().SqlSettings.DataSourceSearchReplicas, 1)
})
t.Run("Search Replicas With License", func(t *testing.T) {
s, err := NewServer(func(server *Server) error {
configStore := config.NewTestMemoryStore()
configStore.Set(&cfg)
server.configStore = configStore
server.licenseValue.Store(model.NewTestLicense())
return nil
})
require.NoError(t, err)
defer s.Shutdown()
require.NotSame(t, s.sqlStore.GetMaster(), s.sqlStore.GetSearchReplica())
require.Len(t, s.Config().SqlSettings.DataSourceSearchReplicas, 1)
})
}
func TestStartServerPortUnavailable(t *testing.T) {
s, err := NewServer()
require.NoError(t, err)
// Listen on the next available port
listener, err := net.Listen("tcp", ":0")
require.NoError(t, err)
// Attempt to listen on the port used above.
s.UpdateConfig(func(cfg *model.Config) {
*cfg.ServiceSettings.ListenAddress = listener.Addr().String()
})
serverErr := s.Start()
s.Shutdown()
require.Error(t, serverErr)
}
func TestStartServerTLSSuccess(t *testing.T) {
s, err := NewServer()
require.NoError(t, err)
testDir, _ := fileutils.FindDir("tests")
s.UpdateConfig(func(cfg *model.Config) {
*cfg.ServiceSettings.ListenAddress = ":0"
*cfg.ServiceSettings.ConnectionSecurity = "TLS"
*cfg.ServiceSettings.TLSKeyFile = path.Join(testDir, "tls_test_key.pem")
*cfg.ServiceSettings.TLSCertFile = path.Join(testDir, "tls_test_cert.pem")
})
serverErr := s.Start()
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
checkEndpoint(t, client, "https://localhost:"+strconv.Itoa(s.ListenAddr.Port)+"/", http.StatusNotFound)
s.Shutdown()
require.NoError(t, serverErr)
}
func TestStartServerTLSVersion(t *testing.T) {
s, err := NewServer()
require.NoError(t, err)
testDir, _ := fileutils.FindDir("tests")
s.UpdateConfig(func(cfg *model.Config) {
*cfg.ServiceSettings.ListenAddress = ":0"
*cfg.ServiceSettings.ConnectionSecurity = "TLS"
*cfg.ServiceSettings.TLSMinVer = "1.2"
*cfg.ServiceSettings.TLSKeyFile = path.Join(testDir, "tls_test_key.pem")
*cfg.ServiceSettings.TLSCertFile = path.Join(testDir, "tls_test_cert.pem")
})
serverErr := s.Start()
tr := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
MaxVersion: tls.VersionTLS11,
},
}
client := &http.Client{Transport: tr}
err = checkEndpoint(t, client, "https://localhost:"+strconv.Itoa(s.ListenAddr.Port)+"/", http.StatusNotFound)
if !strings.Contains(err.Error(), "remote error: tls: protocol version not supported") {
t.Errorf("Expected protocol version error, got %s", err)
}
client.Transport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
err = checkEndpoint(t, client, "https://localhost:"+strconv.Itoa(s.ListenAddr.Port)+"/", http.StatusNotFound)
if err != nil {
t.Errorf("Expected nil, got %s", err)
}
s.Shutdown()
require.NoError(t, serverErr)
}
func TestStartServerTLSOverwriteCipher(t *testing.T) {
s, err := NewServer()
require.NoError(t, err)
testDir, _ := fileutils.FindDir("tests")
s.UpdateConfig(func(cfg *model.Config) {
*cfg.ServiceSettings.ListenAddress = ":0"
*cfg.ServiceSettings.ConnectionSecurity = "TLS"
cfg.ServiceSettings.TLSOverwriteCiphers = []string{
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
}
*cfg.ServiceSettings.TLSKeyFile = path.Join(testDir, "tls_test_key.pem")
*cfg.ServiceSettings.TLSCertFile = path.Join(testDir, "tls_test_cert.pem")
})
serverErr := s.Start()
tr := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
CipherSuites: []uint16{
tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
},
MaxVersion: tls.VersionTLS12,
},
}
client := &http.Client{Transport: tr}
err = checkEndpoint(t, client, "https://localhost:"+strconv.Itoa(s.ListenAddr.Port)+"/", http.StatusNotFound)
require.Error(t, err, "Expected error due to Cipher mismatch")
if !strings.Contains(err.Error(), "remote error: tls: handshake failure") {
t.Errorf("Expected protocol version error, got %s", err)
}
client.Transport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
CipherSuites: []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
},
MaxVersion: tls.VersionTLS12,
},
}
err = checkEndpoint(t, client, "https://localhost:"+strconv.Itoa(s.ListenAddr.Port)+"/", http.StatusNotFound)
if err != nil {
t.Errorf("Expected nil, got %s", err)
}
s.Shutdown()
require.NoError(t, serverErr)
}
func checkEndpoint(t *testing.T, client *http.Client, url string, expectedStatus int) error {
res, err := client.Get(url)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != expectedStatus {
t.Errorf("Response code was %d; want %d", res.StatusCode, expectedStatus)
}
return nil
}
func TestPanicLog(t *testing.T) {
// Creating a temp file to collect logs
tmpfile, err := ioutil.TempFile("", "mlog")
if err != nil {
require.NoError(t, err)
}
defer func() {
require.NoError(t, tmpfile.Close())
require.NoError(t, os.Remove(tmpfile.Name()))
}()
// This test requires Zap file target for now.
mlog.EnableZap()
defer mlog.DisableZap()
// Creating logger to log to console and temp file
logger := mlog.NewLogger(&mlog.LoggerConfiguration{
EnableConsole: true,
ConsoleJson: true,
EnableFile: true,
FileLocation: tmpfile.Name(),
FileLevel: mlog.LevelInfo,
})
// Creating a server with logger
s, err := NewServer(SetLogger(logger))
require.NoError(t, err)
// Route that just panics
s.Router.HandleFunc("/panic", func(writer http.ResponseWriter, request *http.Request) {
s.Log.Info("inside panic handler")
panic("log this panic")
})
testDir, _ := fileutils.FindDir("tests")
s.UpdateConfig(func(cfg *model.Config) {
*cfg.ServiceSettings.ListenAddress = ":0"
*cfg.ServiceSettings.ConnectionSecurity = "TLS"
*cfg.ServiceSettings.TLSKeyFile = path.Join(testDir, "tls_test_key.pem")
*cfg.ServiceSettings.TLSCertFile = path.Join(testDir, "tls_test_cert.pem")
})
serverErr := s.Start()
require.NoError(t, serverErr)
// Calling panic route
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
client.Get("https://localhost:" + strconv.Itoa(s.ListenAddr.Port) + "/panic")
err = s.Shutdown()
require.NoError(t, err)
// Checking whether panic was logged
var panicLogged = false
var infoLogged = false
_, err = tmpfile.Seek(0, 0)
require.NoError(t, err)
scanner := bufio.NewScanner(tmpfile)
for scanner.Scan() {
if !infoLogged && strings.Contains(scanner.Text(), "inside panic handler") {
infoLogged = true
}
if strings.Contains(scanner.Text(), "log this panic") {
panicLogged = true
break
}
}
if !infoLogged {
t.Error("Info log line was supposed to be logged")
}
if !panicLogged {
t.Error("Panic was supposed to be logged")
}
}
func TestSentry(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
client := &http.Client{Timeout: 5 * time.Second, Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}}
testDir, _ := fileutils.FindDir("tests")
t.Run("sentry is disabled, should not receive a report", func(t *testing.T) {
data := make(chan bool, 1)
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
t.Log("Received sentry request for some reason")
data <- true
}))
defer server.Close()
// make sure we don't report anything when sentry is disabled
_, port, _ := net.SplitHostPort(server.Listener.Addr().String())
dsn, err := sentry.NewDsn(fmt.Sprintf("http://test:test@localhost:%s/123", port))
require.NoError(t, err)
SentryDSN = dsn.String()
s, err := NewServer(func(server *Server) error {
configStore, _ := config.NewFileStore("config.json", true)
store, _ := config.NewStoreFromBacking(configStore, nil)
server.configStore = store
server.UpdateConfig(func(cfg *model.Config) {
*cfg.ServiceSettings.ListenAddress = ":0"
*cfg.LogSettings.EnableSentry = false
*cfg.ServiceSettings.ConnectionSecurity = "TLS"
*cfg.ServiceSettings.TLSKeyFile = path.Join(testDir, "tls_test_key.pem")
*cfg.ServiceSettings.TLSCertFile = path.Join(testDir, "tls_test_cert.pem")
*cfg.LogSettings.EnableDiagnostics = true
})
return nil
})
require.NoError(t, err)
// Route that just panics
s.Router.HandleFunc("/panic", func(writer http.ResponseWriter, request *http.Request) {
panic("log this panic")
})
require.NoError(t, s.Start())
defer s.Shutdown()
resp, err := client.Get("https://localhost:" + strconv.Itoa(s.ListenAddr.Port) + "/panic")
require.Nil(t, resp)
require.True(t, errors.Is(err, io.EOF), fmt.Sprintf("unexpected error: %s", err))
sentry.Flush(time.Second)
select {
case <-data:
require.Fail(t, "Sentry received a message, even though it's disabled!")
case <-time.After(time.Second):
t.Log("Sentry request didn't arrive. Good!")
}
})
t.Run("sentry is enabled, report should be received", func(t *testing.T) {
data := make(chan bool, 1)
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
t.Log("Received sentry request!")
data <- true
}))
defer server.Close()
_, port, _ := net.SplitHostPort(server.Listener.Addr().String())
dsn, err := sentry.NewDsn(fmt.Sprintf("http://test:test@localhost:%s/123", port))
require.NoError(t, err)
SentryDSN = dsn.String()
s, err := NewServer(func(server *Server) error {
configStore, _ := config.NewFileStore("config.json", true)
store, _ := config.NewStoreFromBacking(configStore, nil)
server.configStore = store
server.UpdateConfig(func(cfg *model.Config) {
*cfg.ServiceSettings.ListenAddress = ":0"
*cfg.ServiceSettings.ConnectionSecurity = "TLS"
*cfg.ServiceSettings.TLSKeyFile = path.Join(testDir, "tls_test_key.pem")
*cfg.ServiceSettings.TLSCertFile = path.Join(testDir, "tls_test_cert.pem")
*cfg.LogSettings.EnableSentry = true
*cfg.LogSettings.EnableDiagnostics = true
})
return nil
})
require.NoError(t, err)
// Route that just panics
s.Router.HandleFunc("/panic", func(writer http.ResponseWriter, request *http.Request) {
panic("log this panic")
})
require.NoError(t, s.Start())
defer s.Shutdown()
resp, err := client.Get("https://localhost:" + strconv.Itoa(s.ListenAddr.Port) + "/panic")
require.Nil(t, resp)
require.True(t, errors.Is(err, io.EOF), fmt.Sprintf("unexpected error: %s", err))
sentry.Flush(time.Second)
select {
case <-data:
t.Log("Sentry request arrived. Good!")
case <-time.After(time.Second * 10):
require.Fail(t, "Sentry report didn't arrive")
}
})
}
|
[
"\"MM_SQLSETTINGS_DRIVERNAME\"",
"\"TEST_DATABASE_POSTGRESQL_DSN\"",
"\"TEST_DATABASE_MYSQL_DSN\""
] |
[] |
[
"MM_SQLSETTINGS_DRIVERNAME",
"TEST_DATABASE_MYSQL_DSN",
"TEST_DATABASE_POSTGRESQL_DSN"
] |
[]
|
["MM_SQLSETTINGS_DRIVERNAME", "TEST_DATABASE_MYSQL_DSN", "TEST_DATABASE_POSTGRESQL_DSN"]
|
go
| 3 | 0 | |
clone_test.go
|
package main
import (
"fmt"
"golang.org/x/crypto/ssh"
"gopkg.in/src-d/go-billy.v4/memfs"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/plumbing"
go_git_ssh "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh"
"gopkg.in/src-d/go-git.v4/storage/memory"
"io/ioutil"
"log"
"os"
"testing"
)
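// TestClone clones a private repository over SSH using the key at $HOME/.ssh/id_rsa,
// fetches all refs into an in-memory store, and checks out the cloud_implementation
// branch. It requires network access and a key authorized for the repository.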
func TestClone(t *testing.T) {
s := fmt.Sprintf("%s/.ssh/id_rsa", os.Getenv("HOME"))
sshKey, err := ioutil.ReadFile(s)
if err != nil {
t.Fatal(err)
}
signer, err := ssh.ParsePrivateKey(sshKey)
if err != nil {
t.Fatal(err)
}
auth := &go_git_ssh.PublicKeys{User: "git", Signer: signer}
r, err := git.Clone(memory.NewStorage(), memfs.New(), &git.CloneOptions{
URL: "[email protected]:binxio/google-pubsub-testbench.git",
Auth: auth,
})
if err != nil {
t.Fatal(err)
}
err = r.Fetch(&git.FetchOptions{
RefSpecs: []config.RefSpec{"refs/*:refs/*", "HEAD:refs/heads/HEAD"},
Auth: auth,
})
if err != nil {
t.Fatal(err)
}
wt, err := r.Worktree()
if err != nil {
t.Fatal(err)
}
err = wt.Checkout(&git.CheckoutOptions{
Branch: plumbing.NewBranchReferenceName("cloud_implementation"),
Force: true,
})
if err != nil {
t.Fatal(err)
}
log.Printf("%v", wt)
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
test/test_autopep8.py
|
#!/usr/bin/env python
# coding: utf-8
"""Test suite for autopep8.
Unit tests go in "UnitTests". System tests go in "SystemTests".
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import sys
import contextlib
import io
import shutil
from subprocess import Popen, PIPE
from tempfile import mkstemp, mkdtemp
import tokenize
import unittest
import warnings
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
ROOT_DIR = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
sys.path.insert(0, ROOT_DIR)
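# Make the in-repo autopep8 importable ahead of any installed copy.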
import autopep8 # NOQA: E402
from autopep8 import get_module_imports_on_top_of_file # NOQA: E402
FAKE_CONFIGURATION = os.path.join(ROOT_DIR, 'test', 'fake_configuration')
FAKE_PYCODESTYLE_CONFIGURATION = os.path.join(
ROOT_DIR, 'test', 'fake_pycodestyle_configuration'
)
if 'AUTOPEP8_COVERAGE' in os.environ and int(os.environ['AUTOPEP8_COVERAGE']):
AUTOPEP8_CMD_TUPLE = ('coverage', 'run', '--branch', '--parallel',
'--omit=*/site-packages/*',
os.path.join(ROOT_DIR, 'autopep8.py'),)
else:
# We need to specify the executable to make sure the correct Python
# interpreter gets used.
AUTOPEP8_CMD_TUPLE = (sys.executable,
os.path.join(ROOT_DIR,
'autopep8.py'),) # pragma: no cover
class UnitTests(unittest.TestCase):
maxDiff = None
def test_compile_value_error(self):
source = '"\\xhh" \\'
self.assertFalse(autopep8.check_syntax(source))
def test_find_newline_only_cr(self):
source = ['print 1\r', 'print 2\r', 'print3\r']
self.assertEqual(autopep8.CR, autopep8.find_newline(source))
def test_find_newline_only_lf(self):
source = ['print 1\n', 'print 2\n', 'print3\n']
self.assertEqual(autopep8.LF, autopep8.find_newline(source))
def test_find_newline_only_crlf(self):
source = ['print 1\r\n', 'print 2\r\n', 'print3\r\n']
self.assertEqual(autopep8.CRLF, autopep8.find_newline(source))
def test_find_newline_cr1_and_lf2(self):
source = ['print 1\n', 'print 2\r', 'print3\n']
self.assertEqual(autopep8.LF, autopep8.find_newline(source))
def test_find_newline_cr1_and_crlf2(self):
source = ['print 1\r\n', 'print 2\r', 'print3\r\n']
self.assertEqual(autopep8.CRLF, autopep8.find_newline(source))
def test_find_newline_should_default_to_lf(self):
self.assertEqual(autopep8.LF, autopep8.find_newline([]))
self.assertEqual(autopep8.LF, autopep8.find_newline(['', '']))
def test_detect_encoding(self):
self.assertEqual(
'utf-8',
autopep8.detect_encoding(
os.path.join(ROOT_DIR, 'setup.py')))
def test_detect_encoding_with_cookie(self):
self.assertEqual(
'iso-8859-1',
autopep8.detect_encoding(
os.path.join(ROOT_DIR, 'test', 'iso_8859_1.py')))
def test_readlines_from_file_with_bad_encoding(self):
"""Bad encoding should not cause an exception."""
self.assertEqual(
['# -*- coding: zlatin-1 -*-\n'],
autopep8.readlines_from_file(
os.path.join(ROOT_DIR, 'test', 'bad_encoding.py')))
def test_readlines_from_file_with_bad_encoding2(self):
"""Bad encoding should not cause an exception."""
# This causes a warning on Python 3.
with warnings.catch_warnings(record=True):
self.assertTrue(autopep8.readlines_from_file(
os.path.join(ROOT_DIR, 'test', 'bad_encoding2.py')))
def test_fix_whitespace(self):
self.assertEqual(
'a b',
autopep8.fix_whitespace('a b', offset=1, replacement=' '))
def test_fix_whitespace_with_tabs(self):
self.assertEqual(
'a b',
autopep8.fix_whitespace('a\t \t b', offset=1, replacement=' '))
def test_multiline_string_lines(self):
self.assertEqual(
{2},
autopep8.multiline_string_lines(
"""\
'''
'''
"""))
def test_multiline_string_lines_with_many(self):
self.assertEqual(
{2, 7, 10, 11, 12},
autopep8.multiline_string_lines(
"""\
'''
'''
''''''
''''''
''''''
'''
'''
'''
'''
"""))
def test_multiline_string_should_not_report_single_line(self):
self.assertEqual(
set(),
autopep8.multiline_string_lines(
"""\
'''abc'''
"""))
def test_multiline_string_should_not_report_docstrings(self):
self.assertEqual(
{5},
autopep8.multiline_string_lines(
"""\
def foo():
'''Foo.
Bar.'''
hello = '''
'''
"""))
def test_supported_fixes(self):
self.assertIn('E121', [f[0] for f in autopep8.supported_fixes()])
def test_shorten_comment(self):
self.assertEqual('# ' + '=' * 72 + '\n',
autopep8.shorten_comment('# ' + '=' * 100 + '\n',
max_line_length=79))
def test_shorten_comment_should_not_split_numbers(self):
line = '# ' + '0' * 100 + '\n'
self.assertEqual(line,
autopep8.shorten_comment(line,
max_line_length=79))
def test_shorten_comment_should_not_split_words(self):
line = '# ' + 'a' * 100 + '\n'
self.assertEqual(line,
autopep8.shorten_comment(line,
max_line_length=79))
def test_shorten_comment_should_not_split_urls(self):
line = '# http://foo.bar/' + 'abc-' * 100 + '\n'
self.assertEqual(line,
autopep8.shorten_comment(line,
max_line_length=79))
def test_shorten_comment_should_not_modify_special_comments(self):
line = '#!/bin/blah ' + ' x' * 90 + '\n'
self.assertEqual(line,
autopep8.shorten_comment(line,
max_line_length=79))
def test_format_block_comments(self):
self.assertEqual(
'# abc',
autopep8.fix_e265('#abc'))
self.assertEqual(
'# abc',
autopep8.fix_e265('####abc'))
self.assertEqual(
'# abc',
autopep8.fix_e265('## # ##abc'))
self.assertEqual(
'# abc "# noqa"',
autopep8.fix_e265('# abc "# noqa"'))
self.assertEqual(
'# *abc',
autopep8.fix_e265('#*abc'))
def test_format_block_comments_should_leave_outline_alone(self):
line = """\
###################################################################
## Some people like these crazy things. So leave them alone. ##
###################################################################
"""
self.assertEqual(line, autopep8.fix_e265(line))
line = """\
#################################################################
# Some people like these crazy things. So leave them alone. #
#################################################################
"""
self.assertEqual(line, autopep8.fix_e265(line))
def test_format_block_comments_with_multiple_lines(self):
self.assertEqual(
"""\
# abc
# blah blah
# four space indentation
''' #do not modify strings
#do not modify strings
#do not modify strings
#do not modify strings'''
#
""",
autopep8.fix_e265("""\
# abc
#blah blah
#four space indentation
''' #do not modify strings
#do not modify strings
#do not modify strings
#do not modify strings'''
#
"""))
def test_format_block_comments_should_not_corrupt_special_comments(self):
self.assertEqual(
'#: abc',
autopep8.fix_e265('#: abc'))
self.assertEqual(
'#!/bin/bash\n',
autopep8.fix_e265('#!/bin/bash\n'))
def test_format_block_comments_should_only_touch_real_comments(self):
commented_out_code = '#x = 1'
self.assertEqual(
commented_out_code,
autopep8.fix_e265(commented_out_code))
def test_fix_file(self):
self.assertIn(
'import ',
autopep8.fix_file(
filename=os.path.join(ROOT_DIR, 'test', 'example.py')))
def test_fix_file_with_diff(self):
filename = os.path.join(ROOT_DIR, 'test', 'example.py')
self.assertIn(
'@@',
autopep8.fix_file(
filename=filename,
options=autopep8.parse_args(['--diff', filename])))
def test_fix_lines(self):
self.assertEqual(
'print(123)\n',
autopep8.fix_lines(['print( 123 )\n'],
options=autopep8.parse_args([''])))
def test_fix_code(self):
self.assertEqual(
'print(123)\n',
autopep8.fix_code('print( 123 )\n'))
def test_fix_code_with_empty_string(self):
self.assertEqual(
'',
autopep8.fix_code(''))
def test_fix_code_with_multiple_lines(self):
self.assertEqual(
'print(123)\nx = 4\n',
autopep8.fix_code('print( 123 )\nx =4'))
def test_fix_code_byte_string(self):
"""This feature is here for friendliness to Python 2."""
self.assertEqual(
'print(123)\n',
autopep8.fix_code(b'print( 123 )\n'))
def test_fix_code_with_options(self):
self.assertEqual(
'print(123)\n',
autopep8.fix_code('print( 123 )\n', options={'ignore': ['W']}))
self.assertEqual(
'print( 123 )\n',
autopep8.fix_code('print( 123 )\n', options={'ignore': ['E']}))
self.assertEqual(
'y in x\n',
autopep8.fix_code('x.has_key(y)\n', options={'aggressive': True}))
def test_fix_code_with_bad_options(self):
with self.assertRaises(ValueError):
autopep8.fix_code('print( 123 )\n', options={'ignor': ['W']})
with self.assertRaises(ValueError):
autopep8.fix_code('print( 123 )\n', options={'ignore': 'W'})
def test_normalize_line_endings(self):
self.assertEqual(
['abc\n', 'def\n', '123\n', 'hello\n', 'world\n'],
autopep8.normalize_line_endings(
['abc\n', 'def\n', '123\n', 'hello\r\n', 'world\r'],
'\n'))
def test_normalize_line_endings_with_crlf(self):
self.assertEqual(
['abc\r\n', 'def\r\n', '123\r\n', 'hello\r\n', 'world\r\n'],
autopep8.normalize_line_endings(
['abc\n', 'def\r\n', '123\r\n', 'hello\r\n', 'world\r'],
'\r\n'))
def test_normalize_multiline(self):
self.assertEqual('def foo(): pass',
autopep8.normalize_multiline('def foo():'))
self.assertEqual('def _(): return 1',
autopep8.normalize_multiline('return 1'))
self.assertEqual('@decorator\ndef _(): pass',
autopep8.normalize_multiline('@decorator\n'))
self.assertEqual('class A: pass',
autopep8.normalize_multiline('class A:'))
def test_code_match(self):
self.assertTrue(autopep8.code_match('E2', select=['E2', 'E3'],
ignore=[]))
self.assertTrue(autopep8.code_match('E26', select=['E2', 'E3'],
ignore=[]))
self.assertFalse(autopep8.code_match('E26', select=[], ignore=['E']))
self.assertFalse(autopep8.code_match('E2', select=['E2', 'E3'],
ignore=['E2']))
self.assertFalse(autopep8.code_match('E26', select=['W'], ignore=['']))
self.assertFalse(autopep8.code_match('E26', select=['W'],
ignore=['E1']))
def test_split_at_offsets(self):
self.assertEqual([''], autopep8.split_at_offsets('', [0]))
self.assertEqual(['1234'], autopep8.split_at_offsets('1234', [0]))
self.assertEqual(['1', '234'], autopep8.split_at_offsets('1234', [1]))
self.assertEqual(['12', '34'], autopep8.split_at_offsets('1234', [2]))
self.assertEqual(['12', '3', '4'],
autopep8.split_at_offsets('1234', [2, 3]))
def test_split_at_offsets_with_out_of_order(self):
self.assertEqual(['12', '3', '4'],
autopep8.split_at_offsets('1234', [3, 2]))
def test_fix_2to3(self):
self.assertEqual(
'try: pass\nexcept ValueError as e: pass\n',
autopep8.fix_2to3('try: pass\nexcept ValueError, e: pass\n'))
self.assertEqual(
'while True: pass\n',
autopep8.fix_2to3('while 1: pass\n'))
self.assertEqual(
"""\
import sys
sys.maxsize
""",
autopep8.fix_2to3("""\
import sys
sys.maxint
"""))
def test_fix_2to3_subset(self):
line = 'type(res) == type(42)\n'
fixed = 'isinstance(res, type(42))\n'
self.assertEqual(fixed, autopep8.fix_2to3(line))
self.assertEqual(fixed, autopep8.fix_2to3(line, select=['E721']))
self.assertEqual(fixed, autopep8.fix_2to3(line, select=['E7']))
self.assertEqual(line, autopep8.fix_2to3(line, select=['W']))
self.assertEqual(line, autopep8.fix_2to3(line, select=['E999']))
self.assertEqual(line, autopep8.fix_2to3(line, ignore=['E721']))
def test_is_python_file(self):
self.assertTrue(autopep8.is_python_file(
os.path.join(ROOT_DIR, 'autopep8.py')))
with temporary_file_context('#!/usr/bin/env python') as filename:
self.assertTrue(autopep8.is_python_file(filename))
with temporary_file_context('#!/usr/bin/python') as filename:
self.assertTrue(autopep8.is_python_file(filename))
with temporary_file_context('#!/usr/bin/python3') as filename:
self.assertTrue(autopep8.is_python_file(filename))
with temporary_file_context('#!/usr/bin/pythonic') as filename:
self.assertFalse(autopep8.is_python_file(filename))
with temporary_file_context('###!/usr/bin/python') as filename:
self.assertFalse(autopep8.is_python_file(filename))
self.assertFalse(autopep8.is_python_file(os.devnull))
self.assertFalse(autopep8.is_python_file('/bin/bash'))
def test_match_file(self):
with temporary_file_context('', suffix='.py', prefix='.') as filename:
self.assertFalse(autopep8.match_file(filename, exclude=[]),
msg=filename)
self.assertFalse(autopep8.match_file(os.devnull, exclude=[]))
with temporary_file_context('', suffix='.py', prefix='') as filename:
self.assertTrue(autopep8.match_file(filename, exclude=[]),
msg=filename)
def test_find_files(self):
temp_directory = mkdtemp()
try:
target = os.path.join(temp_directory, 'dir')
os.mkdir(target)
with open(os.path.join(target, 'a.py'), 'w'):
pass
exclude = os.path.join(target, 'ex')
os.mkdir(exclude)
with open(os.path.join(exclude, 'b.py'), 'w'):
pass
sub = os.path.join(exclude, 'sub')
os.mkdir(sub)
with open(os.path.join(sub, 'c.py'), 'w'):
pass
# FIXME: Avoid changing directory. This may interfere with parallel
# test runs.
cwd = os.getcwd()
os.chdir(temp_directory)
try:
files = list(autopep8.find_files(
['dir'], True, [os.path.join('dir', 'ex')]))
finally:
os.chdir(cwd)
file_names = [os.path.basename(f) for f in files]
self.assertIn('a.py', file_names)
self.assertNotIn('b.py', file_names)
self.assertNotIn('c.py', file_names)
finally:
shutil.rmtree(temp_directory)
def test_line_shortening_rank(self):
self.assertGreater(
autopep8.line_shortening_rank('(1\n+1)\n',
indent_word=' ',
max_line_length=79),
autopep8.line_shortening_rank('(1+\n1)\n',
indent_word=' ',
max_line_length=79))
self.assertGreaterEqual(
autopep8.line_shortening_rank('(1+\n1)\n',
indent_word=' ',
max_line_length=79),
autopep8.line_shortening_rank('(1+1)\n',
indent_word=' ',
max_line_length=79))
# Do not crash.
autopep8.line_shortening_rank('\n',
indent_word=' ',
max_line_length=79)
self.assertGreater(
autopep8.line_shortening_rank('[foo(\nx) for x in y]\n',
indent_word=' ',
max_line_length=79),
autopep8.line_shortening_rank('[foo(x)\nfor x in y]\n',
indent_word=' ',
max_line_length=79))
def test_extract_code_from_function(self):
def fix_e123():
pass # pragma: no cover
self.assertEqual('e123', autopep8.extract_code_from_function(fix_e123))
def foo():
pass # pragma: no cover
self.assertEqual(None, autopep8.extract_code_from_function(foo))
def fix_foo():
pass # pragma: no cover
self.assertEqual(None, autopep8.extract_code_from_function(fix_foo))
def e123():
pass # pragma: no cover
self.assertEqual(None, autopep8.extract_code_from_function(e123))
def fix_():
pass # pragma: no cover
self.assertEqual(None, autopep8.extract_code_from_function(fix_))
def test_reindenter(self):
reindenter = autopep8.Reindenter('if True:\n pass\n')
self.assertEqual('if True:\n pass\n',
reindenter.run())
def test_reindenter_with_non_standard_indent_size(self):
reindenter = autopep8.Reindenter('if True:\n pass\n')
self.assertEqual('if True:\n pass\n',
reindenter.run(3))
def test_reindenter_with_good_input(self):
lines = 'if True:\n pass\n'
reindenter = autopep8.Reindenter(lines)
self.assertEqual(lines,
reindenter.run())
def test_reindenter_should_leave_stray_comment_alone(self):
lines = ' #\nif True:\n pass\n'
reindenter = autopep8.Reindenter(lines)
self.assertEqual(' #\nif True:\n pass\n',
reindenter.run())
@unittest.skipIf('AUTOPEP8_COVERAGE' in os.environ, 'form-feed character exists')
def test_reindenter_not_affect_with_formfeed(self):
lines = """print('hello')
print('python')
"""
reindenter = autopep8.Reindenter(lines)
self.assertEqual(lines,
reindenter.run())
def test_fix_e225_avoid_failure(self):
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents=' 1\n')
self.assertEqual(
[],
fix_pep8.fix_e225({'line': 1,
'column': 5}))
def test_fix_e271_ignore_redundant(self):
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents='x = 1\n')
self.assertEqual(
[],
fix_pep8.fix_e271({'line': 1,
'column': 2}))
def test_fix_e401_avoid_non_import(self):
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents=' 1\n')
self.assertEqual(
[],
fix_pep8.fix_e401({'line': 1,
'column': 5}))
def test_fix_e711_avoid_failure(self):
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents='None == x\n')
self.assertEqual(
None,
fix_pep8.fix_e711({'line': 1,
'column': 6}))
self.assertEqual(
[],
fix_pep8.fix_e711({'line': 1,
'column': 700}))
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents='x <> None\n')
self.assertEqual(
[],
fix_pep8.fix_e711({'line': 1,
'column': 3}))
def test_fix_e712_avoid_failure(self):
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents='True == x\n')
self.assertEqual(
[],
fix_pep8.fix_e712({'line': 1,
'column': 5}))
self.assertEqual(
[],
fix_pep8.fix_e712({'line': 1,
'column': 700}))
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents='x != True\n')
self.assertEqual(
[],
fix_pep8.fix_e712({'line': 1,
'column': 3}))
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents='x == False\n')
self.assertEqual(
[],
fix_pep8.fix_e712({'line': 1,
'column': 3}))
def test_get_diff_text(self):
# We ignore the first two lines since it differs on Python 2.6.
self.assertEqual(
"""\
-foo
+bar
""",
'\n'.join(autopep8.get_diff_text(['foo\n'],
['bar\n'],
'').split('\n')[3:]))
def test_get_diff_text_without_newline(self):
# We ignore the first two lines since it differs on Python 2.6.
self.assertEqual(
"""\
-foo
\\ No newline at end of file
+foo
""",
'\n'.join(autopep8.get_diff_text(['foo'],
['foo\n'],
'').split('\n')[3:]))
def test_count_unbalanced_brackets(self):
self.assertEqual(
0,
autopep8.count_unbalanced_brackets('()'))
self.assertEqual(
1,
autopep8.count_unbalanced_brackets('('))
self.assertEqual(
2,
autopep8.count_unbalanced_brackets('(['))
self.assertEqual(
1,
autopep8.count_unbalanced_brackets('[])'))
self.assertEqual(
1,
autopep8.count_unbalanced_brackets(
"'','.join(['%s=%s' % (col, col)')"))
def test_refactor_with_2to3(self):
self.assertEqual(
'1 in {}\n',
autopep8.refactor_with_2to3('{}.has_key(1)\n', ['has_key']))
def test_refactor_with_2to3_should_handle_syntax_error_gracefully(self):
self.assertEqual(
'{}.has_key(1\n',
autopep8.refactor_with_2to3('{}.has_key(1\n', ['has_key']))
def test_commented_out_code_lines(self):
self.assertEqual(
[1, 4],
autopep8.commented_out_code_lines("""\
#x = 1
#Hello
#Hello world.
#html_use_index = True
"""))
def test_standard_deviation(self):
self.assertAlmostEqual(
2, autopep8.standard_deviation([2, 4, 4, 4, 5, 5, 7, 9]))
self.assertAlmostEqual(0, autopep8.standard_deviation([]))
self.assertAlmostEqual(0, autopep8.standard_deviation([1]))
self.assertAlmostEqual(.5, autopep8.standard_deviation([1, 2]))
def test_priority_key_with_non_existent_key(self):
pep8_result = {'id': 'foobar'}
self.assertGreater(autopep8._priority_key(pep8_result), 1)
def test_decode_filename(self):
self.assertEqual('foo.py', autopep8.decode_filename(b'foo.py'))
def test_almost_equal(self):
self.assertTrue(autopep8.code_almost_equal(
"""\
[1, 2, 3
4, 5]
""",
"""\
[1, 2, 3
4, 5]
"""))
self.assertTrue(autopep8.code_almost_equal(
"""\
[1,2,3
4,5]
""",
"""\
[1, 2, 3
4,5]
"""))
self.assertFalse(autopep8.code_almost_equal(
"""\
[1, 2, 3
4, 5]
""",
"""\
[1, 2, 3, 4,
5]
"""))
def test_token_offsets(self):
text = """\
1
"""
string_io = io.StringIO(text)
self.assertEqual(
[(tokenize.NUMBER, '1', 0, 1),
(tokenize.NEWLINE, '\n', 1, 2),
(tokenize.ENDMARKER, '', 2, 2)],
list(autopep8.token_offsets(
tokenize.generate_tokens(string_io.readline))))
def test_token_offsets_with_multiline(self):
text = """\
x = '''
1
2
'''
"""
string_io = io.StringIO(text)
self.assertEqual(
[(tokenize.NAME, 'x', 0, 1),
(tokenize.OP, '=', 2, 3),
(tokenize.STRING, "'''\n1\n2\n'''", 4, 15),
(tokenize.NEWLINE, '\n', 15, 16),
(tokenize.ENDMARKER, '', 16, 16)],
list(autopep8.token_offsets(
tokenize.generate_tokens(string_io.readline))))
def test_token_offsets_with_escaped_newline(self):
text = """\
True or \\
False
"""
string_io = io.StringIO(text)
self.assertEqual(
[(tokenize.NAME, 'True', 0, 4),
(tokenize.NAME, 'or', 5, 7),
(tokenize.NAME, 'False', 11, 16),
(tokenize.NEWLINE, '\n', 16, 17),
(tokenize.ENDMARKER, '', 17, 17)],
list(autopep8.token_offsets(
tokenize.generate_tokens(string_io.readline))))
def test_shorten_line_candidates_are_valid(self):
for text in [
"""\
[xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx, y] = [1, 2]
""",
"""\
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx, y = [1, 2]
""",
"""\
lambda xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: line_shortening_rank(x,
indent_word,
max_line_length)
""",
]:
indent = autopep8._get_indentation(text)
source = text[len(indent):]
assert source.lstrip() == source
tokens = list(autopep8.generate_tokens(source))
for candidate in autopep8.shorten_line(
tokens, source, indent,
indent_word=' ',
max_line_length=79,
aggressive=10,
experimental=True,
previous_line=''):
self.assertEqual(
re.sub(r'\s', '', text),
re.sub(r'\s', '', candidate))
def test_get_fixed_long_line_empty(self):
line = ''
self.assertEqual(line, autopep8.get_fixed_long_line(line, line, line))
class SystemTests(unittest.TestCase):
maxDiff = None
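# System tests run whole snippets through autopep8 (via the autopep8_context
# helper) and compare the fixed output with the expected text.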
def test_e101(self):
line = """\
while True:
if True:
\t1
"""
fixed = """\
while True:
if True:
1
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e101_with_indent_size_0(self):
line = """\
while True:
if True:
\t1
"""
with autopep8_context(line, options=['--indent-size=0']) as result:
self.assertEqual(line, result)
def test_e101_with_indent_size_1(self):
line = """\
while True:
if True:
\t1
"""
fixed = """\
while True:
if True:
1
"""
with autopep8_context(line, options=['--indent-size=1']) as result:
self.assertEqual(fixed, result)
def test_e101_with_indent_size_2(self):
line = """\
while True:
if True:
\t1
"""
fixed = """\
while True:
if True:
1
"""
with autopep8_context(line, options=['--indent-size=2']) as result:
self.assertEqual(fixed, result)
def test_e101_with_indent_size_3(self):
line = """\
while True:
if True:
\t1
"""
fixed = """\
while True:
if True:
1
"""
with autopep8_context(line, options=['--indent-size=3']) as result:
self.assertEqual(fixed, result)
def test_e101_should_not_expand_non_indentation_tabs(self):
line = """\
while True:
if True:
\t1 == '\t'
"""
fixed = """\
while True:
if True:
1 == '\t'
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e101_should_ignore_multiline_strings(self):
line = """\
x = '''
while True:
if True:
\t1
'''
"""
fixed = """\
x = '''
while True:
if True:
\t1
'''
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e101_should_fix_docstrings(self):
line = """\
class Bar(object):
def foo():
'''
\tdocstring
'''
"""
fixed = """\
class Bar(object):
def foo():
'''
docstring
'''
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e101_when_pep8_mistakes_first_tab_in_string(self):
# pep8 will complain about this even if the tab indentation found
# elsewhere is in a multiline string.
line = """\
x = '''
\tHello.
'''
if True:
123
"""
fixed = """\
x = '''
\tHello.
'''
if True:
123
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e101_should_ignore_multiline_strings_complex(self):
line = """\
print(3 <> 4, '''
while True:
if True:
\t1
\t''', 4 <> 5)
"""
fixed = """\
print(3 != 4, '''
while True:
if True:
\t1
\t''', 4 != 5)
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e101_with_comments(self):
line = """\
while True: # My inline comment
# with a hanging
# comment.
# Hello
if True:
\t# My comment
\t1
\t# My other comment
"""
fixed = """\
while True: # My inline comment
# with a hanging
# comment.
# Hello
if True:
# My comment
1
# My other comment
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e101_skip_if_bad_indentation(self):
line = """\
try:
\t pass
except:
pass
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e101_skip_innocuous(self):
# pep8 will complain about this even if the tab indentation found
# elsewhere is in a multiline string. If we don't filter the innocuous
# report properly, the below command will take a long time.
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
['-vvv', '--select=E101', '--diff',
'--global-config={}'.format(os.devnull),
os.path.join(ROOT_DIR, 'test', 'e101_example.py')],
stdout=PIPE, stderr=PIPE)
output = [x.decode('utf-8') for x in p.communicate()][0]
self.assertEqual('', output)
def test_e111_short(self):
line = 'class Dummy:\n\n def __init__(self):\n pass\n'
fixed = 'class Dummy:\n\n def __init__(self):\n pass\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e111_long(self):
line = 'class Dummy:\n\n def __init__(self):\n pass\n'
fixed = 'class Dummy:\n\n def __init__(self):\n pass\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e111_longer(self):
line = """\
while True:
if True:
1
elif True:
2
"""
fixed = """\
while True:
if True:
1
elif True:
2
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e111_multiple_levels(self):
line = """\
while True:
if True:
1
# My comment
print('abc')
"""
fixed = """\
while True:
if True:
1
# My comment
print('abc')
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e111_with_dedent(self):
line = """\
def foo():
if True:
2
1
"""
fixed = """\
def foo():
if True:
2
1
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e111_with_other_errors(self):
line = """\
def foo():
if True:
(2 , 1)
1
if True:
print('hello')\t
2
"""
fixed = """\
def foo():
if True:
(2, 1)
1
if True:
print('hello')
2
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e111_should_not_modify_string_contents(self):
line = """\
if True:
x = '''
1
'''
"""
fixed = """\
if True:
x = '''
1
'''
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e112_should_leave_bad_syntax_alone(self):
line = """\
if True:
pass
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e113(self):
line = """\
a = 1
b = 2
"""
fixed = """\
a = 1
b = 2
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e113_bad_syntax(self):
line = """\
pass
"""
fixed = """\
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e114(self):
line = """\
# a = 1
"""
fixed = """\
# a = 1
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e115(self):
line = """\
if True:
# A comment.
pass
"""
fixed = """\
if True:
# A comment.
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e116(self):
line = """\
a = 1
# b = 2
"""
fixed = """\
a = 1
# b = 2
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e117(self):
line = """\
for a in [1, 2, 3]:
print('hello world')
for b in [1, 2, 3]:
print(a, b)
"""
fixed = """\
for a in [1, 2, 3]:
print('hello world')
for b in [1, 2, 3]:
print(a, b)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e12_reindent(self):
line = """\
def foo_bar(baz, frop,
fizz, bang): # E128
pass
if True:
x = {
} # E123
#: E121
print "E121", (
"dent")
#: E122
print "E122", (
"dent")
#: E124
print "E124", ("visual",
"indent_two"
)
#: E125
if (row < 0 or self.moduleCount <= row or
col < 0 or self.moduleCount <= col):
raise Exception("%s,%s - %s" % (row, col, self.moduleCount))
#: E126
print "E126", (
"dent")
#: E127
print "E127", ("over-",
"over-indent")
#: E128
print "E128", ("under-",
"under-indent")
"""
fixed = """\
def foo_bar(baz, frop,
fizz, bang): # E128
pass
if True:
x = {
} # E123
#: E121
print "E121", (
"dent")
#: E122
print "E122", (
"dent")
#: E124
print "E124", ("visual",
"indent_two"
)
#: E125
if (row < 0 or self.moduleCount <= row or
col < 0 or self.moduleCount <= col):
raise Exception("%s,%s - %s" % (row, col, self.moduleCount))
#: E126
print "E126", (
"dent")
#: E127
print "E127", ("over-",
"over-indent")
#: E128
print "E128", ("under-",
"under-indent")
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e12_reindent_with_multiple_fixes(self):
line = """\
sql = 'update %s set %s %s' % (from_table,
','.join(['%s=%s' % (col, col) for col in cols]),
where_clause)
"""
fixed = """\
sql = 'update %s set %s %s' % (from_table,
','.join(['%s=%s' % (col, col)
for col in cols]),
where_clause)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e12_tricky(self):
line = """\
#: E126
if (
x == (
3
) or
x == (
3
) or
y == 4):
pass
"""
fixed = """\
#: E126
if (
x == (
3
) or
x == (
3
) or
y == 4):
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e12_large(self):
line = """\
class BogusController(controller.CementBaseController):
class Meta:
pass
class BogusController2(controller.CementBaseController):
class Meta:
pass
class BogusController3(controller.CementBaseController):
class Meta:
pass
class BogusController4(controller.CementBaseController):
class Meta:
pass
class TestBaseController(controller.CementBaseController):
class Meta:
pass
class TestBaseController2(controller.CementBaseController):
class Meta:
pass
class TestStackedController(controller.CementBaseController):
class Meta:
arguments = [
]
class TestDuplicateController(controller.CementBaseController):
class Meta:
config_defaults = dict(
foo='bar',
)
arguments = [
(['-f2', '--foo2'], dict(action='store'))
]
def my_command(self):
pass
"""
fixed = """\
class BogusController(controller.CementBaseController):
class Meta:
pass
class BogusController2(controller.CementBaseController):
class Meta:
pass
class BogusController3(controller.CementBaseController):
class Meta:
pass
class BogusController4(controller.CementBaseController):
class Meta:
pass
class TestBaseController(controller.CementBaseController):
class Meta:
pass
class TestBaseController2(controller.CementBaseController):
class Meta:
pass
class TestStackedController(controller.CementBaseController):
class Meta:
arguments = [
]
class TestDuplicateController(controller.CementBaseController):
class Meta:
config_defaults = dict(
foo='bar',
)
arguments = [
(['-f2', '--foo2'], dict(action='store'))
]
def my_command(self):
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e12_with_bad_indentation(self):
line = r"""
def bar():
foo(1,
2)
def baz():
pass
pass
"""
fixed = r"""
def bar():
foo(1,
2)
def baz():
pass
pass
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
def test_e121_with_multiline_string(self):
line = """\
testing = \\
'''inputs: d c b a
'''
"""
fixed = """\
testing = \\
'''inputs: d c b a
'''
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
def test_e122_with_fallback(self):
line = """\
foooo('',
scripts=[''],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
])
"""
fixed = """\
foooo('',
scripts=[''],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
])
"""
with autopep8_context(line, options=[]) as result:
self.assertEqual(fixed, result)
def test_e123(self):
line = """\
if True:
foo = (
)
"""
fixed = """\
if True:
foo = (
)
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
def test_e123_with_escaped_newline(self):
line = r"""
x = \
(
)
"""
fixed = r"""
x = \
(
)
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
def test_e128_with_aaa_option(self):
line = """\
def extractBlocks(self):
addLine = (self.matchMultiple(linesIncludePatterns, line)
and not self.matchMultiple(linesExcludePatterns, line)) or emptyLine
"""
fixed = """\
def extractBlocks(self):
addLine = (
self.matchMultiple(
linesIncludePatterns,
line) and not self.matchMultiple(
linesExcludePatterns,
line)) or emptyLine
"""
with autopep8_context(line, options=['-aaa']) as result:
self.assertEqual(fixed, result)
def test_e129(self):
line = """\
if (a and
b in [
'foo',
] or
c):
pass
"""
fixed = """\
if (a and
b in [
'foo',
] or
c):
pass
"""
with autopep8_context(line, options=['--select=E129']) as result:
self.assertEqual(fixed, result)
def test_e125_with_multiline_string(self):
line = """\
for foo in '''
abc
123
'''.strip().split():
print(foo)
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(line, result)
def test_e125_with_multiline_string_okay(self):
line = """\
def bar(
a='''a'''):
print(foo)
"""
fixed = """\
def bar(
a='''a'''):
print(foo)
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
def test_e126(self):
line = """\
if True:
posted = models.DateField(
default=datetime.date.today,
help_text="help"
)
"""
fixed = """\
if True:
posted = models.DateField(
default=datetime.date.today,
help_text="help"
)
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
def test_e126_should_not_interfere_with_other_fixes(self):
line = """\
self.assertEqual('bottom 1',
SimpleNamedNode.objects.filter(id__gt=1).exclude(
name='bottom 3').filter(
name__in=['bottom 3', 'bottom 1'])[0].name)
"""
fixed = """\
self.assertEqual('bottom 1',
SimpleNamedNode.objects.filter(id__gt=1).exclude(
name='bottom 3').filter(
name__in=['bottom 3', 'bottom 1'])[0].name)
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
def test_e127(self):
line = """\
if True:
if True:
chksum = (sum([int(value[i]) for i in xrange(0, 9, 2)]) * 7 -
sum([int(value[i]) for i in xrange(1, 9, 2)])) % 10
"""
fixed = """\
if True:
if True:
chksum = (sum([int(value[i]) for i in xrange(0, 9, 2)]) * 7 -
sum([int(value[i]) for i in xrange(1, 9, 2)])) % 10
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
def test_e127_align_visual_indent(self):
line = """\
def draw(self):
color = [([0.2, 0.1, 0.3], [0.2, 0.1, 0.3], [0.2, 0.1, 0.3]),
([0.9, 0.3, 0.5], [0.5, 1.0, 0.5], [0.3, 0.3, 0.9]) ][self._p._colored ]
self.draw_background(color)
"""
fixed = """\
def draw(self):
color = [([0.2, 0.1, 0.3], [0.2, 0.1, 0.3], [0.2, 0.1, 0.3]),
([0.9, 0.3, 0.5], [0.5, 1.0, 0.5], [0.3, 0.3, 0.9])][self._p._colored]
self.draw_background(color)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e127_align_visual_indent_okay(self):
"""This is for code coverage."""
line = """\
want = (have + _leading_space_count(
after[jline - 1]) -
_leading_space_count(lines[jline]))
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e127_with_backslash(self):
line = r"""
if True:
if True:
self.date = meta.session.query(schedule.Appointment)\
.filter(schedule.Appointment.id ==
appointment_id).one().agenda.endtime
"""
fixed = r"""
if True:
if True:
self.date = meta.session.query(schedule.Appointment)\
.filter(schedule.Appointment.id ==
appointment_id).one().agenda.endtime
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e127_with_bracket_then_parenthesis(self):
line = r"""
if True:
foo = [food(1)
for bar in bars]
"""
fixed = r"""
if True:
foo = [food(1)
for bar in bars]
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e12_with_backslash(self):
line = r"""
if True:
assert reeval == parsed, \
'Repr gives different object:\n %r !=\n %r' % (parsed, reeval)
"""
fixed = r"""
if True:
assert reeval == parsed, \
'Repr gives different object:\n %r !=\n %r' % (parsed, reeval)
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
def test_e133(self):
line = """\
if True:
e = [
1, 2
]
"""
fixed = """\
if True:
e = [
1, 2
]
"""
with autopep8_context(line, options=['--hang-closing']) as result:
self.assertEqual(fixed, result)
def test_e133_not_effected(self):
line = """\
if True:
e = [
1, 2
]
"""
with autopep8_context(line, options=['--hang-closing']) as result:
self.assertEqual(line, result)
def test_w191(self):
line = """\
while True:
\tif True:
\t\t1
"""
fixed = """\
while True:
if True:
1
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e131_with_select_option(self):
line = 'd = f(\n a="hello"\n "world",\n b=1)\n'
fixed = 'd = f(\n a="hello"\n "world",\n b=1)\n'
with autopep8_context(line, options=['--select=E131']) as result:
self.assertEqual(fixed, result)
def test_e131_invalid_indent_with_select_option(self):
line = 'd = (\n "hello"\n "world")\n'
fixed = 'd = (\n "hello"\n "world")\n'
with autopep8_context(line, options=['--select=E131']) as result:
self.assertEqual(fixed, result)
def test_e201(self):
line = '( 1)\n'
fixed = '(1)\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e202(self):
line = '(1 )\n[2 ]\n{3 }\n'
fixed = '(1)\n[2]\n{3}\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e202_multiline(self):
line = """\
('''
a
b
c
''' )
"""
fixed = """\
('''
a
b
c
''')
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e202_skip_multiline_with_escaped_newline(self):
line = r"""
('c\
' )
"""
fixed = r"""
('c\
')
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e203_colon(self):
line = '{4 : 3}\n'
fixed = '{4: 3}\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e203_comma(self):
line = '[1 , 2 , 3]\n'
fixed = '[1, 2, 3]\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e203_semicolon(self):
line = "print(a, end=' ') ; nl = 0\n"
fixed = "print(a, end=' '); nl = 0\n"
with autopep8_context(line, options=['--select=E203']) as result:
self.assertEqual(fixed, result)
def test_e203_with_newline(self):
line = "print(a\n, end=' ')\n"
fixed = "print(a, end=' ')\n"
with autopep8_context(line, options=['--select=E203']) as result:
self.assertEqual(fixed, result)
def test_e211(self):
line = 'd = [1, 2, 3]\nprint d [0]\n'
fixed = 'd = [1, 2, 3]\nprint d[0]\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e221(self):
line = 'a = 1 + 1\n'
fixed = 'a = 1 + 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e221_do_not_skip_multiline(self):
line = '''\
def javascript(self):
return u"""
<script type="text/javascript" src="++resource++ptg.shufflegallery/jquery.promptu-menu.js"></script>
<script type="text/javascript">
$(function(){
$('ul.promptu-menu').promptumenu({width: %(width)i, height: %(height)i, rows: %(rows)i, columns: %(columns)i, direction: '%(direction)s', intertia: %(inertia)i, pages: %(pages)i});
\t$('ul.promptu-menu a').click(function(e) {
e.preventDefault();
});
$('ul.promptu-menu a').dblclick(function(e) {
window.location.replace($(this).attr("href"));
});
});
</script>
""" % {
}
'''
fixed = '''\
def javascript(self):
return u"""
<script type="text/javascript" src="++resource++ptg.shufflegallery/jquery.promptu-menu.js"></script>
<script type="text/javascript">
$(function(){
$('ul.promptu-menu').promptumenu({width: %(width)i, height: %(height)i, rows: %(rows)i, columns: %(columns)i, direction: '%(direction)s', intertia: %(inertia)i, pages: %(pages)i});
\t$('ul.promptu-menu a').click(function(e) {
e.preventDefault();
});
$('ul.promptu-menu a').dblclick(function(e) {
window.location.replace($(this).attr("href"));
});
});
</script>
""" % {
}
'''
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e222(self):
line = 'a = 1 + 1\n'
fixed = 'a = 1 + 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e222_with_multiline(self):
line = 'a = \"\"\"bar\nbaz\"\"\"\n'
fixed = 'a = \"\"\"bar\nbaz\"\"\"\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e223(self):
line = 'a = 1 + 1\n' # include TAB
fixed = 'a = 1 + 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e223_double(self):
line = 'a = 1 + 1\n' # include TAB
fixed = 'a = 1 + 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e223_with_tab_indentation(self):
line = """\
class Foo():
\tdef __init__(self):
\t\tx= 1\t+ 3
"""
fixed = """\
class Foo():
\tdef __init__(self):
\t\tx = 1 + 3
"""
with autopep8_context(line, options=['--ignore=E1,W191']) as result:
self.assertEqual(fixed, result)
def test_e224(self):
        line = 'a = 11 +\t1\n'  # include TAB
fixed = 'a = 11 + 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e224_double(self):
        line = 'a = 11 +\t\t1\n'  # include TAB
fixed = 'a = 11 + 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e224_with_tab_indentation(self):
line = """\
class Foo():
\tdef __init__(self):
\t\tx= \t3
"""
fixed = """\
class Foo():
\tdef __init__(self):
\t\tx = 3
"""
with autopep8_context(line, options=['--ignore=E1,W191']) as result:
self.assertEqual(fixed, result)
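
    # E225-E228: missing whitespace around operators (E225 general, E226
    # arithmetic, E227 bitwise/shift, E228 modulo).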
def test_e225(self):
line = '1+1\n2 +2\n3+ 3\n'
fixed = '1 + 1\n2 + 2\n3 + 3\n'
with autopep8_context(line, options=['--select=E,W']) as result:
self.assertEqual(fixed, result)
def test_e225_with_indentation_fix(self):
line = """\
class Foo(object):
def bar(self):
return self.elephant!='test'
"""
fixed = """\
class Foo(object):
def bar(self):
return self.elephant != 'test'
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e226(self):
line = '1*1\n2*2\n3*3\n'
fixed = '1 * 1\n2 * 2\n3 * 3\n'
with autopep8_context(line, options=['--select=E22']) as result:
self.assertEqual(fixed, result)
def test_e227(self):
line = '1&1\n2&2\n3&3\n'
fixed = '1 & 1\n2 & 2\n3 & 3\n'
with autopep8_context(line, options=['--select=E22']) as result:
self.assertEqual(fixed, result)
def test_e228(self):
line = '1%1\n2%2\n3%3\n'
fixed = '1 % 1\n2 % 2\n3 % 3\n'
with autopep8_context(line, options=['--select=E22']) as result:
self.assertEqual(fixed, result)
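
    # E231: missing whitespace after ',', ';' or ':'.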
def test_e231(self):
line = '[1,2,3]\n'
fixed = '[1, 2, 3]\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e231_with_many_commas(self):
fixed = str(list(range(200))) + '\n'
line = re.sub(', ', ',', fixed)
with autopep8_context(line, options=['--select=E231']) as result:
self.assertEqual(fixed, result)
def test_e231_with_colon_after_comma(self):
"""ws_comma fixer ignores this case."""
line = 'a[b1,:]\n'
fixed = 'a[b1, :]\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e231_should_only_do_ws_comma_once(self):
"""If we don't check appropriately, we end up doing ws_comma multiple
times and skipping all other fixes."""
line = """\
print( 1 )
foo[0,:]
bar[zap[0][0]:zig[0][0],:]
"""
fixed = """\
print(1)
foo[0, :]
bar[zap[0][0]:zig[0][0], :]
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
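
    # E241/E242: extra whitespace or a tab after a comma.  These are normally
    # left alone, hence the explicit --select=E / --aggressive options below.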
def test_e241(self):
        line = 'l = (1,  2)\n'
fixed = 'l = (1, 2)\n'
with autopep8_context(line, options=['--select=E']) as result:
self.assertEqual(fixed, result)
def test_e241_should_be_enabled_by_aggressive(self):
        line = 'l = (1,  2)\n'
fixed = 'l = (1, 2)\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e241_double(self):
        line = 'l = (1,   2)\n'
fixed = 'l = (1, 2)\n'
with autopep8_context(line, options=['--select=E']) as result:
self.assertEqual(fixed, result)
def test_e242(self):
line = 'l = (1,\t2)\n'
fixed = 'l = (1, 2)\n'
with autopep8_context(line, options=['--select=E']) as result:
self.assertEqual(fixed, result)
def test_e242_double(self):
line = 'l = (1,\t\t2)\n'
fixed = 'l = (1, 2)\n'
with autopep8_context(line, options=['--select=E']) as result:
self.assertEqual(fixed, result)
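
    # E251: unexpected spaces around keyword / parameter equals.
    # E252: missing whitespace around parameter equals for annotated arguments.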
def test_e251(self):
line = 'def a(arg = 1):\n print arg\n'
fixed = 'def a(arg=1):\n print arg\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e251_with_escaped_newline(self):
line = '1\n\n\ndef a(arg=\\\n1):\n print(arg)\n'
fixed = '1\n\n\ndef a(arg=1):\n print(arg)\n'
with autopep8_context(line, options=['--select=E251']) as result:
self.assertEqual(fixed, result)
def test_e251_with_calling(self):
line = 'foo(bar= True)\n'
fixed = 'foo(bar=True)\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e251_with_argument_on_next_line(self):
line = 'foo(bar\n=None)\n'
fixed = 'foo(bar=None)\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e252(self):
line = 'def a(arg1: int=1, arg2: int =1, arg3: int= 1):\n print arg\n'
fixed = 'def a(arg1: int = 1, arg2: int = 1, arg3: int = 1):\n print arg\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e252_with_argument_on_next_line(self):
line = 'def a(arg: int\n=1):\n print arg\n'
fixed = 'def a(arg: int\n= 1):\n print arg\n'
with autopep8_context(line, options=['--select=E252']) as result:
self.assertEqual(fixed, result)
def test_e252_with_escaped_newline(self):
line = 'def a(arg: int\\\n=1):\n print arg\n'
fixed = 'def a(arg: int\\\n= 1):\n print arg\n'
with autopep8_context(line, options=['--select=E252']) as result:
self.assertEqual(fixed, result)
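
    # E261: at least two spaces before an inline comment.
    # E262: inline comments should start with '# '.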
def test_e261(self):
line = "print 'a b '# comment\n"
fixed = "print 'a b ' # comment\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e261_with_inline_commented_out_code(self):
line = '1 # 0 + 0\n'
        fixed = '1  # 0 + 0\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e261_with_dictionary(self):
line = 'd = {# comment\n1: 2}\n'
        fixed = 'd = {  # comment\n    1: 2}\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e261_with_dictionary_no_space(self):
line = 'd = {#comment\n1: 2}\n'
        fixed = 'd = {  # comment\n    1: 2}\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e261_with_comma(self):
line = '{1: 2 # comment\n , }\n'
        fixed = '{1: 2  # comment\n , }\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e262_more_space(self):
line = "print 'a b ' # comment\n"
fixed = "print 'a b ' # comment\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e262_none_space(self):
line = "print 'a b ' #comment\n"
fixed = "print 'a b ' # comment\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e262_hash_in_string(self):
line = "print 'a b #string' #comment\n"
fixed = "print 'a b #string' # comment\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e262_hash_in_string_and_multiple_hashes(self):
line = "print 'a b #string' #comment #comment\n"
fixed = "print 'a b #string' # comment #comment\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e262_more_complex(self):
line = "print 'a b ' #comment\n123\n"
fixed = "print 'a b ' # comment\n123\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e271(self):
        line = 'True and  False\n'
fixed = 'True and False\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e271_with_multiline(self):
line = 'if True and False \\\n True:\n pass\n'
fixed = 'if True and False \\\n True:\n pass\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e272(self):
        line = 'True  and False\n'
fixed = 'True and False\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e273(self):
line = 'True and\tFalse\n'
fixed = 'True and False\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e274(self):
line = 'True\tand False\n'
fixed = 'True and False\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
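
    # E301-E306: blank-line conventions (blank lines between methods, around
    # top-level definitions, after decorators and before nested definitions).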
def test_e306(self):
line = """
def test_descriptors(self):
class descriptor(object):
def __init__(self, fn):
self.fn = fn
def __get__(self, obj, owner):
if obj is not None:
return self.fn(obj, obj)
else:
return self
def method(self):
return 'method'
"""
fixed = """
def test_descriptors(self):
class descriptor(object):
def __init__(self, fn):
self.fn = fn
def __get__(self, obj, owner):
if obj is not None:
return self.fn(obj, obj)
else:
return self
def method(self):
return 'method'
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e301(self):
line = 'class k:\n s = 0\n def f():\n print 1\n'
fixed = 'class k:\n s = 0\n\n def f():\n print 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e301_extended_with_docstring(self):
line = '''\
class Foo(object):
"""Test."""
def foo(self):
"""Test."""
def bar():
pass
'''
fixed = '''\
class Foo(object):
"""Test."""
def foo(self):
"""Test."""
def bar():
pass
'''
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_not_e301_extended_with_comment(self):
line = '''\
class Foo(object):
"""Test."""
# A comment.
def foo(self):
pass
'''
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e302(self):
line = 'def f():\n print 1\n\ndef ff():\n print 2\n'
fixed = 'def f():\n print 1\n\n\ndef ff():\n print 2\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e302_bug(self):
"""Avoid creating bad syntax."""
line = r"""def repeatable_expr(): return [bracketed_choice, simple_match, rule_ref],\
Optional(repeat_operator)
# def match(): return [simple_match , mixin_rule_match] TODO
def simple_match(): return [str_match, re_match]
"""
self.assertTrue(autopep8.check_syntax(line))
with autopep8_context(line) as result:
self.assertTrue(autopep8.check_syntax(result))
def test_e303(self):
line = '\n\n\n# alpha\n\n1\n'
fixed = '\n\n# alpha\n\n1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e303_extended(self):
line = '''\
def foo():
"""Document."""
'''
fixed = '''\
def foo():
"""Document."""
'''
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e303_with_e305(self):
line = """\
def foo():
pass
# comment (E303)
a = 1 # (E305)
"""
fixed = """\
def foo():
pass
# comment (E303)
a = 1 # (E305)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e304(self):
line = '@contextmanager\n\ndef f():\n print 1\n'
fixed = '@contextmanager\ndef f():\n print 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e304_with_comment(self):
line = '@contextmanager\n# comment\n\ndef f():\n print 1\n'
fixed = '@contextmanager\n# comment\ndef f():\n print 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e305(self):
line = 'def a():\n pass\na()\n'
fixed = 'def a():\n pass\n\n\na()\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
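
    # E401: multiple imports on one line.
    # E402: module level import not at top of file.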
def test_e401(self):
line = 'import os, sys\n'
fixed = 'import os\nimport sys\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e401_with_indentation(self):
line = 'def a():\n import os, sys\n'
fixed = 'def a():\n import os\n import sys\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e401_should_ignore_commented_comma(self):
line = 'import bdist_egg, egg # , not a module, neither is this\n'
        fixed = 'import bdist_egg\nimport egg  # , not a module, neither is this\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e401_should_ignore_commented_comma_with_indentation(self):
        line = 'if True:\n    import bdist_egg, egg  # , not a module, neither is this\n'
        fixed = 'if True:\n    import bdist_egg\n    import egg  # , not a module, neither is this\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e401_should_ignore_false_positive(self):
line = 'import bdist_egg; bdist_egg.write_safety_flag(cmd.egg_info, safe)\n'
with autopep8_context(line, options=['--select=E401']) as result:
self.assertEqual(line, result)
def test_e401_with_escaped_newline_case(self):
line = 'import foo, \\\n bar\n'
fixed = 'import foo\nimport \\\n bar\n'
with autopep8_context(line, options=['--select=E401']) as result:
self.assertEqual(fixed, result)
def test_e402(self):
line = 'a = 1\nimport os\n'
fixed = 'import os\na = 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e402_duplicate_module(self):
line = 'a = 1\nimport os\nprint(os)\nimport os\n'
fixed = 'import os\na = 1\nprint(os)\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e402_with_future_import(self):
line = 'from __future__ import print_function\na = 1\nimport os\n'
fixed = 'from __future__ import print_function\nimport os\na = 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e401_with_multiline_from_import(self):
line = """\
from os import (
chroot
)
def f():
pass
from a import b
from b import c
from c import d
"""
fixed = """\
from a import b
from c import d
from b import c
from os import (
chroot
)
def f():
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e402_with_multiline_from_future_import(self):
line = """\
from __future__ import (
absolute_import,
print_function
)
def f():
pass
import os
"""
fixed = """\
from __future__ import (
absolute_import,
print_function
)
import os
def f():
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e402_with_module_doc(self):
line1 = '"""\nmodule doc\n"""\na = 1\nimport os\n'
fixed1 = '"""\nmodule doc\n"""\nimport os\na = 1\n'
line2 = '# comment\nr"""\nmodule doc\n"""\na = 1\nimport os\n'
fixed2 = '# comment\nr"""\nmodule doc\n"""\nimport os\na = 1\n'
line3 = "u'''one line module doc'''\na = 1\nimport os\n"
fixed3 = "u'''one line module doc'''\nimport os\na = 1\n"
line4 = "'''\n\"\"\"\ndoc'''\na = 1\nimport os\n"
fixed4 = "'''\n\"\"\"\ndoc'''\nimport os\na = 1\n"
for line, fixed in [(line1, fixed1), (line2, fixed2),
(line3, fixed3), (line4, fixed4)]:
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e402_import_some_modules(self):
line = """\
a = 1
from csv import (
reader,
writer,
)
import os
print(os, reader, writer)
import os
"""
fixed = """\
import os
from csv import (
reader,
writer,
)
a = 1
print(os, reader, writer)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e402_with_dunder(self):
line = """\
__all__ = ["a", "b"]
def f():
pass
import os
"""
fixed = """\
import os
__all__ = ["a", "b"]
def f():
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e402_with_dunder_lines(self):
line = """\
__all__ = [
"a",
"b",
]
def f():
pass
import os
"""
fixed = """\
import os
__all__ = [
"a",
"b",
]
def f():
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
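
    # E501: line too long.  Many of these tests pass --aggressive/-aa, which
    # lets autopep8 reflow code more invasively when shortening lines.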
def test_e501_basic(self):
line = """\
print(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
fixed = """\
print(111, 111, 111, 111, 222, 222, 222, 222,
222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_with_dictionary(self):
line = """\
myDict = { 'kg': 1, 'tonnes': tonne, 't/y': tonne / year, 'Mt/y': 1e6 * tonne / year}
"""
fixed = """\
myDict = {
'kg': 1,
'tonnes': tonne,
't/y': tonne / year,
'Mt/y': 1e6 * tonne / year}
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_with_in(self):
line = """\
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if k_left in ('any', k_curr) and k_right in ('any', k_curr):
pass
"""
fixed = """\
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if k_left in ('any', k_curr) and k_right in ('any', k_curr):
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_with_commas_and_colons(self):
line = """\
foobar = {'aaaaaaaaaaaa': 'bbbbbbbbbbbbbbbb', 'dddddd': 'eeeeeeeeeeeeeeee', 'ffffffffffff': 'gggggggg'}
"""
fixed = """\
foobar = {'aaaaaaaaaaaa': 'bbbbbbbbbbbbbbbb',
'dddddd': 'eeeeeeeeeeeeeeee', 'ffffffffffff': 'gggggggg'}
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_with_inline_comments(self):
line = """\
' ' # Long inline comments should be moved above.
if True:
' ' # Long inline comments should be moved above.
"""
fixed = """\
# Long inline comments should be moved above.
' '
if True:
# Long inline comments should be moved above.
' '
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_with_inline_comments_should_skip_multiline(self):
line = """\
'''This should be left alone. -----------------------------------------------------
''' # foo
'''This should be left alone. -----------------------------------------------------
''' \\
# foo
'''This should be left alone. -----------------------------------------------------
''' \\
\\
# foo
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(line, result)
def test_e501_with_inline_comments_should_skip_keywords(self):
line = """\
' ' # noqa Long inline comments should be moved above.
if True:
' ' # pylint: disable-msgs=E0001
' ' # pragma: no cover
' ' # pragma: no cover
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_e501_with_inline_comments_should_skip_keywords_without_aggressive(
self):
line = """\
' ' # noqa Long inline comments should be moved above.
if True:
' ' # pylint: disable-msgs=E0001
' ' # pragma: no cover
' ' # pragma: no cover
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e501_with_inline_comments_should_skip_edge_cases(self):
line = """\
if True:
x = \\
' ' # Long inline comments should be moved above.
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e501_basic_should_prefer_balanced_brackets(self):
line = """\
if True:
reconstructed = iradon(radon(image), filter="ramp", interpolation="nearest")
"""
fixed = """\
if True:
reconstructed = iradon(radon(image), filter="ramp",
interpolation="nearest")
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_with_very_long_line(self):
line = """\
x = [3244234243234, 234234234324, 234234324, 23424234, 234234234, 234234, 234243, 234243, 234234234324, 234234324, 23424234, 234234234, 234234, 234243, 234243]
"""
fixed = """\
x = [
3244234243234,
234234234324,
234234324,
23424234,
234234234,
234234,
234243,
234243,
234234234324,
234234324,
23424234,
234234234,
234234,
234243,
234243]
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_with_lambda(self):
line = """\
self.mock_group.modify_state.side_effect = lambda *_: defer.fail(NoSuchScalingGroupError(1, 2))
"""
fixed = """\
self.mock_group.modify_state.side_effect = lambda *_: defer.fail(
NoSuchScalingGroupError(1, 2))
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_shorten_with_backslash(self):
line = """\
class Bar(object):
def bar(self, position):
if 0 <= position <= self._blocks[-1].position + len(self._blocks[-1].text):
pass
"""
fixed = """\
class Bar(object):
def bar(self, position):
if 0 <= position <= self._blocks[-1].position + \\
len(self._blocks[-1].text):
pass
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_shorten_at_commas_skip(self):
line = """\
parser.add_argument('source_corpus', help='corpus name/path relative to an nltk_data directory')
parser.add_argument('target_corpus', help='corpus name/path relative to an nltk_data directory')
"""
fixed = """\
parser.add_argument(
'source_corpus',
help='corpus name/path relative to an nltk_data directory')
parser.add_argument(
'target_corpus',
help='corpus name/path relative to an nltk_data directory')
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_with_shorter_length(self):
line = "foooooooooooooooooo('abcdefghijklmnopqrstuvwxyz')\n"
fixed = "foooooooooooooooooo(\n 'abcdefghijklmnopqrstuvwxyz')\n"
with autopep8_context(line,
options=['--max-line-length=40']) as result:
self.assertEqual(fixed, result)
def test_e501_with_indent(self):
line = """\
def d():
print(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
fixed = """\
def d():
print(111, 111, 111, 111, 222, 222, 222, 222,
222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_alone_with_indentation(self):
line = """\
if True:
print(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
fixed = """\
if True:
print(111, 111, 111, 111, 222, 222, 222, 222,
222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
with autopep8_context(line, options=['--select=E501']) as result:
self.assertEqual(fixed, result)
def test_e501_alone_with_tuple(self):
line = """\
fooooooooooooooooooooooooooooooo000000000000000000000000 = [1,
('TransferTime', 'FLOAT')
]
"""
fixed = """\
fooooooooooooooooooooooooooooooo000000000000000000000000 = [1,
('TransferTime',
'FLOAT')
]
"""
with autopep8_context(line, options=['--select=E501']) as result:
self.assertEqual(fixed, result)
def test_e501_should_not_try_to_break_at_every_paren_in_arithmetic(self):
line = """\
term3 = w6 * c5 * (8.0 * psi4 * (11.0 - 24.0 * t2) - 28 * psi3 * (1 - 6.0 * t2) + psi2 * (1 - 32 * t2) - psi * (2.0 * t2) + t4) / 720.0
this_should_be_shortened = (' ', ' ')
"""
fixed = """\
term3 = w6 * c5 * (8.0 * psi4 * (11.0 - 24.0 * t2) - 28 * psi3 *
(1 - 6.0 * t2) + psi2 * (1 - 32 * t2) - psi * (2.0 * t2) + t4) / 720.0
this_should_be_shortened = (
' ',
' ')
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_arithmetic_operator_with_indent(self):
line = """\
def d():
111 + 111 + 111 + 111 + 111 + 222 + 222 + 222 + 222 + 222 + 222 + 222 + 222 + 222 + 333 + 333 + 333 + 333
"""
fixed = r"""def d():
111 + 111 + 111 + 111 + 111 + 222 + 222 + 222 + 222 + \
222 + 222 + 222 + 222 + 222 + 333 + 333 + 333 + 333
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_more_complicated(self):
line = """\
blahblah = os.environ.get('blahblah') or os.environ.get('blahblahblah') or os.environ.get('blahblahblahblah')
"""
fixed = """\
blahblah = os.environ.get('blahblah') or os.environ.get(
'blahblahblah') or os.environ.get('blahblahblahblah')
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_skip_even_more_complicated(self):
line = """\
if True:
if True:
if True:
blah = blah.blah_blah_blah_bla_bl(blahb.blah, blah.blah,
blah=blah.label, blah_blah=blah_blah,
blah_blah2=blah_blah)
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e501_avoid_breaking_at_empty_parentheses_if_possible(self):
line = """\
someverylongindenttionwhatnot().foo().bar().baz("and here is a long string 123456789012345678901234567890")
"""
fixed = """\
someverylongindenttionwhatnot().foo().bar().baz(
"and here is a long string 123456789012345678901234567890")
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_with_logical_fix(self):
line = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
fixed = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccc,
dddddddddddddddddddddddd)
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_logical_fix_and_physical_fix(self):
line = """\
# ------------------------------------ ------------------------------------------
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
fixed = """\
# ------------------------------------ -----------------------------------
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccc,
dddddddddddddddddddddddd)
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_logical_fix_and_adjacent_strings(self):
line = """\
print('a-----------------------' 'b-----------------------' 'c-----------------------'
'd-----------------------''e'"f"r"g")
"""
fixed = """\
print(
'a-----------------------'
'b-----------------------'
'c-----------------------'
'd-----------------------'
'e'
"f"
r"g")
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_multiple_lines(self):
line = """\
foo_bar_zap_bing_bang_boom(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333,
111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333)
"""
fixed = """\
foo_bar_zap_bing_bang_boom(
111,
111,
111,
111,
222,
222,
222,
222,
222,
222,
222,
222,
222,
333,
333,
111,
111,
111,
111,
222,
222,
222,
222,
222,
222,
222,
222,
222,
333,
333)
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_multiple_lines_and_quotes(self):
line = """\
if True:
xxxxxxxxxxx = xxxxxxxxxxxxxxxxx(xxxxxxxxxxx, xxxxxxxxxxxxxxxx={'xxxxxxxxxxxx': 'xxxxx',
'xxxxxxxxxxx': xx,
'xxxxxxxx': False,
})
"""
fixed = """\
if True:
xxxxxxxxxxx = xxxxxxxxxxxxxxxxx(
xxxxxxxxxxx,
xxxxxxxxxxxxxxxx={
'xxxxxxxxxxxx': 'xxxxx',
'xxxxxxxxxxx': xx,
'xxxxxxxx': False,
})
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_do_not_break_on_keyword(self):
# We don't want to put a newline after equals for keywords as this
# violates PEP 8.
line = """\
if True:
long_variable_name = tempfile.mkstemp(prefix='abcdefghijklmnopqrstuvwxyz0123456789')
"""
fixed = """\
if True:
long_variable_name = tempfile.mkstemp(
prefix='abcdefghijklmnopqrstuvwxyz0123456789')
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_do_not_begin_line_with_comma(self):
# This fix is incomplete. (The line is still too long.) But it is here
# just to confirm that we do not put a comma at the beginning of a
# line.
line = """\
def dummy():
if True:
if True:
if True:
object = ModifyAction( [MODIFY70.text, OBJECTBINDING71.text, COLON72.text], MODIFY70.getLine(), MODIFY70.getCharPositionInLine() )
"""
fixed = """\
def dummy():
if True:
if True:
if True:
object = ModifyAction([MODIFY70.text, OBJECTBINDING71.text, COLON72.text], MODIFY70.getLine(
), MODIFY70.getCharPositionInLine())
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_should_not_break_on_dot(self):
line = """\
if True:
if True:
raise xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx('xxxxxxxxxxxxxxxxx "{d}" xxxxxxxxxxxxxx'.format(d='xxxxxxxxxxxxxxx'))
"""
fixed = """\
if True:
if True:
raise xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx(
'xxxxxxxxxxxxxxxxx "{d}" xxxxxxxxxxxxxx'.format(d='xxxxxxxxxxxxxxx'))
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_with_comment(self):
line = """123
if True:
if True:
if True:
if True:
if True:
if True:
# This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
pass
# http://foo.bar/abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-
# The following is ugly commented-out code and should not be touched.
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx = 1
"""
fixed = """123
if True:
if True:
if True:
if True:
if True:
if True:
# This is a long comment that should be wrapped. I will
# wrap it using textwrap to be within 72 characters.
pass
# http://foo.bar/abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-
# The following is ugly commented-out code and should not be touched.
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx = 1
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_with_comment_should_not_modify_docstring(self):
line = '''\
def foo():
"""
# This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
"""
'''
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_e501_should_only_modify_last_comment(self):
line = """123
if True:
if True:
if True:
if True:
if True:
if True:
# This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 1. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 2. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 3. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
"""
fixed = """123
if True:
if True:
if True:
if True:
if True:
if True:
# This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 1. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 2. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 3. This is a long comment that should be wrapped. I
# will wrap it using textwrap to be within 72
# characters.
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_should_not_interfere_with_non_comment(self):
line = '''
"""
# not actually a comment %d. 12345678901234567890, 12345678901234567890, 12345678901234567890.
""" % (0,)
'''
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_e501_should_cut_comment_pattern(self):
line = """123
# -- Useless lines ----------------------------------------------------------------------
321
"""
fixed = """123
# -- Useless lines -------------------------------------------------------
321
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_with_function_should_not_break_on_colon(self):
line = r"""
class Useless(object):
def _table_field_is_plain_widget(self, widget):
if widget.__class__ == Widget or\
(widget.__class__ == WidgetMeta and Widget in widget.__bases__):
return True
return False
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e501_should_break_before_tuple_start(self):
line = """\
xxxxxxxxxxxxx(aaaaaaaaaaaaa, bbbbbbbbbbbbbbbbbb, cccccccccc, (dddddddddddddddddddddd, eeeeeeeeeeee, fffffffffff, gggggggggg))
"""
fixed = """\
xxxxxxxxxxxxx(aaaaaaaaaaaaa, bbbbbbbbbbbbbbbbbb, cccccccccc,
(dddddddddddddddddddddd, eeeeeeeeeeee, fffffffffff, gggggggggg))
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_with_aggressive(self):
line = """\
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
}
"""
fixed = """\
models = {
'auth.group': {
'Meta': {
'object_name': 'Group'},
'permissions': (
'django.db.models.fields.related.ManyToManyField',
[],
{
'to': "orm['auth.Permission']",
'symmetrical': 'False',
'blank': 'True'})},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'name': (
'django.db.models.fields.CharField',
[],
{
'max_length': '50'})},
}
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_aggressive_and_multiple_logical_lines(self):
line = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
fixed = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccc,
dddddddddddddddddddddddd)
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccc,
dddddddddddddddddddddddd)
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_aggressive_and_multiple_logical_lines_with_math(self):
line = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx([-1 + 5 / 10,
100,
-3 - 4])
"""
fixed = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx(
[-1 + 5 / 10, 100, -3 - 4])
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_aggressive_and_import(self):
line = """\
from . import (xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy)
"""
fixed = """\
from . import (
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy)
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_aggressive_and_massive_number_of_logical_lines(self):
"""We do not care about results here.
We just want to know that it doesn't take a ridiculous amount of
        time. Caching is currently required to avoid repeatedly trying
the same line.
"""
line = """\
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from provider.compat import user_model_label
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Client'
db.create_table('oauth2_client', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_model_label])),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('redirect_uri', self.gf('django.db.models.fields.URLField')(max_length=200)),
('client_id', self.gf('django.db.models.fields.CharField')(default='37b581bdc702c732aa65', max_length=255)),
('client_secret', self.gf('django.db.models.fields.CharField')(default='5cf90561f7566aa81457f8a32187dcb8147c7b73', max_length=255)),
('client_type', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('oauth2', ['Client'])
# Adding model 'Grant'
db.create_table('oauth2_grant', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_model_label])),
('client', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['oauth2.Client'])),
('code', self.gf('django.db.models.fields.CharField')(default='f0cda1a5f4ae915431ff93f477c012b38e2429c4', max_length=255)),
('expires', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 2, 8, 10, 43, 45, 620301))),
('redirect_uri', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('scope', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('oauth2', ['Grant'])
# Adding model 'AccessToken'
db.create_table('oauth2_accesstoken', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_model_label])),
('token', self.gf('django.db.models.fields.CharField')(default='b10b8f721e95117cb13c', max_length=255)),
('client', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['oauth2.Client'])),
('expires', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 2, 7, 10, 33, 45, 618854))),
('scope', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('oauth2', ['AccessToken'])
# Adding model 'RefreshToken'
db.create_table('oauth2_refreshtoken', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_model_label])),
('token', self.gf('django.db.models.fields.CharField')(default='84035a870dab7c820c2c501fb0b10f86fdf7a3fe', max_length=255)),
('access_token', self.gf('django.db.models.fields.related.OneToOneField')(related_name='refresh_token', unique=True, to=orm['oauth2.AccessToken'])),
('client', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['oauth2.Client'])),
('expired', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('oauth2', ['RefreshToken'])
def backwards(self, orm):
# Deleting model 'Client'
db.delete_table('oauth2_client')
# Deleting model 'Grant'
db.delete_table('oauth2_grant')
# Deleting model 'AccessToken'
db.delete_table('oauth2_accesstoken')
# Deleting model 'RefreshToken'
db.delete_table('oauth2_refreshtoken')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': user_model_label.split('.')[-1]},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'oauth2.accesstoken': {
'Meta': {'object_name': 'AccessToken'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oauth2.Client']"}),
'expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 7, 10, 33, 45, 624553)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scope': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'d5c1f65020ebdc89f20c'", 'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label})
},
'oauth2.client': {
'Meta': {'object_name': 'Client'},
'client_id': ('django.db.models.fields.CharField', [], {'default': "'306fb26cbcc87dd33cdb'", 'max_length': '255'}),
'client_secret': ('django.db.models.fields.CharField', [], {'default': "'7e5785add4898448d53767f15373636b918cf0e3'", 'max_length': '255'}),
'client_type': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label})
},
'oauth2.grant': {
'Meta': {'object_name': 'Grant'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oauth2.Client']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'310b2c63e27306ecf5307569dd62340cc4994b73'", 'max_length': '255'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 8, 10, 43, 45, 625956)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'scope': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label})
},
'oauth2.refreshtoken': {
'Meta': {'object_name': 'RefreshToken'},
'access_token': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'refresh_token'", 'unique': 'True', 'to': "orm['oauth2.AccessToken']"}),
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oauth2.Client']"}),
'expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'ef0ab76037f17769ab2975a816e8f41a1c11d25e'", 'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label})
}
}
complete_apps = ['oauth2']
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(''.join(line.split()),
''.join(result.split()))
def test_e501_shorten_comment_with_aggressive(self):
line = """\
# --------- ----------------------------------------------------------------------
"""
fixed = """\
# --------- --------------------------------------------------------------
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_shorten_comment_without_aggressive(self):
"""Do nothing without aggressive."""
line = """\
def foo():
pass
# --------- ----------------------------------------------------------------------
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e501_with_aggressive_and_escaped_newline(self):
line = """\
if True or \\
False: # test test test test test test test test test test test test test test
pass
"""
fixed = """\
if True or \\
False: # test test test test test test test test test test test test test test
pass
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_aggressive_and_multiline_string(self):
line = """\
print('---------------------------------------------------------------------',
('================================================', '====================='),
'''--------------------------------------------------------------------------------
''')
"""
fixed = """\
print(
'---------------------------------------------------------------------',
('================================================',
'====================='),
'''--------------------------------------------------------------------------------
''')
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_aggressive_and_multiline_string_with_addition(self):
line = '''\
def f():
email_text += """<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>"""+despot["Nicholas"]+"""<br>
<b>Minion: </b>"""+serf["Dmitri"]+"""<br>
<b>Residence: </b>"""+palace["Winter"]+"""<br>
</body>
</html>"""
'''
fixed = '''\
def f():
email_text += """<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>""" + despot["Nicholas"] + """<br>
<b>Minion: </b>""" + serf["Dmitri"] + """<br>
<b>Residence: </b>""" + palace["Winter"] + """<br>
</body>
</html>"""
'''
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_aggressive_and_multiline_string_in_parens(self):
line = '''\
def f():
email_text += ("""<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>"""+despot["Nicholas"]+"""<br>
<b>Minion: </b>"""+serf["Dmitri"]+"""<br>
<b>Residence: </b>"""+palace["Winter"]+"""<br>
</body>
</html>""")
'''
fixed = '''\
def f():
email_text += (
"""<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>""" +
despot["Nicholas"] +
"""<br>
<b>Minion: </b>""" +
serf["Dmitri"] +
"""<br>
<b>Residence: </b>""" +
palace["Winter"] +
"""<br>
</body>
</html>""")
'''
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_aggressive_and_indentation(self):
line = """\
if True:
# comment here
print(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,cccccccccccccccccccccccccccccccccccccccccc)
"""
fixed = """\
if True:
# comment here
print(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccccccccccccccccc)
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_multiple_keys_and_aggressive(self):
line = """\
one_two_three_four_five_six = {'one two three four five': 12345, 'asdfsdflsdkfjl sdflkjsdkfkjsfjsdlkfj sdlkfjlsfjs': '343',
1: 1}
"""
fixed = """\
one_two_three_four_five_six = {
'one two three four five': 12345,
'asdfsdflsdkfjl sdflkjsdkfkjsfjsdlkfj sdlkfjlsfjs': '343',
1: 1}
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_aggressive_and_carriage_returns_only(self):
"""Make sure _find_logical() does not crash."""
line = 'if True:\r from aaaaaaaaaaaaaaaa import bbbbbbbbbbbbbbbbbbb\r \r ccccccccccc = None\r'
fixed = 'if True:\r from aaaaaaaaaaaaaaaa import bbbbbbbbbbbbbbbbbbb\r\r ccccccccccc = None\r'
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_should_ignore_imports(self):
line = """\
import logging, os, bleach, commonware, urllib2, json, time, requests, urlparse, re
"""
with autopep8_context(line, options=['--select=E501']) as result:
self.assertEqual(line, result)
def test_e501_should_not_do_useless_things(self):
line = """\
foo(' ')
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e501_aggressive_with_percent(self):
line = """\
raise MultiProjectException("Ambiguous workspace: %s=%s, %s" % ( varname, varname_path, os.path.abspath(config_filename)))
"""
fixed = """\
raise MultiProjectException(
"Ambiguous workspace: %s=%s, %s" %
(varname, varname_path, os.path.abspath(config_filename)))
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_aggressive_with_def(self):
line = """\
def foo(sldfkjlsdfsdf, kksdfsdfsf,sdfsdfsdf, sdfsdfkdk, szdfsdfsdf, sdfsdfsdfsdlkfjsdlf, sdfsdfddf,sdfsdfsfd, sdfsdfdsf):
pass
"""
fixed = """\
def foo(sldfkjlsdfsdf, kksdfsdfsf, sdfsdfsdf, sdfsdfkdk, szdfsdfsdf,
sdfsdfsdfsdlkfjsdlf, sdfsdfddf, sdfsdfsfd, sdfsdfdsf):
pass
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_more_aggressive_with_def(self):
line = """\
def foobar(sldfkjlsdfsdf, kksdfsdfsf,sdfsdfsdf, sdfsdfkdk, szdfsdfsdf, sdfsdfsdfsdlkfjsdlf, sdfsdfddf,sdfsdfsfd, sdfsdfdsf):
pass
"""
fixed = """\
def foobar(
sldfkjlsdfsdf,
kksdfsdfsf,
sdfsdfsdf,
sdfsdfkdk,
szdfsdfsdf,
sdfsdfsdfsdlkfjsdlf,
sdfsdfddf,
sdfsdfsfd,
sdfsdfdsf):
pass
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_aggressive_with_tuple(self):
line = """\
def f():
man_this_is_a_very_long_function_name(an_extremely_long_variable_name,
('a string that is long: %s'%'bork'))
"""
fixed = """\
def f():
man_this_is_a_very_long_function_name(
an_extremely_long_variable_name,
('a string that is long: %s' % 'bork'))
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_aggressive_with_tuple_in_list(self):
line = """\
def f(self):
self._xxxxxxxx(aaaaaa, bbbbbbbbb, cccccccccccccccccc,
[('mmmmmmmmmm', self.yyyyyyyyyy.zzzzzzz/_DDDDD)], eee, 'ff')
"""
fixed = """\
def f(self):
self._xxxxxxxx(aaaaaa, bbbbbbbbb, cccccccccccccccccc, [
('mmmmmmmmmm', self.yyyyyyyyyy.zzzzzzz / _DDDDD)], eee, 'ff')
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_aggressive_decorator(self):
line = """\
@foo(('xxxxxxxxxxxxxxxxxxxxxxxxxx', users.xxxxxxxxxxxxxxxxxxxxxxxxxx), ('yyyyyyyyyyyy', users.yyyyyyyyyyyy), ('zzzzzzzzzzzzzz', users.zzzzzzzzzzzzzz))
"""
fixed = """\
@foo(('xxxxxxxxxxxxxxxxxxxxxxxxxx', users.xxxxxxxxxxxxxxxxxxxxxxxxxx),
('yyyyyyyyyyyy', users.yyyyyyyyyyyy), ('zzzzzzzzzzzzzz', users.zzzzzzzzzzzzzz))
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_aggressive_long_class_name(self):
line = """\
class AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA(BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB):
pass
"""
fixed = """\
class AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA(
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB):
pass
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_aggressive_long_comment_and_long_line(self):
line = """\
def foo():
# This is not a novel to be tossed aside lightly. It should be throw with great force.
self.xxxxxxxxx(_('yyyyyyyyyyyyy yyyyyyyyyyyy yyyyyyyy yyyyyyyy y'), 'zzzzzzzzzzzzzzzzzzz', bork='urgent')
"""
fixed = """\
def foo():
# This is not a novel to be tossed aside lightly. It should be throw with
# great force.
self.xxxxxxxxx(
_('yyyyyyyyyyyyy yyyyyyyyyyyy yyyyyyyy yyyyyyyy y'),
'zzzzzzzzzzzzzzzzzzz',
bork='urgent')
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_aggressive_intermingled_comments(self):
line = """\
A = [
# A comment
['aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'bbbbbbbbbbbbbbbbbbbbbb', 'cccccccccccccccccccccc']
]
"""
fixed = """\
A = [
# A comment
['aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'bbbbbbbbbbbbbbbbbbbbbb',
'cccccccccccccccccccccc']
]
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_if_line_over_limit(self):
line = """\
if not xxxxxxxxxxxx(aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc, dddddddddddddddddddddd):
return 1
"""
fixed = """\
if not xxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccc,
dddddddddddddddddddddd):
return 1
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_for_line_over_limit(self):
line = """\
for aaaaaaaaa in xxxxxxxxxxxx(aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc, dddddddddddddddddddddd):
pass
"""
fixed = """\
for aaaaaaaaa in xxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccc,
dddddddddddddddddddddd):
pass
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_while_line_over_limit(self):
line = """\
while xxxxxxxxxxxx(aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc, dddddddddddddddddddddd):
pass
"""
fixed = """\
while xxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccc,
dddddddddddddddddddddd):
pass
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_avoid_breaking_at_opening_slice(self):
"""Prevents line break on slice notation, dict access in this example:
GYakymOSMc=GYakymOSMW(GYakymOSMJ,GYakymOSMA,GYakymOSMr,GYakymOSMw[
'abc'],GYakymOSMU,GYakymOSMq,GYakymOSMH,GYakymOSMl,svygreNveyvarf=GYakymOSME)
"""
line = """\
GYakymOSMc=GYakymOSMW(GYakymOSMJ,GYakymOSMA,GYakymOSMr,GYakymOSMw['abc'],GYakymOSMU,GYakymOSMq,GYakymOSMH,GYakymOSMl,svygreNveyvarf=GYakymOSME)
"""
fixed = """\
GYakymOSMc = GYakymOSMW(GYakymOSMJ, GYakymOSMA, GYakymOSMr,
GYakymOSMw['abc'], GYakymOSMU, GYakymOSMq, GYakymOSMH, GYakymOSMl, svygreNveyvarf=GYakymOSME)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_avoid_breaking_at_multi_level_slice(self):
"""Prevents line break on slice notation, dict access in this example:
GYakymOSMc=GYakymOSMW(GYakymOSMJ,GYakymOSMA,GYakymOSMr,GYakymOSMw['abc'][
'def'],GYakymOSMU,GYakymOSMq,GYakymOSMH,GYakymOSMl,svygreNveyvarf=GYakymOSME)
"""
line = """\
GYakymOSMc=GYakymOSMW(GYakymOSMJ,GYakymOSMA,GYakymOSMr,GYakymOSMw['abc']['def'],GYakymOSMU,GYakymOSMq,GYakymOSMH,GYakymOSMl,svygreNveyvarf=GYakymOSME)
"""
fixed = """\
GYakymOSMc = GYakymOSMW(GYakymOSMJ, GYakymOSMA, GYakymOSMr,
GYakymOSMw['abc']['def'], GYakymOSMU, GYakymOSMq, GYakymOSMH, GYakymOSMl, svygreNveyvarf=GYakymOSME)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
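
    # E502: the backslash is redundant between brackets.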
def test_e502(self):
line = "print('abc'\\\n 'def')\n"
fixed = "print('abc'\n 'def')\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
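
    # E701-E704: compound statements (multiple statements on one line,
    # trailing semicolons and statements on the same line as a def).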
def test_e701(self):
line = 'if True: print True\n'
fixed = 'if True:\n print True\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e701_with_escaped_newline(self):
line = 'if True:\\\nprint True\n'
fixed = 'if True:\n print True\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e701_with_escaped_newline_and_spaces(self):
line = 'if True: \\ \nprint True\n'
fixed = 'if True:\n print True\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e702(self):
line = 'print 1; print 2\n'
fixed = 'print 1\nprint 2\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e702_after_colon_should_be_untouched(self):
# https://docs.python.org/2/reference/compound_stmts.html
line = 'def foo(): print(1); print(2)\n'
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e702_with_semicolon_at_end(self):
line = 'print 1;\n'
fixed = 'print 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e702_with_semicolon_and_space_at_end(self):
line = 'print 1; \n'
fixed = 'print 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e702_with_whitespace(self):
line = 'print 1 ; print 2\n'
fixed = 'print 1\nprint 2\n'
with autopep8_context(line, options=['--select=E702']) as result:
self.assertEqual(fixed, result)
def test_e702_with_non_ascii_file(self):
line = """\
# -*- coding: utf-8 -*-
# French comment with accent é
# Un commentaire en français avec un accent é
import time
time.strftime('%d-%m-%Y');
"""
fixed = """\
# -*- coding: utf-8 -*-
# French comment with accent é
# Un commentaire en français avec un accent é
import time
time.strftime('%d-%m-%Y')
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e702_with_escaped_newline(self):
line = '1; \\\n2\n'
fixed = '1\n2\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e702_with_escaped_newline_with_indentation(self):
line = '1; \\\n 2\n'
fixed = '1\n2\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e702_more_complicated(self):
line = """\
def foo():
if bar : bar+=1; bar=bar*bar ; return bar
"""
fixed = """\
def foo():
if bar:
bar += 1
bar = bar * bar
return bar
"""
with autopep8_context(line, options=['--select=E,W']) as result:
self.assertEqual(fixed, result)
def test_e702_with_semicolon_in_string(self):
line = 'print(";");\n'
fixed = 'print(";")\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e702_with_semicolon_in_string_to_the_right(self):
line = 'x = "x"; y = "y;y"\n'
fixed = 'x = "x"\ny = "y;y"\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e702_indent_correctly(self):
line = """\
(
1,
2,
3); 4; 5; 5 # pyflakes
"""
fixed = """\
(
1,
2,
3)
4
5
5 # pyflakes
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e702_with_triple_quote(self):
line = '"""\n hello\n """; 1\n'
fixed = '"""\n hello\n """\n1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e702_with_triple_quote_and_indent(self):
line = 'def f():\n """\n hello\n """; 1\n'
fixed = 'def f():\n """\n hello\n """\n 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e702_with_semicolon_after_string(self):
line = """\
raise IOError('abc '
'def.');
"""
fixed = """\
raise IOError('abc '
'def.')
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e702_with_dict_semicolon(self):
line = """\
MY_CONST = [
{'A': 1},
{'B': 2}
];
"""
fixed = """\
MY_CONST = [
{'A': 1},
{'B': 2}
]
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e703_with_inline_comment(self):
line = 'a = 5; # inline comment\n'
fixed = 'a = 5 # inline comment\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e703_in_example_of_readme(self):
line = """\
def example2(): return ('' in {'f': 2}) in {'has_key() is deprecated': True};
"""
fixed = """\
def example2(): return ('' in {'f': 2}) in {'has_key() is deprecated': True}
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e704(self):
line = 'def f(x): return 2*x\n'
fixed = 'def f(x):\n return 2 * x\n'
with autopep8_context(line, options=['-aaa']) as result:
self.assertEqual(fixed, result)
def test_e704_not_work_with_aa_option(self):
line = 'def f(x): return 2*x\n'
with autopep8_context(line, options=['-aa', '--select=E704']) as result:
self.assertEqual(line, result)
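# E711/E712: comparisons with None/True/False.  With --aggressive, 'foo == None'
# becomes 'foo is None' (E711); rewriting 'foo == True' or 'foo != False' (E712)
# needs aggressive level 2, as test_e712_only_if_aggressive_level_2 verifies.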
def test_e711(self):
line = 'foo == None\n'
fixed = 'foo is None\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
line = 'None == foo\n'
fixed = 'None is foo\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e711_in_conditional(self):
line = 'if foo == None and None == foo:\npass\n'
fixed = 'if foo is None and None is foo:\npass\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e711_in_conditional_with_multiple_instances(self):
line = 'if foo == None and bar == None:\npass\n'
fixed = 'if foo is None and bar is None:\npass\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e711_with_not_equals_none(self):
line = 'foo != None\n'
fixed = 'foo is not None\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e712(self):
line = 'foo == True\n'
fixed = 'foo\n'
with autopep8_context(line,
options=['-aa', '--select=E712']) as result:
self.assertEqual(fixed, result)
def test_e712_in_conditional_with_multiple_instances(self):
line = 'if foo == True and bar == True:\npass\n'
fixed = 'if foo and bar:\npass\n'
with autopep8_context(line,
options=['-aa', '--select=E712']) as result:
self.assertEqual(fixed, result)
def test_e712_with_false(self):
line = 'foo != False\n'
fixed = 'foo\n'
with autopep8_context(line,
options=['-aa', '--select=E712']) as result:
self.assertEqual(fixed, result)
def test_e712_with_special_case_equal_not_true(self):
line = 'if foo != True:\n pass\n'
fixed = 'if not foo:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E712']) as result:
self.assertEqual(fixed, result)
def test_e712_with_special_case_equal_false(self):
line = 'if foo == False:\n pass\n'
fixed = 'if not foo:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E712']) as result:
self.assertEqual(fixed, result)
def test_e712_with_dict_value(self):
line = 'if d["key"] != True:\n pass\n'
fixed = 'if not d["key"]:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E712']) as result:
self.assertEqual(fixed, result)
def test_e712_only_if_aggressive_level_2(self):
line = 'foo == True\n'
with autopep8_context(line, options=['-a']) as result:
self.assertEqual(line, result)
def test_e711_and_e712(self):
line = 'if (foo == None and bar == True) or (foo != False and bar != None):\npass\n'
fixed = 'if (foo is None and bar) or (foo and bar is not None):\npass\n'
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
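# E713/E714: negated membership and identity tests -- 'not x in y' becomes
# 'x not in y' and 'not x is y' becomes 'x is not y'.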
def test_e713(self):
line = 'if not x in y:\n pass\n'
fixed = 'if x not in y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e713_more(self):
line = 'if not "." in y:\n pass\n'
fixed = 'if "." not in y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e713_with_in(self):
line = 'if not "." in y and "," in y:\n pass\n'
fixed = 'if "." not in y and "," in y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e713_with_tuple(self):
line = """
if not role in ("domaincontroller_master",
"domaincontroller_backup",
"domaincontroller_slave",
"memberserver",
):
pass
"""
fixed = """
if role not in ("domaincontroller_master",
"domaincontroller_backup",
"domaincontroller_slave",
"memberserver",
):
pass
"""
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e713_chain(self):
line = 'if "@" not in x or not "/" in y:\n pass\n'
fixed = 'if "@" not in x or "/" not in y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e713_chain2(self):
line = 'if "@" not in x or "[" not in x or not "/" in y:\n pass\n'
fixed = 'if "@" not in x or "[" not in x or "/" not in y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e713_chain3(self):
line = 'if not "@" in x or "[" not in x or not "/" in y:\n pass\n'
fixed = 'if "@" not in x or "[" not in x or "/" not in y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e713_chain4(self):
line = 'if not "." in y and not "," in y:\n pass\n'
fixed = 'if "." not in y and "," not in y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e714(self):
line = 'if not x is y:\n pass\n'
fixed = 'if x is not y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E714']) as result:
self.assertEqual(fixed, result)
def test_e714_with_is(self):
line = 'if not x is y or x is z:\n pass\n'
fixed = 'if x is not y or x is z:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E714']) as result:
self.assertEqual(fixed, result)
def test_e714_chain(self):
line = 'if not x is y or not x is z:\n pass\n'
fixed = 'if x is not y or x is not z:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E714']) as result:
self.assertEqual(fixed, result)
def test_e713_and_e714(self):
line = """
if not x is y:
pass
if not role in ("domaincontroller_master",
"domaincontroller_backup",
"domaincontroller_slave",
"memberserver",
):
pass
"""
fixed = """
if x is not y:
pass
if role not in ("domaincontroller_master",
"domaincontroller_backup",
"domaincontroller_slave",
"memberserver",
):
pass
"""
with autopep8_context(line,
options=['-aa', '--select=E713,E714']) as result:
self.assertEqual(fixed, result)
def test_e713_with_single_quote(self):
line = "if not 'DC IP' in info:\n"
fixed = "if 'DC IP' not in info:\n"
with autopep8_context(line,
options=['-aa', '--select=E713,E714']) as result:
self.assertEqual(fixed, result)
def test_e714_with_single_quote(self):
line = "if not 'DC IP' is info:\n"
fixed = "if 'DC IP' is not info:\n"
with autopep8_context(line,
options=['-aa', '--select=E713,E714']) as result:
self.assertEqual(fixed, result)
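# E721: direct type comparisons are rewritten as isinstance() calls under
# --aggressive.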
def test_e721(self):
line = "type('') == type('')\n"
fixed = "isinstance('', type(''))\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e721_with_str(self):
line = "str == type('')\n"
fixed = "isinstance('', str)\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e721_in_conditional(self):
line = "if str == type(''):\n pass\n"
fixed = "if isinstance('', str):\n pass\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
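# E722: a bare 'except:' becomes 'except BaseException:' under --aggressive;
# test_e722_non_aggressive checks that the default mode leaves it alone.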
def test_e722(self):
line = "try:\n print(a)\nexcept:\n pass\n"
fixed = "try:\n print(a)\nexcept BaseException:\n pass\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e722_with_if_else_stmt(self):
line = "try:\n print(a)\nexcept:\n if a==b:\n print(a)\n else:\n print(b)\n"
fixed = "try:\n print(a)\nexcept BaseException:\n if a == b:\n print(a)\n else:\n print(b)\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e722_non_aggressive(self):
line = "try:\n print(a)\nexcept:\n pass\n"
with autopep8_context(line, options=[]) as result:
self.assertEqual(line, result)
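# E731: assigning a lambda to a name is rewritten as a one-line def, e.g.
# 'a = lambda x: x * 2' -> 'def a(x): return x * 2', even without --aggressive.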
def test_e731(self):
line = 'a = lambda x: x * 2\n'
fixed = 'def a(x): return x * 2\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e731_no_arg(self):
line = 'a = lambda: x * 2\n'
fixed = 'def a(): return x * 2\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e731_with_tuple_arg(self):
line = 'a = lambda (x, y), z: x * 2\n'
fixed = 'def a((x, y), z): return x * 2\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e731_with_args(self):
line = 'a = lambda x, y: x * 2 + y\n'
fixed = 'def a(x, y): return x * 2 + y\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e731_with_select_option(self):
line = 'a = lambda x: x * 2\n'
fixed = 'def a(x): return x * 2\n'
with autopep8_context(line, options=['--select=E731']) as result:
self.assertEqual(fixed, result)
def test_e731_with_default_arguments(self):
line = 'a = lambda k, d=None: bar.get("%s/%s" % (prefix, k), d)\n'
fixed = 'def a(k, d=None): return bar.get("%s/%s" % (prefix, k), d)\n'
with autopep8_context(line, options=['--select=E731']) as result:
self.assertEqual(fixed, result)
def test_e901_should_cause_indentation_screw_up(self):
line = """\
def tmp(g):
g(4)))
if not True:
pass
pass
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_should_preserve_vertical_tab(self):
line = """\
#Memory Bu\vffer Register:
"""
fixed = """\
# Memory Bu\vffer Register:
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
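# W191: indentation contains tabs.  Tabs that occur inside string literals are
# data rather than indentation and must be left untouched.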
def test_w191_should_ignore_multiline_strings(self):
line = """\
print(3 <> 4, '''
while True:
if True:
\t1
\t''', 4 <> 5)
if True:
\t123
"""
fixed = """\
print(3 != 4, '''
while True:
if True:
\t1
\t''', 4 != 5)
if True:
123
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w191_should_ignore_tabs_in_strings(self):
line = """\
if True:
\tx = '''
\t\tblah
\tif True:
\t1
\t'''
if True:
\t123
else:
\t32
"""
fixed = """\
if True:
x = '''
\t\tblah
\tif True:
\t1
\t'''
if True:
123
else:
32
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
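# W291/W293 strip trailing whitespace, W292 adds a missing newline at the end
# of the file, and W391 removes trailing blank lines.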
def test_w291(self):
line = "print 'a b '\t \n"
fixed = "print 'a b '\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w291_with_comment(self):
line = "print 'a b ' # comment\t \n"
fixed = "print 'a b ' # comment\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w292(self):
line = '1\n2'
fixed = '1\n2\n'
with autopep8_context(line, options=['--aggressive',
'--select=W292']) as result:
self.assertEqual(fixed, result)
def test_w293(self):
line = '1\n \n2\n'
fixed = '1\n\n2\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w391(self):
line = ' \n'
fixed = ''
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w391_more_complex(self):
line = '123\n456\n \n'
fixed = '123\n456\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
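# W503 (line break before a binary operator) and W504 (line break after a
# binary operator) contradict each other, so neither is applied by default;
# each one is fixed only when it is explicitly selected or its counterpart is
# explicitly ignored.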
def test_w503(self):
line = '(width == 0\n + height == 0)\n'
fixed = '(width == 0 +\n height == 0)\n'
with autopep8_context(line, options=['--select=W503']) as result:
self.assertEqual(fixed, result)
def test_w503_with_ignore_w504(self):
line = '(width == 0\n + height == 0)\n'
fixed = '(width == 0 +\n height == 0)\n'
with autopep8_context(line, options=['--ignore=E,W504']) as result:
self.assertEqual(fixed, result)
def test_w504_with_ignore_w503(self):
line = '(width == 0 +\n height == 0)\n'
fixed = '(width == 0\n + height == 0)\n'
with autopep8_context(line, options=['--ignore=E,W503']) as result:
self.assertEqual(fixed, result)
def test_w503_w504_none_ignored(self):
line = '(width == 0 +\n height == 0\n+ depth == 0)\n'
fixed = '(width == 0 +\n height == 0\n+ depth == 0)\n'
with autopep8_context(line, options=['--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_w503_w504_both_ignored(self):
line = '(width == 0 +\n height == 0\n+ depth == 0)\n'
fixed = '(width == 0 +\n height == 0\n+ depth == 0)\n'
with autopep8_context(
line, options=['--ignore=E,W503, W504'],
) as result:
self.assertEqual(fixed, result)
def test_w503_skip_default(self):
line = '(width == 0\n + height == 0)\n'
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_w503_and_or(self):
line = '(width == 0\n and height == 0\n or name == "")\n'
fixed = '(width == 0 and\n height == 0 or\n name == "")\n'
with autopep8_context(line, options=['--select=W503']) as result:
self.assertEqual(fixed, result)
def test_w503_with_comment(self):
line = '(width == 0 # this is comment\n + height == 0)\n'
fixed = '(width == 0 + # this is comment\n height == 0)\n'
with autopep8_context(line, options=['--select=W503']) as result:
self.assertEqual(fixed, result)
def test_w503_with_comment_into_point_out_line(self):
line = """\
def test():
return (
True not in []
and False # comment required
)
"""
fixed = """\
def test():
return (
True not in [] and
False # comment required
)
"""
with autopep8_context(line, options=['--select=W503']) as result:
self.assertEqual(fixed, result)
def test_w503_with_comment_double(self):
line = """\
(
1111 # C1
and 22222222 # C2
and 333333333333 # C3
)
"""
fixed = """\
(
1111 and # C1
22222222 and # C2
333333333333 # C3
)
"""
with autopep8_context(line, options=['--select=W503']) as result:
self.assertEqual(fixed, result)
def test_w503_with_comment_with_only_comment_block_character(self):
line = """\
if (True #
and True
and True):
print(1)
"""
fixed = """\
if (True and #
True and
True):
print(1)
"""
with autopep8_context(line, options=['--select=W503']) as result:
self.assertEqual(fixed, result)
def test_w503_over_5lines(self):
line = """\
X = (
1 # 1
+ 2 # 2
+ 3 # 3
+ 4 # 4
+ 5 # 5
+ 6 # 6
+ 7 # 7
)
"""
fixed = """\
X = (
1 + # 1
2 + # 2
3 + # 3
4 + # 4
5 + # 5
6 + # 6
7 # 7
)
"""
with autopep8_context(line, options=['--select=W503']) as result:
self.assertEqual(fixed, result)
def test_w503_with_line_comment(self):
line = '(width == 0\n # this is comment\n + height == 0)\n'
fixed = '(width == 0 +\n # this is comment\n height == 0)\n'
with autopep8_context(line, options=['--select=W503', '--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_w503_with_empty_line(self):
line = """\
# this is comment
a = 2
b = (1 +
2 +
3) / 2.0
"""
fixed = """\
# this is comment
a = 2
b = (1 +
2 +
3) / 2.0
"""
with autopep8_context(line, options=['--ignore=E721']) as result:
self.assertEqual(fixed, result)
def test_w503_with_line_comments(self):
line = '(width == 0\n # this is comment\n # comment2\n + height == 0)\n'
fixed = '(width == 0 +\n # this is comment\n # comment2\n height == 0)\n'
with autopep8_context(line, options=['--select=W503', '--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_ignore_only_w503_with_select_w(self):
line = """\
a = (
11 + 22 +
33 +
44
+ 55
)
"""
fixed = """\
a = (
11 + 22
+ 33
+ 44
+ 55
)
"""
with autopep8_context(line, options=['--select=W', '--ignore=W503']) as result:
self.assertEqual(fixed, result)
with autopep8_context(line, options=['--select=W5', '--ignore=W503']) as result:
self.assertEqual(fixed, result)
with autopep8_context(line, options=['--select=W50', '--ignore=W503']) as result:
self.assertEqual(fixed, result)
def test_ignore_only_w504_with_select_w(self):
line = """\
a = (
11 + 22 +
33 +
44
+ 55
)
"""
fixed = """\
a = (
11 + 22 +
33 +
44 +
55
)
"""
with autopep8_context(line, options=['--select=W', '--ignore=W504']) as result:
self.assertEqual(fixed, result)
with autopep8_context(line, options=['--select=W5', '--ignore=W504']) as result:
self.assertEqual(fixed, result)
with autopep8_context(line, options=['--select=W50', '--ignore=W504']) as result:
self.assertEqual(fixed, result)
def test_ignore_w503_and_w504_with_select_w(self):
line = """\
a = (
11 + 22 +
33 +
44
+ 55
)
"""
with autopep8_context(line, options=['--select=W', '--ignore=W503,W504']) as result:
self.assertEqual(line, result)
with autopep8_context(line, options=['--select=W5', '--ignore=W503,W504']) as result:
self.assertEqual(line, result)
with autopep8_context(line, options=['--select=W50', '--ignore=W503,W504']) as result:
self.assertEqual(line, result)
def test_w504(self):
line = '(width == 0 +\n height == 0)\n'
fixed = '(width == 0\n + height == 0)\n'
with autopep8_context(line, options=['--select=W504', '--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_w504_comment_on_first_line(self):
line = 'x = (1 | # test\n2)\n'
fixed = 'x = (1 # test\n| 2)\n'
with autopep8_context(line, options=['--select=W504', '--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_w504_comment_on_second_line(self):
line = 'x = (1 |\n2) # test\n'
fixed = 'x = (1\n| 2) # test\n'
with autopep8_context(line, options=['--select=W504', '--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_w504_comment_on_each_lines(self):
line = 'x = (1 |# test\n2 |# test\n3) # test\n'
fixed = 'x = (1# test\n| 2# test\n| 3) # test\n'
with autopep8_context(line, options=['--select=W504', '--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_w504_with_e265_ignore_option(self):
line = '(width == 0 +\n height == 0)\n'
with autopep8_context(line, options=['--ignore=E265']) as result:
self.assertEqual(line, result)
def test_w504_with_e265_ignore_option_regression(self):
line = """\
if True:
if True:
if (
link.is_wheel and
isinstance(link.comes_from, HTMLPage) and
link.comes_from.url.startswith(index_url)
):
_store_wheel_in_cache(file_path, index_url)
"""
with autopep8_context(line, options=['--ignore=E265']) as result:
self.assertEqual(line, result)
def test_w504_with_line_comment(self):
line = '(width == 0 +\n # this is comment\n height == 0)\n'
fixed = '(width == 0\n # this is comment\n + height == 0)\n'
with autopep8_context(line, options=['--select=W504', '--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_w504_not_applied_by_default_when_modifying_with_ignore(self):
line = """\
q = 1
def x(y, z):
if (
y and
z
):
pass
"""
fixed = line.replace('\n\n\n\n', '\n\n')
with autopep8_context(line, options=['--ignore=E265']) as result:
self.assertEqual(fixed, result)
def test_w503_and_w504_conflict(self):
line = """\
if True:
if True:
assert_equal(self.nodes[0].getbalance(
), bal + Decimal('50.00000000') + Decimal('2.19000000')) # block reward + tx
"""
fixed = """\
if True:
if True:
assert_equal(
self.nodes[0].getbalance(),
bal +
Decimal('50.00000000') +
Decimal('2.19000000')) # block reward + tx
"""
with autopep8_context(line, options=['-aa', '--select=E,W']) as result:
self.assertEqual(fixed, result)
with autopep8_context(line, options=['-aa', '--select=E,W5']) as result:
self.assertEqual(fixed, result)
with autopep8_context(line, options=['-aa', '--select=E,W50']) as result:
self.assertEqual(fixed, result)
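# W601: the Python 2 dict.has_key() method is deprecated; the aggressive fix
# rewrites 'd.has_key(k)' as 'k in d'.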
def test_w601(self):
line = 'a = {0: 1}\na.has_key(0)\n'
fixed = 'a = {0: 1}\n0 in a\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_word(self):
line = 'my_dict = {0: 1}\nmy_dict.has_key(0)\n'
fixed = 'my_dict = {0: 1}\n0 in my_dict\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_conditional(self):
line = 'a = {0: 1}\nif a.has_key(0):\n print 1\n'
fixed = 'a = {0: 1}\nif 0 in a:\n print 1\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_self(self):
line = 'self.a.has_key(0)\n'
fixed = '0 in self.a\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_self_with_conditional(self):
line = 'if self.a.has_key(0):\n print 1\n'
fixed = 'if 0 in self.a:\n print 1\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_with_multiple(self):
line = 'a.has_key(0) and b.has_key(0)\n'
fixed = '0 in a and 0 in b\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_with_multiple_nested(self):
line = 'alpha.has_key(nested.has_key(12)) and beta.has_key(1)\n'
fixed = '(12 in nested) in alpha and 1 in beta\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_with_more_complexity(self):
line = 'y.has_key(0) + x.has_key(x.has_key(0) + x.has_key(x.has_key(0) + x.has_key(1)))\n'
fixed = '(0 in y) + ((0 in x) + ((0 in x) + (1 in x) in x) in x)\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_precedence(self):
line = 'if self.a.has_key(1 + 2):\n print 1\n'
fixed = 'if 1 + 2 in self.a:\n print 1\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_with_parens(self):
line = 'foo(12) in alpha\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_w601_with_multiline(self):
line = """\
a.has_key(
0
)
"""
fixed = '0 in a\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_with_non_ascii(self):
line = """\
# -*- coding: utf-8 -*-
## éはe
correct = dict().has_key('good syntax ?')
"""
fixed = """\
# -*- coding: utf-8 -*-
# éはe
correct = 'good syntax ?' in dict()
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
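# W602: deprecated two-argument raise.  Under --aggressive,
# 'raise ValueError, "msg"' becomes 'raise ValueError("msg")', while the
# three-argument form with a traceback is left alone (see the
# skip_raise_argument_triple tests below).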
def test_w602_arg_is_string(self):
line = "raise ValueError, \"w602 test\"\n"
fixed = "raise ValueError(\"w602 test\")\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_arg_is_string_with_comment(self):
line = "raise ValueError, \"w602 test\" # comment\n"
fixed = "raise ValueError(\"w602 test\") # comment\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_skip_ambiguous_case(self):
line = "raise 'a', 'b', 'c'\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_w602_with_logic(self):
line = "raise TypeError, e or 'hello'\n"
fixed = "raise TypeError(e or 'hello')\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_triple_quotes(self):
line = 'raise ValueError, """hello"""\n1\n'
fixed = 'raise ValueError("""hello""")\n1\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiline(self):
line = 'raise ValueError, """\nhello"""\n'
fixed = 'raise ValueError("""\nhello""")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_with_complex_multiline(self):
line = 'raise ValueError, """\nhello %s %s""" % (\n 1, 2)\n'
fixed = 'raise ValueError("""\nhello %s %s""" % (\n 1, 2))\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiline_with_trailing_spaces(self):
line = 'raise ValueError, """\nhello""" \n'
fixed = 'raise ValueError("""\nhello""")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiline_with_escaped_newline(self):
line = 'raise ValueError, \\\n"""\nhello"""\n'
fixed = 'raise ValueError("""\nhello""")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiline_with_escaped_newline_and_comment(self):
line = 'raise ValueError, \\\n"""\nhello""" # comment\n'
fixed = 'raise ValueError("""\nhello""") # comment\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiline_with_multiple_escaped_newlines(self):
line = 'raise ValueError, \\\n\\\n\\\n"""\nhello"""\n'
fixed = 'raise ValueError("""\nhello""")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiline_with_nested_quotes(self):
line = 'raise ValueError, """hello\'\'\'blah"a"b"c"""\n'
fixed = 'raise ValueError("""hello\'\'\'blah"a"b"c""")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_with_multiline_with_single_quotes(self):
line = "raise ValueError, '''\nhello'''\n"
fixed = "raise ValueError('''\nhello''')\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiline_string_stays_the_same(self):
line = 'raise """\nhello"""\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_w602_escaped_lf(self):
line = 'raise ValueError, \\\n"hello"\n'
fixed = 'raise ValueError("hello")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_escaped_crlf(self):
line = 'raise ValueError, \\\r\n"hello"\r\n'
fixed = 'raise ValueError("hello")\r\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_indentation(self):
line = 'def foo():\n raise ValueError, "hello"\n'
fixed = 'def foo():\n raise ValueError("hello")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_escaped_cr(self):
line = 'raise ValueError, \\\r"hello"\n\n'
fixed = 'raise ValueError("hello")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiple_statements(self):
line = 'raise ValueError, "hello";print 1\n'
fixed = 'raise ValueError("hello")\nprint 1\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_raise_argument_with_indentation(self):
line = 'if True:\n raise ValueError, "error"\n'
fixed = 'if True:\n raise ValueError("error")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_skip_raise_argument_triple(self):
line = 'raise ValueError, "info", traceback\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_w602_skip_raise_argument_triple_with_comment(self):
line = 'raise ValueError, "info", traceback # comment\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_w602_raise_argument_triple_fake(self):
line = 'raise ValueError, "info, info2"\n'
fixed = 'raise ValueError("info, info2")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_with_list_comprehension(self):
line = 'raise Error, [x[0] for x in probs]\n'
fixed = 'raise Error([x[0] for x in probs])\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_with_bad_syntax(self):
line = "raise Error, 'abc\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_w602_invalid_2to3_fixed_case(self):
line = """\
raise (ValueError
if True else TypeError)
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
@unittest.skip('TODO')
def test_w602_invalid_2to3_fixed_case_with_valid_syntax(self):
line = """\
raise (ValueError
if True else TypeError)
raise ValueError, "error"
"""
fixed = """\
raise (ValueError
if True else TypeError)
raise ValueError("error")
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
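# W603: the old '<>' operator becomes '!='.  W604: backtick repr-quotes become
# repr() calls.  W605: an invalid escape sequence in a string literal is fixed
# by turning the literal into a raw string.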
def test_w603(self):
line = 'if 2 <> 2:\n print False'
fixed = 'if 2 != 2:\n print False\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w604(self):
line = '`1`\n'
fixed = 'repr(1)\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w604_with_multiple_instances(self):
line = '``1`` + ``b``\n'
fixed = 'repr(repr(1)) + repr(repr(b))\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w604_with_multiple_lines(self):
line = '`(1\n )`\n'
fixed = 'repr((1\n ))\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w605_simple(self):
line = "escape = '\\.jpg'\n"
fixed = "escape = r'\\.jpg'\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w605_identical_token(self):
# ***NOTE***: The --pep8-passes option is required to prevent an infinite loop in
# the old, failing code. DO NOT REMOVE.
line = "escape = foo('\\.bar', '\\.kilroy')\n"
fixed = "escape = foo(r'\\.bar', r'\\.kilroy')\n"
with autopep8_context(line, options=['--aggressive', '--pep8-passes', '5']) as result:
self.assertEqual(fixed, result, "Two tokens get r added")
line = "escape = foo('\\.bar', r'\\.kilroy')\n"
fixed = "escape = foo(r'\\.bar', r'\\.kilroy')\n"
with autopep8_context(line, options=['--aggressive', '--pep8-passes', '5']) as result:
self.assertEqual(fixed, result, "r not added if already there")
# Test Case to catch bad behavior reported in Issue #449
line = "escape = foo('\\.bar', '\\.bar')\n"
fixed = "escape = foo(r'\\.bar', r'\\.bar')\n"
with autopep8_context(line, options=['--aggressive', '--pep8-passes', '5']) as result:
self.assertEqual(fixed, result)
def test_w605_with_invalid_syntax(self):
line = "escape = rr'\\.jpg'\n"
fixed = "escape = rr'\\.jpg'\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_trailing_whitespace_in_multiline_string(self):
line = 'x = """ \nhello""" \n'
fixed = 'x = """ \nhello"""\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_trailing_whitespace_in_multiline_string_aggressive(self):
line = 'x = """ \nhello""" \n'
fixed = 'x = """\nhello"""\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_execfile_in_lambda_should_not_be_modified(self):
"""Modifying this to the exec() form is invalid in Python 2."""
line = 'lambda: execfile("foo.py")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
# FIXME: These tests should use multiline strings for readability.
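# --line-range restricts fixes to the given 1-based, inclusive range of lines;
# everything outside that range must be left unchanged.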
def test_range(self):
line = 'print( 1 )\nprint( 2 )\n print( 3 )\n'
fixed = 'print( 1 )\nprint(2)\n print( 3 )\n'
with autopep8_context(line, options=['--line-range', '2', '2']) as result:
self.assertEqual(fixed, result)
def test_range_line_number_changes_from_one_line(self):
line = 'a=12\na=1; b=2;c=3\nd=4;\n\ndef f(a = 1):\n pass\n'
fixed = 'a=12\na = 1\nb = 2\nc = 3\nd=4;\n\ndef f(a = 1):\n pass\n'
with autopep8_context(line, options=['--line-range', '2', '2']) as result:
self.assertEqual(fixed, result)
def test_range_indent_changes_small_range(self):
line = '\nif True:\n (1, \n 2,\n3)\nelif False:\n a = 1\nelse:\n a = 2\n\nc = 1\nif True:\n c = 2\n a = (1,\n2)\n'
fixed2_5 = '\nif True:\n (1,\n 2,\n 3)\nelif False:\n a = 1\nelse:\n a = 2\n\nc = 1\nif True:\n c = 2\n a = (1,\n2)\n'
with autopep8_context(line, options=['--line-range', '2', '5']) as result:
self.assertEqual(fixed2_5, result)
def test_range_indent_deep_if_blocks_first_block(self):
line = '\nif a:\n if a = 1:\n b = 1\n else:\n b = 2\nelif a == 0:\n b = 3\nelse:\n b = 4\n'
with autopep8_context(line, options=['--line-range', '2', '5']) as result:
self.assertEqual(line, result)
def test_range_indent_deep_if_blocks_second_block(self):
line = '\nif a:\n if a = 1:\n b = 1\n else:\n b = 2\nelif a == 0:\n b = 3\nelse:\n b = 4\n'
with autopep8_context(line, options=['--line-range', '6', '9']) as result:
self.assertEqual(line, result)
def test_range_indent_continued_statements_partial(self):
line = '\nif a == 1:\n\ttry:\n\t foo\n\texcept AttributeError:\n\t pass\n\telse:\n\t "nooo"\n\tb = 1\n'
with autopep8_context(line, options=['--line-range', '2', '6']) as result:
self.assertEqual(line, result)
def test_range_indent_continued_statements_last_block(self):
line = '\nif a == 1:\n\ttry:\n\t foo\n\texcept AttributeError:\n\t pass\n\telse:\n\t "nooo"\n\tb = 1\n'
with autopep8_context(line, options=['--line-range', '6', '9']) as result:
self.assertEqual(line, result)
def test_range_with_broken_syntax(self):
line = """\
if True:
if True:
pass
else:
pass
"""
with autopep8_context(line, options=['--line-range', '1', '1']) as result:
self.assertEqual(line, result)
def test_long_import_line(self):
line = """\
s
from t import a, \
bbbbbbbbbbbbbbbbbbbbbbbbbbbbb, ccccccccccccccccccccccccccccccc, ddddddddddddddddddddddddddddddddddd
"""
fixed = """\
from t import a, \
bbbbbbbbbbbbbbbbbbbbbbbbbbbbb, ccccccccccccccccccccccccccccccc, ddddddddddddddddddddddddddddddddddd
s
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_exchange_multiple_imports_with_def(self):
line = """\
def f(n):
return n
from a import fa
from b import fb
from c import fc
"""
with autopep8_context(line) as result:
self.assertEqual(result[:4], 'from')
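# The tests below exercise get_module_imports_on_top_of_file(), which reports
# the line index of the import block at the top of a file, i.e. the position
# after any shebang, leading comments and module docstring.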
class UtilityFunctionTests(unittest.TestCase):
def test_get_module_imports(self):
line = """\
import os
import sys
if True:
print(1)
"""
target_line_index = 8
result = get_module_imports_on_top_of_file(line.splitlines(),
target_line_index)
self.assertEqual(result, 0)
def test_get_module_imports_case_of_autopep8(self):
line = """\
#!/usr/bin/python
# comment
# comment
'''this module ...
this module ...
'''
import os
import sys
if True:
print(1)
"""
target_line_index = 11
result = get_module_imports_on_top_of_file(line.splitlines(),
target_line_index)
self.assertEqual(result, 10)
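# The command-line tests drive autopep8 as a subprocess (AUTOPEP8_CMD_TUPLE /
# autopep8_subprocess) and assert on the produced diff, stderr and exit codes
# (EXIT_CODE_OK versus EXIT_CODE_EXISTS_DIFF).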
class CommandLineTests(unittest.TestCase):
maxDiff = None
def test_diff(self):
line = "'abc' \n"
fixed = "-'abc' \n+'abc'\n"
with autopep8_subprocess(line, ['--diff']) as (result, retcode):
self.assertEqual(fixed, '\n'.join(result.split('\n')[3:]))
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
def test_diff_with_exit_code_option(self):
line = "'abc' \n"
fixed = "-'abc' \n+'abc'\n"
with autopep8_subprocess(line, ['--diff', '--exit-code']) as (result, retcode):
self.assertEqual(fixed, '\n'.join(result.split('\n')[3:]))
self.assertEqual(retcode, autopep8.EXIT_CODE_EXISTS_DIFF)
def test_non_diff_with_exit_code_option(self):
line = "'abc'\n"
with autopep8_subprocess(line, ['--diff', '--exit-code']) as (result, retcode):
self.assertEqual('', '\n'.join(result.split('\n')[3:]))
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
def test_diff_with_empty_file(self):
with autopep8_subprocess('', ['--diff']) as (result, retcode):
self.assertEqual('\n'.join(result.split('\n')[3:]), '')
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
def test_diff_with_nonexistent_file(self):
p = Popen(list(AUTOPEP8_CMD_TUPLE) + ['--diff', 'non_existent_file'],
stdout=PIPE, stderr=PIPE)
error = p.communicate()[1].decode('utf-8')
self.assertIn('non_existent_file', error)
def test_diff_with_standard_in(self):
p = Popen(list(AUTOPEP8_CMD_TUPLE) + ['--diff', '-'],
stdout=PIPE, stderr=PIPE)
error = p.communicate()[1].decode('utf-8')
self.assertIn('cannot', error)
def test_pep8_passes(self):
line = "'abc' \n"
fixed = "'abc'\n"
with autopep8_subprocess(line, ['--pep8-passes', '0']) as (result, retcode):
self.assertEqual(fixed, result)
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
def test_pep8_ignore(self):
line = "'abc' \n"
with autopep8_subprocess(line, ['--ignore=E,W']) as (result, retcode):
self.assertEqual(line, result)
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
def test_pep8_ignore_should_handle_trailing_comma_gracefully(self):
line = "'abc' \n"
fixed = "'abc'\n"
with autopep8_subprocess(line, ['--ignore=,']) as (result, retcode):
self.assertEqual(fixed, result)
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
def test_help(self):
p = Popen(list(AUTOPEP8_CMD_TUPLE) + ['-h'],
stdout=PIPE)
self.assertIn('usage:', p.communicate()[0].decode('utf-8').lower())
def test_verbose(self):
line = 'bad_syntax)'
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [filename, '-vvv'],
stdout=PIPE, stderr=PIPE)
verbose_error = p.communicate()[1].decode('utf-8')
self.assertIn("'fix_e901' is not defined", verbose_error)
def test_verbose_diff(self):
line = '+'.join(100 * ['323424234234'])
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[filename, '-vvvv', '--diff'],
stdout=PIPE, stderr=PIPE)
verbose_error = p.communicate()[1].decode('utf-8')
self.assertIn('------------', verbose_error)
def test_in_place(self):
line = "'abc' \n"
fixed = "'abc'\n"
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [filename, '--in-place'])
p.wait()
with open(filename) as f:
self.assertEqual(fixed, f.read())
self.assertEqual(p.returncode, autopep8.EXIT_CODE_OK)
def test_in_place_no_modifications_no_writes(self):
with temporary_file_context('import os\n') as filename:
# ensure a no-op run does not write to the file by making it read-only
os.chmod(filename, 0o444)
p = Popen(
list(AUTOPEP8_CMD_TUPLE) + [filename, '--in-place'],
stderr=PIPE,
)
_, err = p.communicate()
self.assertEqual(err, b'')
self.assertEqual(p.returncode, autopep8.EXIT_CODE_OK)
def test_in_place_with_exit_code_option(self):
line = "'abc' \n"
fixed = "'abc'\n"
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [filename,
'--in-place',
'--exit-code'])
p.wait()
with open(filename) as f:
self.assertEqual(fixed, f.read())
self.assertEqual(p.returncode, autopep8.EXIT_CODE_EXISTS_DIFF)
def test_in_place_with_exit_code_option_with_w391(self):
line = "\n\n\n"
fixed = ""
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [filename,
'--in-place',
'--exit-code'])
p.wait()
with open(filename) as f:
self.assertEqual(fixed, f.read())
self.assertEqual(p.returncode, autopep8.EXIT_CODE_EXISTS_DIFF)
def test_parallel_jobs(self):
line = "'abc' \n"
fixed = "'abc'\n"
with temporary_file_context(line) as filename_a:
with temporary_file_context(line) as filename_b:
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[filename_a, filename_b, '--jobs=3', '--in-place'])
p.wait()
with open(filename_a) as f:
self.assertEqual(fixed, f.read())
with open(filename_b) as f:
self.assertEqual(fixed, f.read())
def test_parallel_jobs_with_automatic_cpu_count(self):
line = "'abc' \n"
fixed = "'abc'\n"
with temporary_file_context(line) as filename_a:
with temporary_file_context(line) as filename_b:
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[filename_a, filename_b, '--jobs=0', '--in-place'])
p.wait()
with open(filename_a) as f:
self.assertEqual(fixed, f.read())
with open(filename_b) as f:
self.assertEqual(fixed, f.read())
def test_in_place_with_empty_file(self):
line = ''
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [filename, '--in-place'])
p.wait()
self.assertEqual(0, p.returncode)
with open(filename) as f:
self.assertEqual(f.read(), line)
def test_in_place_and_diff(self):
line = "'abc' \n"
with temporary_file_context(line) as filename:
p = Popen(
list(AUTOPEP8_CMD_TUPLE) + [filename,
'--in-place', '--diff'],
stderr=PIPE)
result = p.communicate()[1].decode('utf-8')
self.assertIn('--in-place and --diff are mutually exclusive', result)
def test_recursive(self):
temp_directory = mkdtemp(dir='.')
try:
with open(os.path.join(temp_directory, 'a.py'), 'w') as output:
output.write("'abc' \n")
os.mkdir(os.path.join(temp_directory, 'd'))
with open(os.path.join(temp_directory, 'd', 'b.py'),
'w') as output:
output.write('123 \n')
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[temp_directory, '--recursive', '--diff'],
stdout=PIPE)
result = p.communicate()[0].decode('utf-8')
self.assertEqual(
"-'abc' \n+'abc'",
'\n'.join(result.split('\n')[3:5]))
self.assertEqual(
'-123 \n+123',
'\n'.join(result.split('\n')[8:10]))
finally:
shutil.rmtree(temp_directory)
def test_recursive_should_not_crash_on_unicode_filename(self):
temp_directory = mkdtemp(dir='.')
try:
for filename in ['x.py', 'é.py', 'é.txt']:
with open(os.path.join(temp_directory, filename), 'w'):
pass
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[temp_directory,
'--recursive',
'--diff'],
stdout=PIPE)
self.assertFalse(p.communicate()[0])
self.assertEqual(0, p.returncode)
finally:
shutil.rmtree(temp_directory)
def test_recursive_should_ignore_hidden(self):
temp_directory = mkdtemp(dir='.')
temp_subdirectory = mkdtemp(prefix='.', dir=temp_directory)
try:
with open(os.path.join(temp_subdirectory, 'a.py'), 'w') as output:
output.write("'abc' \n")
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[temp_directory, '--recursive', '--diff'],
stdout=PIPE)
result = p.communicate()[0].decode('utf-8')
self.assertEqual(0, p.returncode)
self.assertEqual('', result)
finally:
shutil.rmtree(temp_directory)
def test_exclude(self):
temp_directory = mkdtemp(dir='.')
try:
with open(os.path.join(temp_directory, 'a.py'), 'w') as output:
output.write("'abc' \n")
os.mkdir(os.path.join(temp_directory, 'd'))
with open(os.path.join(temp_directory, 'd', 'b.py'),
'w') as output:
output.write('123 \n')
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[temp_directory, '--recursive', '--exclude=a*',
'--diff'],
stdout=PIPE)
result = p.communicate()[0].decode('utf-8')
self.assertNotIn('abc', result)
self.assertIn('123', result)
finally:
shutil.rmtree(temp_directory)
def test_invalid_option_combinations(self):
line = "'abc' \n"
with temporary_file_context(line) as filename:
for options in [['--recursive', filename], # without --diff
['--jobs=2', filename], # without --diff
['--max-line-length=0', filename],
[], # no argument
['-', '--in-place'],
['-', '--recursive'],
['-', filename],
['--line-range', '0', '2', filename],
['--line-range', '2', '1', filename],
['--line-range', '-1', '-1', filename],
]:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + options,
stderr=PIPE)
result = p.communicate()[1].decode('utf-8')
self.assertNotEqual(0, p.returncode, msg=str(options))
self.assertTrue(len(result))
def test_list_fixes(self):
with autopep8_subprocess('', options=['--list-fixes']) as (result, retcode):
self.assertIn('E121', result)
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
def test_fixpep8_class_constructor(self):
line = 'print 1\nprint 2\n'
with temporary_file_context(line) as filename:
pep8obj = autopep8.FixPEP8(filename, None)
self.assertEqual(''.join(pep8obj.source), line)
def test_inplace_with_multi_files(self):
exception = None
with disable_stderr():
try:
autopep8.parse_args(['test.py', 'dummy.py'])
except SystemExit as e:
exception = e
self.assertTrue(exception)
self.assertEqual(exception.code, 2)
def test_standard_out_should_use_native_line_ending(self):
line = '1\r\n2\r\n3\r\n'
with temporary_file_context(line) as filename:
process = Popen(list(AUTOPEP8_CMD_TUPLE) +
[filename],
stdout=PIPE)
self.assertEqual(
os.linesep.join(['1', '2', '3', '']),
process.communicate()[0].decode('utf-8'))
def test_standard_out_should_use_native_line_ending_with_cr_input(self):
line = '1\r2\r3\r'
with temporary_file_context(line) as filename:
process = Popen(list(AUTOPEP8_CMD_TUPLE) +
[filename],
stdout=PIPE)
self.assertEqual(
os.linesep.join(['1', '2', '3', '']),
process.communicate()[0].decode('utf-8'))
def test_standard_in(self):
line = 'print( 1 )\n'
fixed = 'print(1)' + os.linesep
process = Popen(list(AUTOPEP8_CMD_TUPLE) +
['-'],
stdout=PIPE,
stdin=PIPE)
self.assertEqual(
fixed,
process.communicate(line.encode('utf-8'))[0].decode('utf-8'))
def test_exit_code_should_be_set_when_standard_in(self):
line = 'print( 1 )\n'
process = Popen(list(AUTOPEP8_CMD_TUPLE) +
['--exit-code', '-'],
stdout=PIPE,
stdin=PIPE)
process.communicate(line.encode('utf-8'))[0].decode('utf-8')
self.assertEqual(
process.returncode,
autopep8.EXIT_CODE_EXISTS_DIFF)
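# Configuration handling: parse_args(..., apply_config=True) merges the
# project-local config found next to the target file (the FAKE_CONFIGURATION
# fixture sets indent-size=2) with the file named by --global-config.  A local
# config wins over --global-config unless --ignore-local-config is given, and
# options passed explicitly on the command line always win.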
class ConfigurationTests(unittest.TestCase):
def test_local_config(self):
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config={}'.format(os.devnull)],
apply_config=True)
self.assertEqual(args.indent_size, 2)
def test_config_override(self):
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--indent-size=7'],
apply_config=True)
self.assertEqual(args.indent_size, 7)
def test_config_false_with_local(self):
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config=False'],
apply_config=True)
self.assertEqual(args.global_config, 'False')
self.assertEqual(args.indent_size, 2)
def test_config_false_with_local_space(self):
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config', 'False'],
apply_config=True)
self.assertEqual(args.global_config, 'False')
self.assertEqual(args.indent_size, 2)
def test_local_pycodestyle_config_line_length(self):
args = autopep8.parse_args(
[os.path.join(FAKE_PYCODESTYLE_CONFIGURATION, 'foo.py'),
'--global-config={}'.format(os.devnull)],
apply_config=True)
self.assertEqual(args.max_line_length, 40)
def test_config_false_with_local_autocomplete(self):
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--g', 'False'],
apply_config=True)
self.assertEqual(args.global_config, 'False')
self.assertEqual(args.indent_size, 2)
def test_config_false_without_local(self):
args = autopep8.parse_args(['/nowhere/foo.py',
'--global-config={}'.format(os.devnull)],
apply_config=True)
self.assertEqual(args.indent_size, 4)
def test_global_config_with_locals(self):
with temporary_file_context('[pep8]\nindent-size=3\n') as filename:
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config={}'.format(filename)],
apply_config=True)
self.assertEqual(args.indent_size, 2)
def test_global_config_ignore_locals(self):
with temporary_file_context('[pep8]\nindent-size=3\n') as filename:
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config={}'.format(filename),
'--ignore-local-config'],
apply_config=True)
self.assertEqual(args.indent_size, 3)
def test_global_config_without_locals(self):
with temporary_file_context('[pep8]\nindent-size=3\n') as filename:
args = autopep8.parse_args(
['/nowhere/foo.py', '--global-config={}'.format(filename)],
apply_config=True)
self.assertEqual(args.indent_size, 3)
def test_config_local_int_value(self):
with temporary_file_context('[pep8]\naggressive=1\n') as filename:
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config={}'.format(filename)],
apply_config=True)
self.assertEqual(args.aggressive, 1)
def test_config_local_include_invalid_key(self):
configstr = """\
[pep8]
count=True
aggressive=1
"""
with temporary_file_context(configstr) as filename:
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config={}'.format(filename)],
apply_config=True)
self.assertEqual(args.aggressive, 1)
def test_pyproject_toml_config_local_int_value(self):
with temporary_file_context('[tool.autopep8]\naggressive=2\n') as filename:
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config={}'.format(filename)],
apply_config=True)
self.assertEqual(args.aggressive, 2)
class ConfigurationFileTests(unittest.TestCase):
def test_pyproject_toml_with_flake8_config(self):
"""override to flake8 config"""
line = "a = 1\n"
dot_flake8 = """[pep8]\naggressive=0\n"""
pyproject_toml = """[tool.autopep8]\naggressvie=2\nignore=E,W\n"""
with temporary_project_directory() as dirname:
with open(os.path.join(dirname, "pyproject.toml"), "w") as fp:
fp.write(pyproject_toml)
with open(os.path.join(dirname, ".flake8"), "w") as fp:
fp.write(dot_flake8)
target_filename = os.path.join(dirname, "foo.py")
with open(target_filename, "w") as fp:
fp.write(line)
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [target_filename], stdout=PIPE)
self.assertEqual(p.communicate()[0].decode("utf-8"), line)
self.assertEqual(p.returncode, 0)
def test_pyproject_toml_with_verbose_option(self):
"""override to flake8 config"""
line = "a = 1\n"
verbose_line = "enable pyproject.toml config: section=tool.autopep8, key=ignore, value=E,W\n"
pyproject_toml = """[tool.autopep8]\naggressvie=2\nignore=E,W\n"""
with temporary_project_directory() as dirname:
with open(os.path.join(dirname, "pyproject.toml"), "w") as fp:
fp.write(pyproject_toml)
target_filename = os.path.join(dirname, "foo.py")
with open(target_filename, "w") as fp:
fp.write(line)
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [target_filename, "-vvv"], stdout=PIPE)
output = p.communicate()[0].decode("utf-8")
self.assertTrue(line in output)
self.assertTrue(verbose_line in output)
self.assertEqual(p.returncode, 0)
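# --experimental enables additional, still-experimental strategies for
# reflowing long lines (E501); these tests pin down where such lines get split.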
class ExperimentalSystemTests(unittest.TestCase):
maxDiff = None
def test_e501_experimental_basic(self):
line = """\
print(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
fixed = """\
print(111, 111, 111, 111, 222, 222, 222, 222,
222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_commas_and_colons(self):
line = """\
foobar = {'aaaaaaaaaaaa': 'bbbbbbbbbbbbbbbb', 'dddddd': 'eeeeeeeeeeeeeeee', 'ffffffffffff': 'gggggggg'}
"""
fixed = """\
foobar = {'aaaaaaaaaaaa': 'bbbbbbbbbbbbbbbb',
'dddddd': 'eeeeeeeeeeeeeeee', 'ffffffffffff': 'gggggggg'}
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_inline_comments(self):
line = """\
' ' # Long inline comments should be moved above.
if True:
' ' # Long inline comments should be moved above.
"""
fixed = """\
# Long inline comments should be moved above.
' '
if True:
# Long inline comments should be moved above.
' '
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_inline_comments_should_skip_multiline(
self):
line = """\
'''This should be left alone. -----------------------------------------------------
''' # foo
'''This should be left alone. -----------------------------------------------------
''' \\
# foo
'''This should be left alone. -----------------------------------------------------
''' \\
\\
# foo
"""
fixed = """\
'''This should be left alone. -----------------------------------------------------
''' # foo
'''This should be left alone. -----------------------------------------------------
''' # foo
'''This should be left alone. -----------------------------------------------------
''' # foo
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_inline_comments_should_skip_keywords(self):
line = """\
' ' # noqa Long inline comments should be moved above.
if True:
' ' # pylint: disable-msgs=E0001
' ' # pragma: no cover
' ' # pragma: no cover
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(line, result)
def test_e501_experimental_with_inline_comments_should_skip_edge_cases(
self):
line = """\
if True:
x = \\
' ' # Long inline comments should be moved above.
"""
fixed = """\
if True:
# Long inline comments should be moved above.
x = ' '
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_basic_should_prefer_balanced_brackets(self):
line = """\
if True:
reconstructed = iradon(radon(image), filter="ramp", interpolation="nearest")
"""
fixed = """\
if True:
reconstructed = iradon(
radon(image),
filter="ramp", interpolation="nearest")
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_very_long_line(self):
line = """\
x = [3244234243234, 234234234324, 234234324, 23424234, 234234234, 234234, 234243, 234243, 234234234324, 234234324, 23424234, 234234234, 234234, 234243, 234243]
"""
fixed = """\
x = [3244234243234, 234234234324, 234234324, 23424234, 234234234, 234234, 234243,
234243, 234234234324, 234234324, 23424234, 234234234, 234234, 234243, 234243]
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_shorten_at_commas_skip(self):
line = """\
parser.add_argument('source_corpus', help='corpus name/path relative to an nltk_data directory')
parser.add_argument('target_corpus', help='corpus name/path relative to an nltk_data directory')
"""
fixed = """\
parser.add_argument(
'source_corpus',
help='corpus name/path relative to an nltk_data directory')
parser.add_argument(
'target_corpus',
help='corpus name/path relative to an nltk_data directory')
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_shorter_length(self):
line = """\
foooooooooooooooooo('abcdefghijklmnopqrstuvwxyz')
"""
fixed = """\
foooooooooooooooooo(
'abcdefghijklmnopqrstuvwxyz')
"""
with autopep8_context(line,
options=['--max-line-length=40',
'--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_indent(self):
line = """\
def d():
print(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
fixed = """\
def d():
print(111, 111, 111, 111, 222, 222, 222, 222,
222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_alone_with_indentation(self):
line = """\
if True:
print(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
fixed = """\
if True:
print(111, 111, 111, 111, 222, 222, 222, 222,
222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
with autopep8_context(line, options=['--select=E501',
'--experimental']) as result:
self.assertEqual(fixed, result)
@unittest.skip('Not sure why space is not removed anymore')
def test_e501_experimental_alone_with_tuple(self):
line = """\
fooooooooooooooooooooooooooooooo000000000000000000000000 = [1,
('TransferTime', 'FLOAT')
]
"""
fixed = """\
fooooooooooooooooooooooooooooooo000000000000000000000000 = [
1, ('TransferTime', 'FLOAT')]
"""
with autopep8_context(line, options=['--select=E501',
'--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_should_not_try_to_break_at_every_paren_in_arithmetic(
self):
line = """\
term3 = w6 * c5 * (8.0 * psi4 * (11.0 - 24.0 * t2) - 28 * psi3 * (1 - 6.0 * t2) + psi2 * (1 - 32 * t2) - psi * (2.0 * t2) + t4) / 720.0
this_should_be_shortened = (' ', ' ')
"""
fixed = """\
term3 = w6 * c5 * (8.0 * psi4 * (11.0 - 24.0 * t2) - 28 * psi3 * (1 - 6.0 * t2) +
psi2 * (1 - 32 * t2) - psi * (2.0 * t2) + t4) / 720.0
this_should_be_shortened = (
' ',
' ')
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_arithmetic_operator_with_indent(self):
line = """\
def d():
111 + 111 + 111 + 111 + 111 + 222 + 222 + 222 + 222 + 222 + 222 + 222 + 222 + 222 + 333 + 333 + 333 + 333
"""
fixed = """\
def d():
111 + 111 + 111 + 111 + 111 + 222 + 222 + 222 + 222 + \\
222 + 222 + 222 + 222 + 222 + 333 + 333 + 333 + 333
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_more_complicated(self):
line = """\
blahblah = os.environ.get('blahblah') or os.environ.get('blahblahblah') or os.environ.get('blahblahblahblah')
"""
fixed = """\
blahblah = os.environ.get('blahblah') or os.environ.get(
'blahblahblah') or os.environ.get('blahblahblahblah')
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_skip_even_more_complicated(self):
line = """\
if True:
if True:
if True:
blah = blah.blah_blah_blah_bla_bl(blahb.blah, blah.blah,
blah=blah.label, blah_blah=blah_blah,
blah_blah2=blah_blah)
"""
fixed = """\
if True:
if True:
if True:
blah = blah.blah_blah_blah_bla_bl(
blahb.blah, blah.blah, blah=blah.label, blah_blah=blah_blah,
blah_blah2=blah_blah)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_logical_fix(self):
line = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
fixed = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_logical_fix_and_physical_fix(self):
line = """\
# ------ ------------------------------------------------------------------------
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
fixed = """\
# ------ -----------------------------------------------------------------
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccc,
dddddddddddddddddddddddd)
"""
with autopep8_context(line, options=['--experimental',
'--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_with_logical_fix_and_adjacent_strings(self):
line = """\
print('a-----------------------' 'b-----------------------' 'c-----------------------'
'd-----------------------''e'"f"r"g")
"""
fixed = """\
print(
'a-----------------------'
'b-----------------------'
'c-----------------------'
'd-----------------------'
'e'
"f"
r"g")
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_multiple_lines(self):
line = """\
foo_bar_zap_bing_bang_boom(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333,
111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333)
"""
fixed = """\
foo_bar_zap_bing_bang_boom(
111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333,
111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_do_not_break_on_keyword(self):
# We don't want to put a newline after equals for keywords as this
# violates PEP 8.
line = """\
if True:
long_variable_name = tempfile.mkstemp(prefix='abcdefghijklmnopqrstuvwxyz0123456789')
"""
fixed = """\
if True:
long_variable_name = tempfile.mkstemp(
prefix='abcdefghijklmnopqrstuvwxyz0123456789')
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_do_not_begin_line_with_comma(self):
line = """\
def dummy():
if True:
if True:
if True:
object = ModifyAction( [MODIFY70.text, OBJECTBINDING71.text, COLON72.text], MODIFY70.getLine(), MODIFY70.getCharPositionInLine() )
"""
fixed = """\
def dummy():
if True:
if True:
if True:
object = ModifyAction(
[MODIFY70.text, OBJECTBINDING71.text, COLON72.text],
MODIFY70.getLine(),
MODIFY70.getCharPositionInLine())
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_should_not_break_on_dot(self):
line = """\
if True:
if True:
raise xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx('xxxxxxxxxxxxxxxxx "{d}" xxxxxxxxxxxxxx'.format(d='xxxxxxxxxxxxxxx'))
"""
fixed = """\
if True:
if True:
raise xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx(
'xxxxxxxxxxxxxxxxx "{d}" xxxxxxxxxxxxxx'.format(
d='xxxxxxxxxxxxxxx'))
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_comment(self):
line = """123
if True:
if True:
if True:
if True:
if True:
if True:
# This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
pass
# http://foo.bar/abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-
# The following is ugly commented-out code and should not be touched.
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx = 1
"""
fixed = """123
if True:
if True:
if True:
if True:
if True:
if True:
# This is a long comment that should be wrapped. I will
# wrap it using textwrap to be within 72 characters.
pass
# http://foo.bar/abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-
# The following is ugly commented-out code and should not be touched.
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx = 1
"""
with autopep8_context(line, options=['--experimental',
'--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_comment_should_not_modify_docstring(self):
line = '''\
def foo():
"""
# This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
"""
'''
with autopep8_context(line, options=['--experimental',
'--aggressive']) as result:
self.assertEqual(line, result)
def test_e501_experimental_should_only_modify_last_comment(self):
line = """123
if True:
if True:
if True:
if True:
if True:
if True:
# This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 1. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 2. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 3. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
"""
fixed = """123
if True:
if True:
if True:
if True:
if True:
if True:
# This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 1. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 2. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 3. This is a long comment that should be wrapped. I
# will wrap it using textwrap to be within 72
# characters.
"""
with autopep8_context(line, options=['--experimental',
'--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_should_not_interfere_with_non_comment(self):
line = '''
"""
# not actually a comment %d. 12345678901234567890, 12345678901234567890, 12345678901234567890.
""" % (0,)
'''
with autopep8_context(line, options=['--experimental',
'--aggressive']) as result:
self.assertEqual(line, result)
def test_e501_experimental_should_cut_comment_pattern(self):
line = """123
# -- Useless lines ----------------------------------------------------------------------
321
"""
fixed = """123
# -- Useless lines -------------------------------------------------------
321
"""
with autopep8_context(line, options=['--experimental',
'--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_function_should_not_break_on_colon(self):
line = r"""
class Useless(object):
def _table_field_is_plain_widget(self, widget):
if widget.__class__ == Widget or\
(widget.__class__ == WidgetMeta and Widget in widget.__bases__):
return True
return False
"""
fixed = r"""
class Useless(object):
def _table_field_is_plain_widget(self, widget):
if widget.__class__ == Widget or(
widget.__class__ == WidgetMeta and Widget in widget.__bases__):
return True
return False
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_with_experimental(self):
line = """\
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
}
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(line, result)
def test_e501_experimental_and_multiple_logical_lines(self):
line = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
fixed = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_and_multiple_logical_lines_with_math(self):
line = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx([-1 + 5 / -10,
100,
-3 - 4])
"""
fixed = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx(
[-1 + 5 / -10, 100, -3 - 4])
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_and_import(self):
line = """\
from . import (xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy)
"""
fixed = """\
from . import (
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_shorten_comment_with_experimental(self):
line = """\
# ------ -------------------------------------------------------------------------
"""
fixed = """\
# ------ -----------------------------------------------------------------
"""
with autopep8_context(line, options=['--experimental',
'--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_with_experimental_and_escaped_newline(self):
line = """\
if True or \\
False: # test test test test test test test test test test test test test test
pass
"""
fixed = """\
if True or \\
False: # test test test test test test test test test test test test test test
pass
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_with_experimental_and_multiline_string(self):
line = """\
print('---------------------------------------------------------------------',
('================================================', '====================='),
'''--------------------------------------------------------------------------------
''')
"""
fixed = """\
print(
'---------------------------------------------------------------------',
('================================================',
'====================='),
'''--------------------------------------------------------------------------------
''')
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_with_experimental_and_multiline_string_with_addition(self):
line = '''\
def f():
email_text += """<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>"""+despot["Nicholas"]+"""<br>
<b>Minion: </b>"""+serf["Dmitri"]+"""<br>
<b>Residence: </b>"""+palace["Winter"]+"""<br>
</body>
</html>"""
'''
fixed = '''\
def f():
email_text += """<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>"""+despot["Nicholas"]+"""<br>
<b>Minion: </b>"""+serf["Dmitri"]+"""<br>
<b>Residence: </b>"""+palace["Winter"]+"""<br>
</body>
</html>"""
'''
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_with_experimental_and_multiline_string_in_parens(self):
line = '''\
def f():
email_text += ("""<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>"""+despot["Nicholas"]+"""<br>
<b>Minion: </b>"""+serf["Dmitri"]+"""<br>
<b>Residence: </b>"""+palace["Winter"]+"""<br>
</body>
</html>""")
'''
fixed = '''\
def f():
email_text += (
"""<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>"""+despot["Nicholas"]+"""<br>
<b>Minion: </b>"""+serf["Dmitri"]+"""<br>
<b>Residence: </b>"""+palace["Winter"]+"""<br>
</body>
</html>""")
'''
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_with_experimental_and_indentation(self):
line = """\
if True:
# comment here
print(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,cccccccccccccccccccccccccccccccccccccccccc)
"""
fixed = """\
if True:
# comment here
print(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccccccccccccccccc)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_with_multiple_keys_and_experimental(self):
line = """\
one_two_three_four_five_six = {'one two three four five': 12345, 'asdfsdflsdkfjl sdflkjsdkfkjsfjsdlkfj sdlkfjlsfjs': '343',
1: 1}
"""
fixed = """\
one_two_three_four_five_six = {
'one two three four five': 12345,
'asdfsdflsdkfjl sdflkjsdkfkjsfjsdlkfj sdlkfjlsfjs': '343', 1: 1}
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_with_experimental_and_carriage_returns_only(self):
"""Make sure _find_logical() does not crash."""
line = 'if True:\r from aaaaaaaaaaaaaaaa import bbbbbbbbbbbbbbbbbbb\r \r ccccccccccc = None\r'
fixed = 'if True:\r from aaaaaaaaaaaaaaaa import bbbbbbbbbbbbbbbbbbb\r\r ccccccccccc = None\r'
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_should_ignore_imports(self):
line = """\
import logging, os, bleach, commonware, urllib2, json, time, requests, urlparse, re
"""
with autopep8_context(line, options=['--select=E501',
'--experimental']) as result:
self.assertEqual(line, result)
def test_e501_experimental_should_not_do_useless_things(self):
line = """\
foo(' ')
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(line, result)
def test_e501_experimental_with_percent(self):
line = """\
raise MultiProjectException("Ambiguous workspace: %s=%s, %s" % ( varname, varname_path, os.path.abspath(config_filename)))
"""
fixed = """\
raise MultiProjectException(
"Ambiguous workspace: %s=%s, %s" %
(varname, varname_path, os.path.abspath(config_filename)))
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_def(self):
line = """\
def foobar(sldfkjlsdfsdf, kksdfsdfsf,sdfsdfsdf, sdfsdfkdk, szdfsdfsdf, sdfsdfsdfsdlkfjsdlf, sdfsdfddf,sdfsdfsfd, sdfsdfdsf):
pass
"""
fixed = """\
def foobar(sldfkjlsdfsdf, kksdfsdfsf, sdfsdfsdf, sdfsdfkdk, szdfsdfsdf,
sdfsdfsdfsdlkfjsdlf, sdfsdfddf, sdfsdfsfd, sdfsdfdsf):
pass
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_tuple(self):
line = """\
def f():
man_this_is_a_very_long_function_name(an_extremely_long_variable_name,
('a string that is long: %s'%'bork'))
"""
fixed = """\
def f():
man_this_is_a_very_long_function_name(
an_extremely_long_variable_name,
('a string that is long: %s' % 'bork'))
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_tuple_in_list(self):
line = """\
def f(self):
self._xxxxxxxx(aaaaaa, bbbbbbbbb, cccccccccccccccccc,
[('mmmmmmmmmm', self.yyyyyyyyyy.zzzzzzzz/_DDDDDD)], eee, 'ff')
"""
fixed = """\
def f(self):
self._xxxxxxxx(
aaaaaa, bbbbbbbbb, cccccccccccccccccc,
[('mmmmmmmmmm', self.yyyyyyyyyy.zzzzzzzz / _DDDDDD)],
eee, 'ff')
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_complex_reformat(self):
line = """\
bork(111, 111, 111, 111, 222, 222, 222, { 'foo': 222, 'qux': 222 }, ((['hello', 'world'], ['yo', 'stella', "how's", 'it'], ['going']), {str(i): i for i in range(10)}, {'bork':((x, x**x) for x in range(10))}), 222, 222, 222, 222, 333, 333, 333, 333)
"""
fixed = """\
bork(
111, 111, 111, 111, 222, 222, 222, {'foo': 222, 'qux': 222},
((['hello', 'world'],
['yo', 'stella', "how's", 'it'],
['going']),
{str(i): i for i in range(10)},
{'bork': ((x, x ** x) for x in range(10))}),
222, 222, 222, 222, 333, 333, 333, 333)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_dot_calls(self):
line = """\
if True:
logging.info('aaaaaa bbbbb dddddd ccccccc eeeeeee fffffff gg: %s',
xxxxxxxxxxxxxxxxx.yyyyyyyyyyyyyyyyyyyyy(zzzzzzzzzzzzzzzzz.jjjjjjjjjjjjjjjjj()))
"""
fixed = """\
if True:
logging.info(
'aaaaaa bbbbb dddddd ccccccc eeeeeee fffffff gg: %s',
xxxxxxxxxxxxxxxxx.yyyyyyyyyyyyyyyyyyyyy(
zzzzzzzzzzzzzzzzz.jjjjjjjjjjjjjjjjj()))
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_avoid_breaking_at_empty_parentheses_if_possible(
self):
line = """\
someverylongindenttionwhatnot().foo().bar().baz("and here is a long string 123456789012345678901234567890")
"""
fixed = """\
someverylongindenttionwhatnot().foo().bar().baz(
"and here is a long string 123456789012345678901234567890")
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_unicode(self):
line = """\
someverylongindenttionwhatnot().foo().bar().baz("and here is a l안녕하세요 123456789012345678901234567890")
"""
fixed = """\
someverylongindenttionwhatnot().foo().bar().baz(
"and here is a l안녕하세요 123456789012345678901234567890")
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_tuple_assignment(self):
line = """\
if True:
(xxxxxxx,) = xxxx.xxxxxxx.xxxxx(xxxxxxxxxxxx.xx).xxxxxx(xxxxxxxxxxxx.xxxx == xxxx.xxxx).xxxxx()
"""
fixed = """\
if True:
(xxxxxxx,) = xxxx.xxxxxxx.xxxxx(xxxxxxxxxxxx.xx).xxxxxx(
xxxxxxxxxxxx.xxxx == xxxx.xxxx).xxxxx()
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
@unittest.skip('To do')
def test_e501_experimental_tuple_on_line(self):
line = """\
def f():
self.aaaaaaaaa(bbbbbb, ccccccccc, dddddddddddddddd,
((x, y/eeeeeee) for x, y in self.outputs.total.iteritems()),
fff, 'GG')
"""
fixed = """\
def f():
self.aaaaaaaaa(
bbbbbb, ccccccccc, dddddddddddddddd,
((x, y / eeeeeee) for x, y in self.outputs.total.iteritems()),
fff, 'GG')
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_tuple_on_line_two_space_indent(self):
line = """\
def f():
self.aaaaaaaaa(bbbbbb, ccccccccc, dddddddddddddddd,
((x, y/eeeeeee) for x, y in self.outputs.total.iteritems()),
fff, 'GG')
"""
fixed = """\
def f():
self.aaaaaaaaa(bbbbbb, ccccccccc, dddddddddddddddd,
((x, y/eeeeeee) for x, y in self.outputs.total.iteritems()),
fff, 'GG')
"""
with autopep8_context(line, options=['--experimental',
'--indent-size=2']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_oversized_default_initializer(self):
line = """\
aaaaaaaaaaaaaaaaaaaaa(lllll,mmmmmmmm,nnn,fffffffffff,ggggggggggg,hhh,ddddddddddddd=eeeeeeeee,bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb=ccccccccccccccccccccccccccccccccccccccccccccccccc,bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb=cccccccccccccccccccccccccccccccccccccccccccccccc)
"""
fixed = """\
aaaaaaaaaaaaaaaaaaaaa(
lllll, mmmmmmmm, nnn, fffffffffff, ggggggggggg, hhh,
ddddddddddddd=eeeeeeeee,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb=ccccccccccccccccccccccccccccccccccccccccccccccccc,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb=cccccccccccccccccccccccccccccccccccccccccccccccc)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_decorator(self):
line = """\
@foo(('xxxxxxxxxxxxxxxxxxxxxxxxxx', users.xxxxxxxxxxxxxxxxxxxxxxxxxx), ('yyyyyyyyyyyy', users.yyyyyyyyyyyy), ('zzzzzzzzzzzzzz', users.zzzzzzzzzzzzzz))
"""
fixed = """\
@foo(('xxxxxxxxxxxxxxxxxxxxxxxxxx', users.xxxxxxxxxxxxxxxxxxxxxxxxxx),
('yyyyyyyyyyyy', users.yyyyyyyyyyyy),
('zzzzzzzzzzzzzz', users.zzzzzzzzzzzzzz))
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_long_class_name(self):
line = """\
class AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA(BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB):
pass
"""
fixed = """\
class AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA(
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB):
pass
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_no_line_change(self):
line = """\
def f():
return '<a href="javascript:;" class="copy-to-clipboard-button" data-clipboard-text="%s" title="copy url to clipboard">Copy Link</a>' % url
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(line, result)
def test_e501_experimental_splitting_small_arrays(self):
line = """\
def foo():
unspecified[service] = ('# The %s brown fox jumped over the lazy, good for nothing '
'dog until it grew tired and set its sights upon the cat!' % adj)
"""
fixed = """\
def foo():
unspecified[service] = (
'# The %s brown fox jumped over the lazy, good for nothing '
'dog until it grew tired and set its sights upon the cat!' % adj)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_no_splitting_in_func_call(self):
line = """\
def foo():
if True:
if True:
function.calls('%r (%s): aaaaaaaa bbbbbbbbbb ccccccc ddddddd eeeeee (%d, %d)',
xxxxxx.yy, xxxxxx.yyyy, len(mmmmmmmmmmmmm['fnord']),
len(mmmmmmmmmmmmm['asdfakjhdsfkj']))
"""
fixed = """\
def foo():
if True:
if True:
function.calls(
'%r (%s): aaaaaaaa bbbbbbbbbb ccccccc ddddddd eeeeee (%d, %d)',
xxxxxx.yy, xxxxxx.yyyy, len(mmmmmmmmmmmmm['fnord']),
len(mmmmmmmmmmmmm['asdfakjhdsfkj']))
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_no_splitting_at_dot(self):
line = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx = [yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.MMMMMM_NNNNNNN_OOOOO,
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.PPPPPP_QQQQQQQ_RRRRR,
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.SSSSSS_TTTTTTT_UUUUU]
"""
fixed = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx = [
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.MMMMMM_NNNNNNN_OOOOO,
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.PPPPPP_QQQQQQQ_RRRRR,
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.SSSSSS_TTTTTTT_UUUUU]
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_no_splitting_before_arg_list(self):
line = """\
xxxxxxxxxxxx = [yyyyyy['yyyyyy'].get('zzzzzzzzzzz') for yyyyyy in x.get('aaaaaaaaaaa') if yyyyyy['yyyyyy'].get('zzzzzzzzzzz')]
"""
fixed = """\
xxxxxxxxxxxx = [yyyyyy['yyyyyy'].get('zzzzzzzzzzz')
for yyyyyy in x.get('aaaaaaaaaaa')
if yyyyyy['yyyyyy'].get('zzzzzzzzzzz')]
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_dont_split_if_looks_bad(self):
line = """\
def f():
if True:
BAD(('xxxxxxxxxxxxx', 42), 'I died for beauty, but was scarce / Adjusted in the tomb %s', yyyyyyyyyyyyy)
"""
fixed = """\
def f():
if True:
BAD(('xxxxxxxxxxxxx', 42),
'I died for beauty, but was scarce / Adjusted in the tomb %s',
yyyyyyyyyyyyy)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_list_comp(self):
line = """\
xxxxxxxxxxxs = [xxxxxxxxxxx for xxxxxxxxxxx in xxxxxxxxxxxs if not yyyyyyyyyyyy[xxxxxxxxxxx] or not yyyyyyyyyyyy[xxxxxxxxxxx].zzzzzzzzzz]
"""
fixed = """\
xxxxxxxxxxxs = [
xxxxxxxxxxx for xxxxxxxxxxx in xxxxxxxxxxxs
if not yyyyyyyyyyyy[xxxxxxxxxxx] or
not yyyyyyyyyyyy[xxxxxxxxxxx].zzzzzzzzzz]
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
line = """\
def f():
xxxxxxxxxx = [f for f in yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.zzzzzzzzzzzzzzzzzzzzzzzz.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa]
"""
fixed = """\
def f():
xxxxxxxxxx = [
f
for f in
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.zzzzzzzzzzzzzzzzzzzzzzzz.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa]
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_dict(self):
line = """\
def f():
zzzzzzzzzzzzz = {
'aaaaaa/bbbbbb/ccccc/dddddddd/eeeeeeeee/fffffffffff/ggggggggg/hhhhhhhh.py':
yyyyyyyyyyy.xxxxxxxxxxx(
'aa/bbbbbbb/cc/ddddddd/eeeeeeeeeee/fffffffffff/ggggggggg/hhhhhhh/ggggg.py',
'00000000',
yyyyyyyyyyy.xxxxxxxxx.zzzz),
}
"""
fixed = """\
def f():
zzzzzzzzzzzzz = {
'aaaaaa/bbbbbb/ccccc/dddddddd/eeeeeeeee/fffffffffff/ggggggggg/hhhhhhhh.py':
yyyyyyyyyyy.xxxxxxxxxxx(
'aa/bbbbbbb/cc/ddddddd/eeeeeeeeeee/fffffffffff/ggggggggg/hhhhhhh/ggggg.py',
'00000000', yyyyyyyyyyy.xxxxxxxxx.zzzz), }
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_indentation(self):
line = """\
class Klass(object):
'''Class docstring.'''
def Quote(self, parameter_1, parameter_2, parameter_3, parameter_4, parameter_5):
pass
"""
fixed = """\
class Klass(object):
'''Class docstring.'''
def Quote(
self, parameter_1, parameter_2, parameter_3, parameter_4,
parameter_5):
pass
"""
with autopep8_context(line, options=['--experimental',
'--indent-size=2']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_long_function_call_elements(self):
line = """\
def g():
pppppppppppppppppppppppppp1, pppppppppppppppppppppppp2 = (
zzzzzzzzzzzz.yyyyyyyyyyyyyy(aaaaaaaaa=10, bbbbbbbbbbbbbbbb='2:3',
cccccccc='{1:2}', dd=1, eeeee=0),
zzzzzzzzzzzz.yyyyyyyyyyyyyy(dd=7, aaaaaaaaa=16, bbbbbbbbbbbbbbbb='2:3',
cccccccc='{1:2}',
eeeee=xxxxxxxxxxxxxxxxx.wwwwwwwwwwwww.vvvvvvvvvvvvvvvvvvvvvvvvv))
"""
fixed = """\
def g():
pppppppppppppppppppppppppp1, pppppppppppppppppppppppp2 = (
zzzzzzzzzzzz.yyyyyyyyyyyyyy(
aaaaaaaaa=10, bbbbbbbbbbbbbbbb='2:3', cccccccc='{1:2}', dd=1,
eeeee=0),
zzzzzzzzzzzz.yyyyyyyyyyyyyy(
dd=7, aaaaaaaaa=16, bbbbbbbbbbbbbbbb='2:3', cccccccc='{1:2}',
eeeee=xxxxxxxxxxxxxxxxx.wwwwwwwwwwwww.vvvvvvvvvvvvvvvvvvvvvvvvv))
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_long_nested_tuples_in_arrays(self):
line = """\
def f():
aaaaaaaaaaa.bbbbbbb([
('xxxxxxxxxx', 'yyyyyy', 'Heaven hath no wrath like love to hatred turned. Nor hell a fury like a woman scorned.'),
('xxxxxxx', 'yyyyyyyyyyy', "To the last I grapple with thee. From hell's heart I stab at thee. For hate's sake I spit my last breath at thee!")])
"""
fixed = """\
def f():
aaaaaaaaaaa.bbbbbbb(
[('xxxxxxxxxx', 'yyyyyy',
'Heaven hath no wrath like love to hatred turned. Nor hell a fury like a woman scorned.'),
('xxxxxxx', 'yyyyyyyyyyy',
"To the last I grapple with thee. From hell's heart I stab at thee. For hate's sake I spit my last breath at thee!")])
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_func_call_open_paren_not_separated(self):
# Don't separate the opening paren of a function call from the
# function's name.
line = """\
def f():
owned_list = [o for o in owned_list if self.display['zzzzzzzzzzzzzz'] in aaaaaaaaaaaaaaaaa.bbbbbbbbbbbbbbbbbbbb(o.qq, ccccccccccccccccccccccccccc.ddddddddd.eeeeeee)]
"""
fixed = """\
def f():
owned_list = [
o for o in owned_list
if self.display['zzzzzzzzzzzzzz'] in aaaaaaaaaaaaaaaaa.bbbbbbbbbbbbbbbbbbbb(
o.qq, ccccccccccccccccccccccccccc.ddddddddd.eeeeeee)]
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_long_dotted_object(self):
# Don't separate a long dotted object too soon. Otherwise, it may end
# up with most of its elements on separate lines.
line = """\
def f(self):
return self.xxxxxxxxxxxxxxx(aaaaaaa.bbbbb.ccccccc.ddd.eeeeee.fffffffff.ggggg.hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)
"""
fixed = """\
def f(self):
return self.xxxxxxxxxxxxxxx(
aaaaaaa.bbbbb.ccccccc.ddd.eeeeee.fffffffff.ggggg.
hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_parsing_dict_with_comments(self):
line = """\
self.display['xxxxxxxxxxxx'] = [{'title': _('Library'), #. This is the first comment.
'flag': aaaaaaaaaa.bbbbbbbbb.cccccccccc
}, {'title': _('Original'), #. This is the second comment.
'flag': aaaaaaaaaa.bbbbbbbbb.dddddddddd
}, {'title': _('Unknown'), #. This is the third comment.
'flag': aaaaaaaaaa.bbbbbbbbb.eeeeeeeeee}]
"""
fixed = """\
self.display['xxxxxxxxxxxx'] = [{'title': _('Library'), # . This is the first comment.
'flag': aaaaaaaaaa.bbbbbbbbb.cccccccccc
# . This is the second comment.
}, {'title': _('Original'),
'flag': aaaaaaaaaa.bbbbbbbbb.dddddddddd
# . This is the third comment.
}, {'title': _('Unknown'),
'flag': aaaaaaaaaa.bbbbbbbbb.eeeeeeeeee}]
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_if_line_over_limit(self):
line = """\
if not xxxxxxxxxxxx(aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc, dddddddddddddddddddddd):
return 1
"""
fixed = """\
if not xxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc,
dddddddddddddddddddddd):
return 1
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_for_line_over_limit(self):
line = """\
for aaaaaaaaa in xxxxxxxxxxxx(aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc, dddddddddddddddddddddd):
pass
"""
fixed = """\
for aaaaaaaaa in xxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc,
dddddddddddddddddddddd):
pass
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_while_line_over_limit(self):
line = """\
while xxxxxxxxxxxx(aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc, dddddddddddddddddddddd):
pass
"""
fixed = """\
while xxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc,
dddddddddddddddddddddd):
pass
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_in(self):
line = """\
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if k_left in ('any', k_curr) and k_right in ('any', k_curr):
pass
"""
fixed = """\
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if k_left in (
'any', k_curr) and k_right in (
'any', k_curr):
pass
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
@unittest.skipIf(sys.version_info.major >= 3, 'syntax error in Python3')
def test_e501_print_isnot_function(self):
line = """\
def d():
print "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d" % (111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
fixed = """\
def d():
print "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d" % (
111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333,
333, 333, 333)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
@contextlib.contextmanager
def autopep8_context(line, options=None):
if not options:
options = []
with temporary_file_context(line) as filename:
options = autopep8.parse_args([filename] + list(options))
yield autopep8.fix_file(filename=filename, options=options)
@contextlib.contextmanager
def autopep8_subprocess(line, options):
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [filename] + options,
stdout=PIPE)
yield (p.communicate()[0].decode('utf-8'), p.returncode)
@contextlib.contextmanager
def temporary_file_context(text, suffix='', prefix=''):
temporary = mkstemp(suffix=suffix, prefix=prefix)
os.close(temporary[0])
with autopep8.open_with_encoding(temporary[1],
encoding='utf-8',
mode='w') as temp_file:
temp_file.write(text)
yield temporary[1]
os.remove(temporary[1])
@contextlib.contextmanager
def temporary_project_directory(prefix="autopep8test"):
temporary = mkdtemp(prefix=prefix)
yield temporary
shutil.rmtree(temporary)
@contextlib.contextmanager
def disable_stderr():
sio = StringIO()
with capture_stderr(sio):
yield
@contextlib.contextmanager
def capture_stderr(sio):
_tmp = sys.stderr
sys.stderr = sio
try:
yield
finally:
sys.stderr = _tmp
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"blahblah",
"AUTOPEP8_COVERAGE",
"blahblahblah",
"blahblahblahblah"
] |
[]
|
["blahblah", "AUTOPEP8_COVERAGE", "blahblahblah", "blahblahblahblah"]
|
python
| 4 | 0 | |
catchsparrow/wsgi.py
|
"""
WSGI config for catchsparrow project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'catchsparrow.settings')
application = get_wsgi_application()
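# Usage sketch (assumed deployment, not part of the original file): a WSGI
# server such as gunicorn would serve this module with
#   gunicorn catchsparrow.wsgi:application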
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/runtests.py
|
# -*- coding: utf-8 -*-
import os
from django_nose import NoseTestSuiteRunner
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
if __name__ == '__main__':
if NoseTestSuiteRunner(verbosity=1).run_tests(['tests']) > 0:
exit(1)
else:
exit(0)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
python/LAC/lac.py
|
# -*- coding: UTF-8 -*-
################################################################################
#
# Copyright (c) 2020 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
"""
This file defines the LAC class and implements its interfaces for word segmentation, part-of-speech tagging and model training.
"""
import os
import shutil
import logging
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.core import PaddleTensor
from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import create_paddle_predictor
from . import reader
from . import utils
from . import nets
from .custom import Customization
from ._compat import *
def _get_abs_path(path): return os.path.normpath(
os.path.join(os.getcwd(), os.path.dirname(__file__), path))
DEFAULT_LAC = _get_abs_path('lac_model')
DEFAULT_SEG = _get_abs_path('seg_model')
class LAC(object):
"""docstring for LAC"""
def __init__(self, model_path=None, mode='lac', use_cuda=False):
super(LAC, self).__init__()
utils.check_cuda(use_cuda)
if model_path is None:
model_path = DEFAULT_SEG if mode == 'seg' else DEFAULT_LAC
self.args = utils.DefaultArgs(model_path)
self.args.use_cuda = use_cuda
self.model_path = model_path
config = AnalysisConfig(self.args.init_checkpoint)
config.disable_glog_info()
if use_cuda:
self.place = fluid.CUDAPlace(
int(os.getenv('FLAGS_selected_gpus', '0')))
config.enable_use_gpu(memory_pool_init_size_mb=500,
device_id=int(
os.getenv('FLAGS_selected_gpus', '0')),
)
else:
self.place = fluid.CPUPlace()
# init executor
self.exe = fluid.Executor(self.place)
self.dataset = reader.Dataset(self.args)
self.predictor = create_paddle_predictor(config)
self.custom = None
self.batch = False
self.return_tag = self.args.tag_type != 'seg'
def run(self, texts):
"""执行模型预测过程
Args:
texts: 模型输入的文本,一个Unicode编码的字符串或者
由Unicode编码字符串组成的List
Returns:
返回LAC处理结果
如果mode=='seg', 则只返回分词结果
如果mode=='lac', 则同时返回分词与标签
"""
if isinstance(texts, list) or isinstance(texts, tuple):
self.batch = True
else:
if len(texts.strip()) == 0:
return ([], []) if self.return_tag else []
texts = [texts]
self.batch = False
tensor_words = self.texts2tensor(texts)
crf_decode = self.predictor.run([tensor_words])
result = self.parse_result(texts, crf_decode[0], self.dataset)
if self.return_tag:
return result if self.batch else result[0]
else:
if not self.batch:
return result[0][0]
return [word for word, _ in result]
def parse_result(self, lines, crf_decode, dataset):
"""将模型输出的Tensor转为明文"""
offset_list = crf_decode.lod[0]
crf_decode = crf_decode.data.int64_data()
batch_size = len(offset_list) - 1
batch_out = []
for sent_index in range(batch_size):
begin, end = offset_list[sent_index], offset_list[sent_index + 1]
sent = lines[sent_index]
tags = [dataset.id2label_dict[str(id)]
for id in crf_decode[begin:end]]
if self.custom:
self.custom.parse_customization(sent, tags)
sent_out = []
tags_out = []
for ind, tag in enumerate(tags):
# for the first char
if len(sent_out) == 0 or tag.endswith("B") or tag.endswith("S"):
sent_out.append(sent[ind])
tags_out.append(tag[:-2])
continue
sent_out[-1] += sent[ind]
# use the last tag as the label
tags_out[-1] = tag[:-2]
batch_out.append([sent_out, tags_out])
return batch_out
def train(self, model_save_dir, train_data, test_data=None, iter_num=10, thread_num=10):
"""执行模型增量训练
Args:
model_save_dir: 训练结束后模型保存的路径
train_data: 训练数据路径
test_data: 测试数据路径,若为None则不进行测试
iter_num: 训练数据的迭代次数
thread_num: 执行训练的线程数
"""
self.args.train_data = train_data
self.args.test_data = test_data
self.args.epoch = iter_num
self.args.cpu_num = thread_num
logging.info("Start Training!")
scope = fluid.core.Scope()
with fluid.scope_guard(scope):
test_program, fetch_list = nets.do_train(self.args)
fluid.io.save_inference_model(os.path.join(model_save_dir, 'model'),
['words'],
fetch_list,
self.exe,
main_program=test_program,
)
# copy the configuration files
if os.path.exists(os.path.join(model_save_dir, 'conf')):
shutil.rmtree(os.path.join(model_save_dir, 'conf'))
shutil.copytree(os.path.join(self.model_path, 'conf'),
os.path.join(model_save_dir, 'conf'))
self.load_model(model_save_dir)
logging.info("Finish Training!")
def load_model(self, model_dir):
"""装载预训练的模型"""
use_cuda = self.args.use_cuda
self.args = utils.DefaultArgs(model_dir)
self.args.use_cuda = use_cuda
self.dataset = reader.Dataset(self.args)
self.return_tag = self.args.tag_type != 'seg'
self.model_path = model_dir
config = AnalysisConfig(os.path.join(model_dir, 'model'))
config.disable_glog_info()
if self.args.use_cuda:
config.enable_use_gpu(memory_pool_init_size_mb=500,
device_id=int(
os.getenv('FLAGS_selected_gpus', '0')),
)
self.predictor = create_paddle_predictor(config)
def load_customization(self, customization_file, sep=None):
"""装载用户词典
Args:
texts: 用户词典路径
sep: 表示词典中,短语片段的分隔符,默认为空格' '或制表符'\t'
"""
self.custom = Customization()
self.custom.load_customization(customization_file, sep)
def add_word(self, word, sep=None):
"""添加单词,格式与用户词典一致
Args:
texts: 用户定义词典,如:"春天"、"花 开"、"春天/SEASON"、"花/n 开/v"、
sep: 表示词典中,短语片段的分隔符,默认为空格' '或制表符'\t'
"""
if self.custom is None:
self.custom = Customization()
self.custom.add_word(word, sep)
def texts2tensor(self, texts):
"""将文本输入转为Paddle输入的Tensor
Args:
texts: 由string组成的list,模型输入的文本
Returns:
Paddle模型输入用的Tensor
"""
lod = [0]
data = []
for i, text in enumerate(texts):
text_inds = self.dataset.word_to_ids(text)
data += text_inds
lod.append(len(text_inds) + lod[i])
data_np = np.array(data, dtype="int64")
tensor = fluid.core.PaddleTensor(data_np)
tensor.lod = [lod]
tensor.shape = [lod[-1], 1]
return tensor
if __name__ == "__main__":
lac = LAC('lac_model')
test_data = [u'百度是一家高科技公司', u'LAC是一个优秀的分词工具', '']
print('######### run:list ##############')
result = lac.run(test_data)
for res in result:
print(' '.join(res))
print('######### run:str ##############')
result = lac.run(test_data[0])
print(' '.join(result))
print('######### run:tag ##############')
result = lac.run(test_data, return_tag=True)
for i, (sent, tags) in enumerate(result):
result_list = ['(%s, %s)' % (ch, tag) for ch, tag in zip(sent, tags)]
print(''.join(result_list))
# retrain the model
lac.train(model_save_dir='models_test',
train_data='./data/train.tsv', test_data='./data/test.tsv')
print('######### run:list ##############')
result = lac.run(test_data)
for res in result:
print(' '.join(res))
print('######### run:str ##############')
result = lac.run(test_data[0])
print(' '.join(result))
print('######### run:tag ##############')
result = lac.run(test_data, return_tag=True)
for i, (sent, tags) in enumerate(result):
result_list = ['(%s, %s)' % (ch, tag) for ch, tag in zip(sent, tags)]
print(''.join(result_list))
|
[] |
[] |
[
"FLAGS_selected_gpus"
] |
[]
|
["FLAGS_selected_gpus"]
|
python
| 1 | 0 | |
matrix-commit/action.py
|
import os
import time
import simplematrixbotlib as botlib
import git
HOMESERVER = os.getenv("HOMESERVER")
USERNAME = os.getenv("USERNAME")
ACCESS_TOKEN = os.getenv("ACCESS_TOKEN")
ROOM_ID = os.getenv("ROOM_ID")
MESSAGE = os.getenv("MESSAGE") or "Commit:"
repo = git.Repo('.')
commit = repo.commit('HEAD')
repository_url = repo.remotes.origin.url.split('.git')[0]
commit_url = f"{repository_url}/commit/{commit.hexsha}"
creds = botlib.Creds(homeserver=HOMESERVER, username=USERNAME, access_token=ACCESS_TOKEN)
bot = botlib.Bot(creds=creds)
@bot.listener.on_startup
async def send_message(joined_room_id: str) -> None:
if ROOM_ID and ROOM_ID != joined_room_id:
return
message = f"""
{MESSAGE}
URL: {commit_url}
Repository: {repository_url}
Author: {commit.author.name}
Branch: {repo.active_branch}
Timestamp: {time.asctime(time.gmtime(commit.committed_date))}
Message: {commit.message}
"""
await bot.api.send_markdown_message(
room_id=joined_room_id,
message=message,
msgtype="m.notice")
exit()
bot.run()
|
[] |
[] |
[
"MESSAGE",
"USERNAME",
"HOMESERVER",
"ROOM_ID",
"ACCESS_TOKEN"
] |
[]
|
["MESSAGE", "USERNAME", "HOMESERVER", "ROOM_ID", "ACCESS_TOKEN"]
|
python
| 5 | 0 | |
examples/user_guide/add_data.py
|
#!/usr/bin/env python
from radical.entk import Pipeline, Stage, Task, AppManager
import os
# ------------------------------------------------------------------------------
# Set default verbosity
if os.environ.get('RADICAL_ENTK_VERBOSE') is None:
os.environ['RADICAL_ENTK_REPORT'] = 'True'
# Description of how the RabbitMQ process is accessible
# No need to change/set any variables if you installed RabbitMQ as a system
# process. If you are running RabbitMQ under a docker container or another
# VM, set "RMQ_HOSTNAME" and "RMQ_PORT" in the session where you are running
# this script.
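# For illustration only (the values below are placeholders, not required
# settings): a containerized RabbitMQ could be exposed to this script with
#   export RMQ_HOSTNAME=localhost
#   export RMQ_PORT=5672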
hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
port = os.environ.get('RMQ_PORT', 5672)
username = os.environ.get('RMQ_USERNAME')
password = os.environ.get('RMQ_PASSWORD')
if __name__ == '__main__':
# Create a Pipeline object
p = Pipeline()
# Create a Stage object
s1 = Stage()
# Create a Task object which creates a file named 'output.txt' of size 1 MB
t1 = Task()
t1.executable = '/bin/bash'
t1.arguments = ['-l', '-c', 'base64 /dev/urandom | head -c 1000000 > output.txt']
# Add the Task to the Stage
s1.add_tasks(t1)
# Add Stage to the Pipeline
p.add_stages(s1)
# Create another Stage object
s2 = Stage()
s2.name = 'Stage.2'
# Create a Task object
t2 = Task()
t2.executable = '/bin/bash'
t2.arguments = ['-l', '-c', 'grep -o . output.txt | sort | uniq -c > ccount.txt']
# Copy data from the task in the first stage to the current task's location
t2.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/output.txt' % (p.name, s1.name, t1.name)]
# Download the output of the current task to the current location
t2.download_output_data = ['ccount.txt']
# Add the Task to the Stage
s2.add_tasks(t2)
# Add Stage to the Pipeline
p.add_stages(s2)
# Create Application Manager
appman = AppManager(hostname=hostname, port=port, username=username,
password=password)
# Assign the workflow as a set or list of Pipelines to the Application Manager
appman.workflow = set([p])
# Create a dictionary describing the four mandatory keys:
# resource, walltime, cpus and project
# resource is 'local.localhost' to execute locally
res_dict = {
'resource': 'local.localhost',
'walltime': 10,
'cpus': 1
}
# Assign resource request description to the Application Manager
appman.resource_desc = res_dict
# Run the Application Manager
appman.run()
|
[] |
[] |
[
"RADICAL_ENTK_VERBOSE",
"RMQ_USERNAME",
"RMQ_HOSTNAME",
"RMQ_PORT",
"RADICAL_ENTK_REPORT",
"RMQ_PASSWORD"
] |
[]
|
["RADICAL_ENTK_VERBOSE", "RMQ_USERNAME", "RMQ_HOSTNAME", "RMQ_PORT", "RADICAL_ENTK_REPORT", "RMQ_PASSWORD"]
|
python
| 6 | 0 | |
cache-engine/bitg-blockchain-crawler.py
|
# The app listens for new blocks, reads the extrinsics and stores the transactions in a MySQL/MariaDB database.
# The database must already exist; the app will create the tables and indexes it uses.
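# For illustration only (placeholder values): the environment variables read
# below could be exported before launching the crawler, for example
#   export DB_NAME=bitg DB_USER=crawler DB_PWD=secret DB_HOST=localhost
#   export NODE=ws://127.0.0.1:9944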
# import libraries
# system packages
import sys
import os
import json
# Substrate module
from substrateinterface import SubstrateInterface, Keypair,ExtrinsicReceipt
from substrateinterface.exceptions import SubstrateRequestException
# base64 encoder/decoder
import base64
# base58 encoder/decoder
import base58
#import scale library to load data types
import scalecodec
# import mysql connector
import mysql.connector
currentime=""
# read environment variables
try:
DB_NAME=os.environ['DB_NAME']
DB_USER=os.environ['DB_USER']
DB_PWD=os.environ['DB_PWD']
DB_HOST=os.environ['DB_HOST']
NODE=os.environ['NODE']
except KeyError:
print("System Variables have not been set")
exit(1)
# function to load data types registry
def load_type_registry_file(file_path: str) -> dict:
with open(os.path.abspath(file_path), 'r') as fp:
data = fp.read()
return json.loads(data)
# function to create tables required
def create_tables():
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
cursor = cnx.cursor()
# use database
try:
cursor.execute("USE {}".format(DB_NAME))
except mysql.connector.Error as err:
print("Database {} does not exists.".format(DB_NAME))
print(err)
exit(1)
# create tables
createtx="CREATE TABLE `transactions` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,`txhash` VARCHAR(66) NOT NULL, \
`sender` VARCHAR(64) NOT NULL, `recipient` VARCHAR(64) NOT NULL, \
`amount` numeric(32,0) NOT NULL, \
`gasfees` numeric(32,0) NOT NULL, \
`dtblockchain` DATETIME NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table TRANSACTIONS...")
cursor.execute(createtx)
except mysql.connector.Error as err:
if(err.msg!="Table 'transactions' already exists"):
print(err.msg)
else:
print("OK")
# create indexes
createidxtx="CREATE INDEX txhash on transactions(txhash)"
try:
print("Creating index TXHASH on TRANSACTIONS...")
cursor.execute(createidxtx)
except mysql.connector.Error as err:
if(err.msg!="Duplicate key name 'txhash'"):
print(err.msg)
else:
print("OK")
createidxtx="CREATE INDEX sender on transactions(sender)"
try:
print("Creating index SENDER on TRANSACTIONS...")
cursor.execute(createidxtx)
except mysql.connector.Error as err:
if(err.msg!="Duplicate key name 'sender'"):
print(err.msg)
else:
print("OK")
createidxtx="CREATE INDEX recipient on transactions(recipient)"
try:
print("Creating index RECIPIENT on TRANSACTIONS...")
cursor.execute(createidxtx)
except mysql.connector.Error as err:
if(err.msg!="Duplicate key name 'recipient'"):
print(err.msg)
else:
print("OK")
# creating sync table to keep synchronisation info
createsync="CREATE TABLE `sync` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`lastblocknumberverified` INT(11) NOT NULL, \
`lastapprovalrequestprocessed` int(11) default 0 not null,\
PRIMARY KEY (id))"
try:
print("Creating table SYNC...")
cursor.execute(createsync)
except mysql.connector.Error as err:
if(err.msg!="Table 'sync' already exists"):
print(err.msg)
else:
print("OK")
# creating categories table for impact actions
createcategories="CREATE TABLE `impactactionscategories` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(64) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash), PRIMARY KEY (id))"
try:
print("Creating table impactactionscategories...")
cursor.execute(createcategories)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionscategories' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactions table for impact actions
createactions="CREATE TABLE `impactactions` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(128) NOT NULL,\
`category` INT(11) NOT NULL,`auditors` INT(11) NOT NULL,`blockstart` INT(11) NOT NULL,\
`blockend` INT(11) NOT NULL, `rewardstoken` INT(11) NOT NULL, `rewardsamount` INT(32) NOT NULL,\
`rewardsoracle` INT(32) NOT NULL,`rewardauditors` INT(32) NOT NULL,\
`slashingsauditors` INT(32) NOT NULL,`maxerrorsauditor` INT(11) NOT NULL,\
`fields` varchar(8192) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash), \
PRIMARY KEY (id))"
try:
print("Creating table impactactions...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactions' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsoracles table for impact actions
createactions="CREATE TABLE `impactactionsoracles` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(128) NOT NULL,\
`account` VARCHAR(48) NOT NULL,`otherinfo` VARCHAR(66) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table impactactionsoracles...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsoracles' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsauditors table for impact actions
createactions="CREATE TABLE `impactactionsauditors` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(128) NOT NULL,\
`account` VARCHAR(48) NOT NULL,`categories` VARCHAR(128) NOT NULL,\
`area` VARCHAR(64) NOT NULL,`otherinfo` VARCHAR(66) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table impactactionsauditors...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsauditors' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsproxy table for impact actions
createactions="CREATE TABLE `impactactionsproxy` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`account` VARCHAR(48) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsproxy...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsproxy' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsapprovalrequests table for impact actions
createactions="CREATE TABLE `impactactionsapprovalrequests` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`info` VARCHAR(8192) NOT NULL,\
`dtapproved` DATETIME,\
`dtrefused` DATETIME,\
CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsapprovalrequests...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsapprovalrequests' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsapprovalrequestsauditors table for impact actions
createactions="CREATE TABLE `impactactionsapprovalrequestsauditors` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`approvalrequestid` int(11) NOT NULL,\
`auditor` VARCHAR(48) NOT NULL,\
`maxdays` INT(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsapprovalrequestsauditors...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsapprovalrequestsauditors' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsapprovalrequestvotes table for impact actions
createactions="CREATE TABLE `impactactionsapprovalrequestauditorvotes` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`approvalrequestid` int(11) NOT NULL,\
`vote` VARCHAR(1) NOT NULL,\
`otherinfo` VARCHAR(66) NOT NULL,\
`dtrewards` DATETIME NOT NULL,\
CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsapprovalrequestauditorvotes...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsapprovalrequestauditorvotes' already exists"):
print(err.msg)
else:
print("OK")
# creating assets table for FT
createassets="CREATE TABLE `ftassets` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`assetid` int(11) NOT NULL,\
`owner` VARCHAR(48) NOT NULL,\
`maxzombies` int(11) NOT NULL,\
`minbalance` int(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table ftassets...")
cursor.execute(createassets)
except mysql.connector.Error as err:
if(err.msg!="Table 'ftassets' already exists"):
print(err.msg)
else:
print("OK")
# creating fttransactions table for fungible tokens
createassets="CREATE TABLE `fttransactions` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`sender` VARCHAR(48) NOT NULL,\
`category` VARCHAR(20) NOT NULL,\
`assetid` int(11) NOT NULL,\
`recipient` VARCHAR(48) NOT NULL,\
`amount` int(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table fttransactions...")
cursor.execute(createassets)
except mysql.connector.Error as err:
if(err.msg!="Table 'fttransactions' already exists"):
print(err.msg)
else:
print("OK")
#closing database
cursor.close()
cnx.close()
# function to synchronise the blockchain, reading old blocks not yet loaded
def sync_blockchain(substrate):
# we get the last block from the blockchain
r=substrate.rpc_request(method='chain_getHeader',params=[],result_handler=None)
rs=r.get('result')
lastblockhex=rs.get('number')
lastblocknumber=int(lastblockhex,16)
print("[Info] Last Block: ",lastblocknumber)
# we check the last block reconciled
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
cursor = cnx.cursor(dictionary=True)
lastblocknumberverified=0
query="select * from sync limit 1"
try:
cursor.execute(query)
for row in cursor:
lastblocknumberverified=row['lastblocknumberverified']
#lastblocknumberverified=row.get('lastblocknumberverified')
except mysql.connector.Error as err:
print(err.msg)
lastblocknumberverified=0
print("[INFO] Last block number verified:",lastblocknumberverified)
# loop over the block numbers to find gaps and fill them if needed
x=lastblocknumberverified+1
cursor.close()
cursorb = cnx.cursor()
print("[INFO] Syncing previous blocks...")
while x<=lastblocknumber:
# get block data
print("Syncing block # ",x)
# process the block of data
process_block(x)
# update sync
sqlst=""
if(lastblocknumberverified==0):
sqlst="insert into sync set lastblocknumberverified="+str(x)
else:
sqlst="update sync set lastblocknumberverified="+str(x)
try:
cursorb.execute(sqlst)
cnx.commit()
except mysql.connector.Error as err:
print(err.msg)
lastblocknumberverified=x
# increase block number
x=x+1
#end while loop
cursorb.close()
cnx.close()
# function to store a new transaction
def store_transaction(blocknumber,txhash,sender,recipient,amount,currenttime,gasfees):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Storing New Transaction")
print("TxHash: ",txhash)
print("Current time: ",currentime)
print("Sender: ",sender)
print("Recipient: ",recipient)
print("Amount: ",amount)
print("`Gas fees`: ",gasfees)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into transactions set blocknumber=%s,txhash=%s,sender=%s,recipient=%s,amount=%s,gasfees=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,sender,recipient,amount,gasfees,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print(err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Impact Action
def impactactions_newimpactaction(blocknumber,txhash,signer,currenttime,idimpactaction,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
j=json.loads(data)
print("Storing New Impact Action")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id: ",idimpactaction)
print("Data: ",data)
print("Category: ",j['category'])
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactions set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s"
addtx=addtx+",description=%s,category=%s,auditors=%s,blockstart=%s,blockend=%s,rewardstoken=%s,rewardsamount=%s,rewardsoracle=%s"
addtx=addtx+",rewardauditors=%s,slashingsauditors=%s,maxerrorsauditor=%s,fields=%s"
if 'fields' in j:
f=j['fields']
else:
f={}
datatx=(blocknumber,txhash,signer,dtblockchain,idimpactaction,j['description'],j['category'],j['auditors'],j['blockstart'],j['blockend'],j['rewardstoken'],j['rewardsamount'],j['rewardsoracle'],j['rewardsauditors'],j['slashingsauditors'],j['maxerrorsauditor'],json.dumps(f))
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Impact Actions
def impactactions_destroyimpactaction(blocknumber,txhash,signer,currenttime,idimpactaction):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Impact Action")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id Impact Action: ",idimpactaction)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactions where id=%s"
datatx=(idimpactaction,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Oracle
def impactactions_neworacle(blocknumber,txhash,signer,currenttime,idoracle,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
j=json.loads(data)
print("Storing New Oracle")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id: ",idoracle)
print("Data: ",data)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsoracles set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s"
addtx=addtx+",description=%s,account=%s,otherinfo=%s"
if 'otherinfo' in j:
o=j['otherinfo']
else:
o=''
datatx=(blocknumber,txhash,signer,dtblockchain,idoracle,j['description'],j['account'],o)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Oracle
def impactactions_destroyoracle(blocknumber,txhash,signer,currenttime,idoracle):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Oracle")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id Oracle: ",idoracle)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsoracles where id=%s"
datatx=(idoracle,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Approval Request
def impactactions_newapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,info):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
print("Storing New Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id: ",approvalrequestid)
print("Info: ",info)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsapprovalrequests set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s,info=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,info)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Vote Approval Request
def impactactions_voteapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
j=json.loads(data)
vote=j['vote']
otherinfo=j['otherinfo']
print("Storing Vote of an Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id Approval: ",approvalrequestid)
print("Vote: ",vote)
print("Other Info: ",otherinfo)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsapprovalrequestauditorvotes set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,approvalrequestid=%s,vote=%s,otherinfo=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,vote,otherinfo)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Assign Auditor to Approval Request
def impactactions_assignauditorapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,auditor,maxdays):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
print("Storing Assigned Auditor for an Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Approval Request Id: ",approvalrequestid)
print("Auditor: ",auditor)
print("Max days: ",maxdays)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsapprovalrequestsauditors set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,approvalrequestid=%s,auditor=%s,maxdays=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,auditor,maxdays)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Assigned Auditor of an Approval Request
def impactactions_destroy_assignedauditorapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,auditor):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Assigned Auditor to an Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Approval Request id: ",approvalrequestid)
print("Auditor: ",auditor)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsapprovalrequestsauditors where approvalrequestid=%s and auditor=%s"
datatx=(approvalrequestid,auditor)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Auditor
def impactactions_newauditor(blocknumber,txhash,signer,currenttime,account,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
j=json.loads(data)
print("Storing New Auditor")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Account: ",account)
print("Data: ",data)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsauditors set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s"
addtx=addtx+",description=%s,account=%s,categories=%s,area=%s,otherinfo=%s"
if 'otherinfo' in j:
o=j['otherinfo']
else:
o=''
datatx=(blocknumber,txhash,signer,dtblockchain,j['description'],account,json.dumps(j['categories']),j['area'],o)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Auditor
def impactactions_destroyauditor(blocknumber,txhash,signer,currenttime,account):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Auditor")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("account: ",account)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsauditors where account=%s"
datatx=(account,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Proxy
def impactactions_newproxy(blocknumber,txhash,signer,currenttime,idproxy, account):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Storing New Proxy")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Account: ",account)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsproxy set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s"
addtx=addtx+",id=%s,account=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,idproxy,account)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Proxy
def impactactions_destroyproxy(blocknumber,txhash,signer,currenttime,idproxy):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Proxy")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("id Proxy: ",idproxy)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsproxy where id=%s"
datatx=(idproxy,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Category
def impactactions_newcategory(blocknumber,txhash,signer,currenttime,idcategory,description):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Storing New Category")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id category: ",idcategory)
print("Description: ",description)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionscategories set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s,description=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,idcategory,description)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Category
def impactactions_destroycategory(blocknumber,txhash,signer,currenttime,idcategory):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Category")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id category: ",idcategory)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionscategories where id=%s"
datatx=(idcategory,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to create new asset from Sudo
def assets_force_create(blocknumber,txhash,signer,currenttime,assetid,owner,maxzombies,minbalance):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Create Asset (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Owner : ",owner)
print("Max Zombies : ",maxzombies)
print("Min Balance : ",minbalance)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into ftassets set blocknumber=%s,txhash=%s,signer=%s,assetid=%s,owner=%s,maxzombies=%s,minbalance=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,assetid,owner,maxzombies,minbalance,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to mint assets in favor of an account
def assets_mint(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Minted"
print("Mint Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to burn assets, decreasing the balance of an account
def assets_burn(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Burned"
print("Burn Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to transfer assets in favor of an account
def assets_transfer(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Transfer"
print("Mint Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to force transfer assets in favor of an account
def assets_forcetransfer(blocknumber,txhash,signer,sender,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Transfer"
print("Mint Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,sender,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to destroy asset (Fungible Tokens) from Sudo
def assets_force_destroy(blocknumber,txhash,signer,currenttime,assetid,witnesszombies):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Asset (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id: ",assetid)
print("Witnesses Zombies: ",witnesszombies)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from ftassets where assetid=%s"
datatx=(assetid,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to process a block of data
def process_block(blocknumber):
# Retrieve extrinsics in block
print("Processing Block # ",blocknumber)
result = substrate.get_block(block_number=blocknumber)
print ("##########################")
print(result)
print("Block Hash: ",result['header']['hash'])
blockhash=result['header']['hash']
print ("##########################")
events=substrate.get_events(result['header']['hash'])
print ("#######EVENTS##############")
print(events)
print ("##########################")
# retrieve receipt
cnt=0
for extrinsic in result['extrinsics']:
if extrinsic.address:
signed_by_address = extrinsic.address.value
else:
signed_by_address = None
print('\nPallet: {}\nCall: {}\nSigned by: {}'.format(
extrinsic.call_module.name,
extrinsic.call.name,
signed_by_address
))
# check for extrinsic success or failure
try:
error=events[cnt].params[0]['value'].get('Error')
except:
error=None
if events[cnt].event.name=="ExtrinsicFailed" or error!=None :
print("Extrinsic has failed")
cnt=cnt+1
continue
else:
print("Extrinsic succeded: ",events[cnt].event.name)
print("extrinsic.extrinsic_hash: ",extrinsic.extrinsic_hash)
print("extrinsic: ",extrinsic)
print("blockhash: ",blockhash)
gasfees=0
if (extrinsic.extrinsic_hash!=None):
# get receipt of the extrinsic
receipt = ExtrinsicReceipt(
substrate=substrate,
extrinsic_hash=extrinsic.extrinsic_hash,
block_hash=blockhash
)
print("************RECEIPT**************")
print("blockhash: ",blockhash)
print("extrinsic.extrinsic_hash: ",extrinsic.extrinsic_hash)
print("receipt.total_fee_amount: ",receipt.total_fee_amount)
print(receipt.is_success)
print(receipt.extrinsic.call_module.name)
print(receipt.extrinsic.call.name)
print(receipt.weight)
print("*********************************")
gasfees=receipt.total_fee_amount
# for the Timestamp call we set the time used for the following transactions
if extrinsic.call_module.name=="Timestamp" and extrinsic.call.name=="set":
currentime=extrinsic.params[0]['value']
# Balances transfer: we store the transaction
if extrinsic.call_module.name=="Balances" and ( extrinsic.call.name=="transfer" or extrinsic.call.name=="transfer_keep_alive"):
## store the transaction in the database
store_transaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,extrinsic.params[0]['value'],extrinsic.params[1]['value'],currentime,gasfees)
#Impact Actions - Vote Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="vote_approval_request":
impactactions_voteapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Impact Actions - New Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="request_approval":
impactactions_newapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Impact Actions - Assign Auditor to Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="assign_auditor":
impactactions_assignauditorapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
#Impact Actions - Remove Assigned Auditor to Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="destroy_assigned_auditor":
impactactions_destroy_assignedauditorapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Assets - Create new asset as regular user
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="create":
assets_force_create(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'],extrinsic.params[3]['value'])
#Assets - Destroy asset as regular user
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="destroy":
assets_force_destroy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Assets - Mint assets in favor of an account
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="mint":
assets_mint(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
#Assets - Burn assets decreasing the balance of an account
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="burn":
assets_burn(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
#Assets - Transfer assets in favor of an account
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="transfer":
assets_transfer(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
# Sudo Calls
if extrinsic.call_module.name=="Sudo" and extrinsic.call.name=="sudo":
print(extrinsic.params[0].get('value'))
c=extrinsic.params[0].get('value')
# new impact action
if c['call_module']== 'ImpactActions' and c['call_function']=='create_impact_action':
print("Impact Actions - Create New Impact Action")
print("id: ",c['call_args'][0]['value'])
print("data: ",c['call_args'][1]['value'])
impactactions_newimpactaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy impact action
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_impact_action':
print("Impact Actions - Destroy Impact Action")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyimpactaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new oracle
if c['call_module']== 'ImpactActions' and c['call_function']=='create_oracle':
print("Impact Actions - Create New Oracle")
print("id: ",c['call_args'][0]['value'])
print("data: ",c['call_args'][1]['value'])
impactactions_neworacle(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy oracle
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_oracle':
print("Impact Actions - Destroy Oracle")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyoracle(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new auditor
if c['call_module']== 'ImpactActions' and c['call_function']=='create_auditor':
print("Impact Actions - Create New Auditor")
print("id: ",c['call_args'][0]['value'])
print("data: ",c['call_args'][1]['value'])
impactactions_newauditor(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy auditor
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_auditor':
print("Impact Actions - Destroy Auditor")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyauditor(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new proxy account
if c['call_module']== 'ImpactActions' and c['call_function']=='create_proxy':
print("Impact Actions - Create New Proxy")
print("id: ",c['call_args'][0]['value'])
print("account: ",c['call_args'][1]['value'])
impactactions_newproxy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy proxy
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_proxy':
print("Impact Actions - Destroy Proxy")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyproxy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new category
if c['call_module']== 'ImpactActions' and c['call_function']=='create_category':
print("Impact Actions - Create New Category")
print("id: ",c['call_args'][0]['value'])
print("description: ",c['call_args'][1]['value'])
impactactions_newcategory(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy category
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_category':
print("Impact Actions - Destroy Category")
print("id: ",c['call_args'][0]['value'])
impactactions_destroycategory(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# Force Create Asset
if c['call_module']== 'Assets' and c['call_function']=='force_create':
print("Fungibile Tokens - Create Asset")
print("id: ",c['call_args'][0]['value'])
print("Owner: ",c['call_args'][1]['value'])
print("Max Zombies: ",c['call_args'][2]['value'])
print("Minimum Deposit: ",c['call_args'][3]['value'])
assets_force_create(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'],c['call_args'][2]['value'],c['call_args'][3]['value'])
# Force transfer Assets
if c['call_module']== 'Assets' and c['call_function']=='force_transfer':
print("Fungible Tokens - Force Transfer")
print("id: ",c['call_args'][0]['value'])
print("Witnesses Zombies: ",c['call_args'][1]['value'])
assets_forcetransfer(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,c['call_args'][1]['value'],currentime,c['call_args'][0]['value'],c['call_args'][2]['value'],c['call_args'][3]['value'])
# Force Destroy Asset
if c['call_module']== 'Assets' and c['call_function']=='force_destroy':
print("Fungible Tokens - Create Asset")
print("id: ",c['call_args'][0]['value'])
print("Witnesses Zombies: ",c['call_args'][1]['value'])
assets_force_destroy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# Loop through call params
for param in extrinsic.params:
if param['type'] == 'Compact<Balance>':
param['value'] = '{} {}'.format(param['value'] / 10 ** substrate.token_decimals, substrate.token_symbol)
print("Param '{}': {}".format(param['name'], param['value']))
cnt=cnt+1
# subscription handler for newly written blocks
def subscription_handler(obj, update_nr, subscription_id):
print(f"New block #{obj['header']['number']} produced by {obj['author']} hash: {obj['header']['hash']}")
# call the block management function
process_block(obj['header']['number'])
## MAIN
# load custom data types
custom_type_registry = load_type_registry_file("../assets/types.json")
# define connection parameters
substrate = SubstrateInterface(
url=NODE,
ss58_format=42,
type_registry_preset='default',
type_registry=custom_type_registry
)
# create database tables
create_tables()
# synchronise the blockchain
if(len(sys.argv)>1):
if (sys.argv[1]== '--sync' or sys.argv[1]=="-s"):
sync_blockchain(substrate)
# subscribe to new block writing and process them in real time
result = substrate.subscribe_block_headers(subscription_handler, include_author=True)
print(result)
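# --- Illustrative usage (not part of the original script; the filename and values are assumptions) ---
# The loader appears to read its settings from the DB_USER, DB_PWD, DB_HOST, DB_NAME and NODE
# variables defined earlier in the file, and accepts an optional --sync / -s flag (handled above)
# to backfill blocks written while it was offline, e.g.:
#
#   DB_USER=indexer DB_PWD=secret DB_HOST=127.0.0.1 DB_NAME=blockchain NODE=ws://127.0.0.1:9944 \
#   python3 cache-blockchain.py --sync      # hypothetical filename; the actual script name may differ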
|
[] |
[] |
[
"DB_HOST",
"DB_NAME",
"NODE",
"DB_PWD",
"DB_USER"
] |
[]
|
["DB_HOST", "DB_NAME", "NODE", "DB_PWD", "DB_USER"]
|
python
| 5 | 0 | |
bccsp/pkcs11/impl.go
|
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package pkcs11
import (
"crypto/ecdsa"
"crypto/rsa"
"crypto/x509"
"os"
"github.com/hyperledger/fabric/bccsp"
"github.com/hyperledger/fabric/bccsp/sw"
"github.com/hyperledger/fabric/common/flogging"
"github.com/miekg/pkcs11"
"github.com/pkg/errors"
)
var (
logger = flogging.MustGetLogger("bccsp_p11")
sessionCacheSize = 10
)
// New returns a new instance of the PKCS11-based BCCSP
// set at the passed security level, hash family and KeyStore.
func New(opts PKCS11Opts, keyStore bccsp.KeyStore) (bccsp.BCCSP, error) {
// Init config
conf := &config{}
err := conf.setSecurityLevel(opts.SecLevel, opts.HashFamily)
if err != nil {
return nil, errors.Wrapf(err, "Failed initializing configuration")
}
swCSP, err := sw.NewWithParams(opts.SecLevel, opts.HashFamily, keyStore)
if err != nil {
return nil, errors.Wrapf(err, "Failed initializing fallback SW BCCSP")
}
// Check KeyStore
if keyStore == nil {
return nil, errors.New("Invalid bccsp.KeyStore instance. It must be different from nil")
}
lib := opts.Library
pin := opts.Pin
label := opts.Label
ctx, slot, session, err := loadLib(lib, pin, label)
if err != nil {
return nil, errors.Wrapf(err, "Failed initializing PKCS11 library %s %s",
lib, label)
}
sessions := make(chan pkcs11.SessionHandle, sessionCacheSize)
csp := &impl{swCSP, conf, keyStore, ctx, sessions, slot, lib, opts.SoftVerify, opts.Immutable}
csp.returnSession(*session)
return csp, nil
}
type impl struct {
bccsp.BCCSP
conf *config
ks bccsp.KeyStore
ctx *pkcs11.Ctx
sessions chan pkcs11.SessionHandle
slot uint
lib string
softVerify bool
//Immutable flag makes object immutable
immutable bool
}
// KeyGen generates a key using opts.
func (csp *impl) KeyGen(opts bccsp.KeyGenOpts) (k bccsp.Key, err error) {
// Validate arguments
if opts == nil {
return nil, errors.New("Invalid Opts parameter. It must not be nil")
}
// Parse algorithm
switch opts.(type) {
case *bccsp.ECDSAKeyGenOpts:
ski, pub, err := csp.generateECKey(csp.conf.ellipticCurve, opts.Ephemeral())
if err != nil {
return nil, errors.Wrapf(err, "Failed generating ECDSA key")
}
k = &ecdsaPrivateKey{ski, ecdsaPublicKey{ski, pub}}
case *bccsp.ECDSAP256KeyGenOpts:
ski, pub, err := csp.generateECKey(oidNamedCurveP256, opts.Ephemeral())
if err != nil {
return nil, errors.Wrapf(err, "Failed generating ECDSA P256 key")
}
k = &ecdsaPrivateKey{ski, ecdsaPublicKey{ski, pub}}
case *bccsp.ECDSAP384KeyGenOpts:
ski, pub, err := csp.generateECKey(oidNamedCurveP384, opts.Ephemeral())
if err != nil {
return nil, errors.Wrapf(err, "Failed generating ECDSA P384 key")
}
k = &ecdsaPrivateKey{ski, ecdsaPublicKey{ski, pub}}
default:
return csp.BCCSP.KeyGen(opts)
}
return k, nil
}
// KeyImport imports a key from its raw representation using opts.
// The opts argument should be appropriate for the primitive used.
func (csp *impl) KeyImport(raw interface{}, opts bccsp.KeyImportOpts) (k bccsp.Key, err error) {
// Validate arguments
if raw == nil {
return nil, errors.New("Invalid raw. Cannot be nil")
}
if opts == nil {
return nil, errors.New("Invalid Opts parameter. It must not be nil")
}
switch opts.(type) {
case *bccsp.X509PublicKeyImportOpts:
x509Cert, ok := raw.(*x509.Certificate)
if !ok {
return nil, errors.New("[X509PublicKeyImportOpts] Invalid raw material. Expected *x509.Certificate")
}
pk := x509Cert.PublicKey
switch pk.(type) {
case *ecdsa.PublicKey:
return csp.KeyImport(pk, &bccsp.ECDSAGoPublicKeyImportOpts{Temporary: opts.Ephemeral()})
case *rsa.PublicKey:
return csp.KeyImport(pk, &bccsp.RSAGoPublicKeyImportOpts{Temporary: opts.Ephemeral()})
default:
return nil, errors.New("Certificate's public key type not recognized. Supported keys: [ECDSA, RSA]")
}
default:
return csp.BCCSP.KeyImport(raw, opts)
}
}
// GetKey returns the key this CSP associates to
// the Subject Key Identifier ski.
func (csp *impl) GetKey(ski []byte) (bccsp.Key, error) {
pubKey, isPriv, err := csp.getECKey(ski)
if err == nil {
if isPriv {
return &ecdsaPrivateKey{ski, ecdsaPublicKey{ski, pubKey}}, nil
}
return &ecdsaPublicKey{ski, pubKey}, nil
}
return csp.BCCSP.GetKey(ski)
}
// Sign signs digest using key k.
// The opts argument should be appropriate for the primitive used.
//
// Note that when a signature of a hash of a larger message is needed,
// the caller is responsible for hashing the larger message and passing
// the hash (as digest).
func (csp *impl) Sign(k bccsp.Key, digest []byte, opts bccsp.SignerOpts) ([]byte, error) {
// Validate arguments
if k == nil {
return nil, errors.New("Invalid Key. It must not be nil")
}
if len(digest) == 0 {
return nil, errors.New("Invalid digest. Cannot be empty")
}
// Check key type
switch key := k.(type) {
case *ecdsaPrivateKey:
return csp.signECDSA(*key, digest, opts)
default:
return csp.BCCSP.Sign(key, digest, opts)
}
}
// Verify verifies signature against key k and digest
func (csp *impl) Verify(k bccsp.Key, signature, digest []byte, opts bccsp.SignerOpts) (bool, error) {
// Validate arguments
if k == nil {
return false, errors.New("Invalid Key. It must not be nil")
}
if len(signature) == 0 {
return false, errors.New("Invalid signature. Cannot be empty")
}
if len(digest) == 0 {
return false, errors.New("Invalid digest. Cannot be empty")
}
// Check key type
switch key := k.(type) {
case *ecdsaPrivateKey:
return csp.verifyECDSA(key.pub, signature, digest, opts)
case *ecdsaPublicKey:
return csp.verifyECDSA(*key, signature, digest, opts)
default:
return csp.BCCSP.Verify(k, signature, digest, opts)
}
}
// Encrypt encrypts plaintext using key k.
// The opts argument should be appropriate for the primitive used.
func (csp *impl) Encrypt(k bccsp.Key, plaintext []byte, opts bccsp.EncrypterOpts) ([]byte, error) {
// TODO: Add PKCS11 support for encryption, when fabric starts requiring it
return csp.BCCSP.Encrypt(k, plaintext, opts)
}
// Decrypt decrypts ciphertext using key k.
// The opts argument should be appropriate for the primitive used.
func (csp *impl) Decrypt(k bccsp.Key, ciphertext []byte, opts bccsp.DecrypterOpts) ([]byte, error) {
return csp.BCCSP.Decrypt(k, ciphertext, opts)
}
// FindPKCS11Lib IS ONLY USED FOR TESTING
// This is a convenience function. Useful to self-configure, for tests where usual configuration is not
// available
func FindPKCS11Lib() (lib, pin, label string) {
//FIXME: Till we workout the configuration piece, look for the libraries in the familiar places
lib = os.Getenv("PKCS11_LIB")
if lib == "" {
pin = "98765432"
label = "ForFabric"
possibilities := []string{
"/usr/lib/softhsm/libsofthsm2.so", //Debian
"/usr/lib/x86_64-linux-gnu/softhsm/libsofthsm2.so", //Ubuntu
"/usr/lib/s390x-linux-gnu/softhsm/libsofthsm2.so", //Ubuntu
"/usr/lib/powerpc64le-linux-gnu/softhsm/libsofthsm2.so", //Power
"/usr/local/Cellar/softhsm/2.5.0/lib/softhsm/libsofthsm2.so", //MacOS
}
for _, path := range possibilities {
if _, err := os.Stat(path); !os.IsNotExist(err) {
lib = path
break
}
}
} else {
pin = os.Getenv("PKCS11_PIN")
label = os.Getenv("PKCS11_LABEL")
}
return lib, pin, label
}
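// --- Illustrative wiring (a sketch, not part of this file) ---
// Tests typically pair FindPKCS11Lib with New; the SecLevel/HashFamily values below are
// assumptions, and keyStore stands for any pre-built bccsp.KeyStore:
//
//	lib, pin, label := FindPKCS11Lib()
//	opts := PKCS11Opts{SecLevel: 256, HashFamily: "SHA2", Library: lib, Pin: pin, Label: label}
//	csp, err := New(opts, keyStore)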
|
[
"\"PKCS11_LIB\"",
"\"PKCS11_PIN\"",
"\"PKCS11_LABEL\""
] |
[] |
[
"PKCS11_PIN",
"PKCS11_LIB",
"PKCS11_LABEL"
] |
[]
|
["PKCS11_PIN", "PKCS11_LIB", "PKCS11_LABEL"]
|
go
| 3 | 0 | |
functions/source/IndexS3TranscriptionDataIntoES/upload_to_elasticsearch.py
|
import boto3
import certifi
import json
import os
from aws_requests_auth.aws_auth import AWSRequestsAuth
from elasticsearch import Elasticsearch, RequestsHttpConnection
import logging
# Log level
logging.basicConfig()
logger = logging.getLogger()
# Parameters
REGION = os.getenv('AWS_REGION')
esendpoint = os.environ['ES_DOMAIN']
FULL_EPISODE_INDEX = os.getenv('ES_EPISODE_INDEX', default='call-transcript')
FULL_EPISODE_DOCTYPE = os.getenv('FULL_EPISODE_DOCTYPE', default='doc')
s3_client = boto3.client('s3')
# Create the auth token for the sigv4 signature
session = boto3.session.Session()
credentials = session.get_credentials().get_frozen_credentials()
awsauth = AWSRequestsAuth(
aws_access_key=credentials.access_key,
aws_secret_access_key=credentials.secret_key,
aws_token=credentials.token,
aws_host=esendpoint,
aws_region=REGION,
aws_service='es'
)
# Connect to the elasticsearch cluster using aws authentication. The lambda function
# must have access in an IAM policy to the ES cluster.
es = Elasticsearch(
hosts=[{'host': esendpoint, 'port': 443}],
http_auth=awsauth,
use_ssl=True,
verify_certs=True,
ca_certs=certifi.where(),
timeout=120,
connection_class=RequestsHttpConnection
)
# Entry point into the lambda function
def lambda_handler(event, context):
fullEpisodeS3Location = event["processedTranscription"][0]
index_episode(es, event, fullEpisodeS3Location)
return
def index_episode(es, event, fullEpisodeS3Location):
response = s3_client.get_object(Bucket=fullEpisodeS3Location['bucket'], Key=fullEpisodeS3Location['key'])
file_content = response['Body'].read().decode('utf-8')
fullepisode = json.loads(file_content)
s3_location = "s3://" + event['bucket'] + "/" + event['key']
s = event['key'].split('_')[1]
contact_id = event['key'].split('/')[-1].split('_')[0]
updateDoc = {
'doc':{
'audio_type': event['audio_type'],
'audio_s3_location': s3_location,
'contact_id': contact_id,
'LastUpdateTimestamp': s[0:4] + '-' + s[4:6] + '-' + s[6:8] + 'T' + s.split('T')[1] + 'Z',
'transcript': fullepisode['transcript'],
'agent_transcript': fullepisode['agent'],
'customer_transcript': fullepisode['customer'],
'transcript_keyphrases': fullepisode['key_phrases'],
'transcript_entities': fullepisode['transcript_entities'],
'customer_keyphrases': fullepisode['customer_phrases'],
'customer_entities': fullepisode['customer_entities'],
'agent_keyphrases': fullepisode['agent_key_phrases'],
'agent_entities': fullepisode['agent_entities'],
'agent_sentiment': fullepisode['agent_sentiment'],
'customer_sentiment': fullepisode['customer_sentiment'],
'type': 'CallRecord'
},
"doc_as_upsert" : True
}
es.update(index=FULL_EPISODE_INDEX, doc_type=FULL_EPISODE_DOCTYPE, body=updateDoc, id=contact_id)
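# --- Illustrative event shape (an assumption inferred from the fields read above, not a spec) ---
# lambda_handler expects roughly:
#   {
#     "bucket": "my-audio-bucket",
#     "key": "recordings/abc123_20210101T120000_analysis.wav",
#     "audio_type": "audio/wav",
#     "processedTranscription": [{"bucket": "my-results-bucket", "key": "processed/abc123.json"}]
#   }
# where the object referenced by processedTranscription[0] holds the transcript, key-phrase,
# entity and sentiment fields used in index_episode.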
|
[] |
[] |
[
"ES_EPISODE_INDEX",
"ES_DOMAIN",
"FULL_EPISODE_DOCTYPE",
"AWS_REGION"
] |
[]
|
["ES_EPISODE_INDEX", "ES_DOMAIN", "FULL_EPISODE_DOCTYPE", "AWS_REGION"]
|
python
| 4 | 0 | |
farm/file_utils.py
|
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import fnmatch
import json
import logging
import os
import shutil
import sys
import tempfile
from functools import wraps
from hashlib import sha256
from io import open
import boto3
import numpy as np
import requests
from botocore.exceptions import ClientError
from dotmap import DotMap
from tqdm import tqdm
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv(
"TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")
)
)
default_cache_path = os.path.join(torch_cache_home, "farm")
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
FARM_CACHE = Path(os.getenv("FARM_CACHE", default_cache_path))
except (AttributeError, ImportError):
FARM_CACHE = os.getenv("FARM_CACHE", default_cache_path)
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
return filename
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = FARM_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = FARM_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ("http", "https", "s3"):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError(
"unable to parse {} as a URL or as a local path".format(url_or_filename)
)
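# Illustrative usage (URL and path are placeholders):
#   cached_path("https://example.com/vocab.txt")  # downloads once, then returns the cached copy
#   cached_path("/data/vocab.txt")                # returns the path unchanged if the file exists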
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
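# Example (bucket and key are placeholders):
#   split_s3_path("s3://my-bucket/models/weights.bin") returns ("my-bucket", "models/weights.bin")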
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file, proxies=None):
req = requests.get(url, stream=True, proxies=proxies)
content_length = req.headers.get("Content-Length")
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = FARM_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode("utf-8")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + ".*")
matching_files = list(filter(lambda s: not s.endswith(".json"), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, "wb") as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
output_string = json.dumps(meta)
if sys.version_info[0] == 2 and isinstance(output_string, str):
output_string = unicode(
output_string, "utf-8"
) # The beauty of python 2
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
"""
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
"""
collection = set()
with open(filename, "r", encoding="utf-8") as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
def read_config(path, flattend=False):
if path:
with open(path) as json_data_file:
conf_args = json.load(json_data_file)
else:
raise ValueError("No config provided for classifier")
def getArgValue(arg):
if "value" not in arg:
logger.error(
"Only depth 2 config files supported. Failed to convert: %s" % str(arg)
)
return arg["value"] if (arg["value"] is not None) else arg["default"]
# flatten last part of config, take either value or default as value
for gk, gv in conf_args.items():
for k, v in gv.items():
if isinstance(getArgValue(v), dict):
logger.error("Config is too deeply nested, at %s" % str(v))
conf_args[gk][k] = getArgValue(v)
# DotMap for making nested dictionary accessible through dot notation
flat_args = dict(
conf_args["general"],
**conf_args["task"],
**conf_args["parameter"],
**conf_args["logging"],
)
if flattend:
args = DotMap(flat_args, _dynamic=False)
else:
args = DotMap(conf_args, _dynamic=False)
return args
def unnestConfig(config, flattened=False):
"""
This function creates a list of config objects for evaluating parameters with different values. If a config parameter
is of type list, the list is iterated over and one config object without lists is returned per combination. Can handle
lists inside any number of parameters.
Can handle shallow or nested (one level) configs.
"""
nestedKeys = []
nestedVals = []
if flattened:
for k, v in config.items():
if isinstance(v, list):
if k != "layer_dims": # exclude layer dims, since it is already a list
nestedKeys.append(k)
nestedVals.append(v)
else:
for gk, gv in config.items():
if(gk != "task"):
for k, v in gv.items():
if isinstance(v, list):
if isinstance(v, list):
if (
k != "layer_dims"
): # exclude layer dims, since it is already a list
nestedKeys.append([gk, k])
nestedVals.append(v)
elif isinstance(v, dict):
logger.error("Config too deep!")
if len(nestedKeys) == 0:
unnestedConfig = [config]
else:
if flattened:
logger.info("Nested config at parameters: %s" % (", ".join(nestedKeys)))
else:
logger.info(
"Nested config at parameters: %s"
% (", ".join(".".join(x) for x in nestedKeys))
)
unnestedConfig = []
mesh = np.meshgrid(
*nestedVals
) # get all combinations, each dimension corresponds to one parameter type
# flatten mesh into shape: [num_parameters, num_combinations] so we can iterate in 2d over any parameter combinations
mesh = [x.flatten() for x in mesh]
# loop over all combinations
for i in range(len(mesh[0])):
tempconfig = config.copy()
for j, k in enumerate(nestedKeys):
if isinstance(k, str):
tempconfig[k] = mesh[j][
i
] # get ith val of correct param value and overwrite original config
elif len(k) == 2:
tempconfig[k[0]][k[1]] = mesh[j][i] # set nested dictionary keys
else:
logger.error("Config too deep!")
unnestedConfig.append(tempconfig)
return unnestedConfig
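# --- Illustrative expansion (a sketch; the keys and values are assumptions) ---
# With a flattened config where one parameter is a list, e.g.
#   flat = DotMap({"learning_rate": [1e-3, 1e-4], "batch_size": 32, "layer_dims": [768, 2]}, _dynamic=False)
#   unnestConfig(flat, flattened=True)
# two configs are returned, one per learning_rate value; "layer_dims" keeps its list because it
# is explicitly excluded above.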
|
[] |
[] |
[
"FARM_CACHE",
"XDG_CACHE_HOME",
"TORCH_HOME"
] |
[]
|
["FARM_CACHE", "XDG_CACHE_HOME", "TORCH_HOME"]
|
python
| 3 | 0 | |
amaztv/wsgi.py
|
"""
WSGI config for amaztv project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "amaztv.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
api/main.go
|
package main
import (
"os"
"srb/domain/database"
"srb/domain/routing"
"github.com/joho/godotenv"
)
// @title book_Impressions_back
// @version 1.0.0
// @description API of software to describe impressions of books.
// @license.name MIT
// @license.url https://opensource.org/licenses/MIT
// @host Secret
// @BasePath /
func main() {
if os.Getenv("ENVIROMENT") != "production" {
err := godotenv.Load()
if err != nil {
panic(err.Error())
}
}
database.Connet()
routing.Routing()
}
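// --- Illustrative local setup (a sketch; ENVIROMENT is the only variable read in this file) ---
// When ENVIROMENT is not "production", godotenv.Load() reads a .env file from the working
// directory, e.g. a file containing just:
//
//	ENVIROMENT=development
//
// plus whatever settings the database package expects (not shown here).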
|
[
"\"ENVIROMENT\""
] |
[] |
[
"ENVIROMENT"
] |
[]
|
["ENVIROMENT"]
|
go
| 1 | 0 | |
test/run_test.py
|
#!/usr/bin/env python3
import argparse
import copy
from datetime import datetime
import json
import modulefinder
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import TEST_WITH_ROCM, shell, set_cwd, FILE_SCHEMA
from torch.testing._internal.framework_utils import calculate_shards
import torch.distributed as dist
from typing import Dict, Optional, Tuple, List, Any
from typing_extensions import TypedDict
try:
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
from tools.stats_utils.s3_stat_parser import (get_previous_reports_for_branch, Report, HAVE_BOTO3)
except ImportError:
print("Unable to import s3_stat_parser from tools. Running without S3 stats...")
HAVE_BOTO3 = False
TESTS = [
'test_import_time',
'test_public_bindings',
'test_type_hints',
'test_autograd',
'benchmark_utils/test_benchmark_utils',
'test_binary_ufuncs',
'test_bundled_inputs',
'test_complex',
'test_cpp_api_parity',
'test_cpp_extensions_aot_no_ninja',
'test_cpp_extensions_aot_ninja',
'test_cpp_extensions_jit',
'distributed/test_c10d_common',
'distributed/test_c10d_gloo',
'distributed/test_c10d_nccl',
'distributed/test_jit_c10d',
'distributed/test_c10d_spawn_gloo',
'distributed/test_c10d_spawn_nccl',
'test_cuda',
'test_jit_cuda_fuser',
'test_cuda_primary_ctx',
'test_dataloader',
'test_datapipe',
'distributed/test_data_parallel',
'distributed/test_distributed_fork',
'distributed/test_distributed_spawn',
'distributions/test_constraints',
'distributions/test_distributions',
'test_dispatch',
'test_expecttest',
'test_foreach',
'test_indexing',
'test_jit',
'test_linalg',
'test_logging',
'test_mkldnn',
'test_model_dump',
'test_module_init',
'test_multiprocessing',
'test_multiprocessing_spawn',
'distributed/test_nccl',
'test_native_functions',
'test_numba_integration',
'test_nn',
'test_ops',
'test_optim',
'test_pytree',
'test_mobile_optimizer',
'test_set_default_mobile_cpu_allocator',
'test_xnnpack_integration',
'test_vulkan',
'test_sparse',
'test_quantization',
'test_pruning_op',
'test_spectral_ops',
'test_serialization',
'test_shape_ops',
'test_show_pickle',
'test_sort_and_select',
'test_tensor_creation_ops',
'test_testing',
'test_torch',
'test_type_info',
'test_unary_ufuncs',
'test_utils',
'test_view_ops',
'test_vmap',
'test_namedtuple_return_api',
'test_numpy_interop',
'test_jit_profiling',
'test_jit_legacy',
'test_jit_fuser_legacy',
'test_tensorboard',
'test_namedtensor',
'test_reductions',
'test_type_promotion',
'test_jit_disabled',
'test_function_schema',
'test_op_aliases',
'test_overrides',
'test_jit_fuser_te',
'test_tensorexpr',
'test_tensorexpr_pybind',
'test_openmp',
'test_profiler',
"distributed/test_launcher",
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'test_determination',
'test_futures',
'test_fx',
'test_fx_experimental',
'test_functional_autograd_benchmark',
'test_package',
'test_license',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributed/optim/test_zero_redundancy_optimizer',
'distributed/elastic/timer/api_test',
'distributed/elastic/timer/local_timer_example',
'distributed/elastic/timer/local_timer_test',
'distributed/elastic/events/lib_test',
'distributed/elastic/metrics/api_test',
'distributed/elastic/utils/logging_test',
'distributed/elastic/utils/util_test',
'distributed/elastic/utils/distributed_test',
'distributed/elastic/multiprocessing/api_test',
]
# Tests need to be run with pytest.
USE_PYTEST_LIST = [
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributions/test_constraints',
'distributions/test_transforms',
'distributions/test_utils',
'test_typing',
"distributed/elastic/events/lib_test",
"distributed/elastic/agent/server/test/api_test",
]
WINDOWS_BLOCKLIST = [
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'distributed/test_distributed_fork',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributed/optim/test_zero_redundancy_optimizer',
"distributed/elastic/agent/server/test/api_test",
'distributed/elastic/multiprocessing/api_test',
]
ROCM_BLOCKLIST = [
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'test_determination',
'test_multiprocessing',
'test_jit_legacy',
'test_type_hints',
'test_openmp',
]
RUN_PARALLEL_BLOCKLIST = [
'test_cpp_extensions_jit',
'test_expecttest',
'test_jit_disabled',
'test_mobile_optimizer',
'test_multiprocessing',
'test_multiprocessing_spawn',
'test_namedtuple_return_api',
'test_overrides',
'test_show_pickle',
'test_tensorexpr',
'test_cuda_primary_ctx',
] + [test for test in TESTS if test.startswith('distributed/')]
WINDOWS_COVERAGE_BLOCKLIST = [
]
# These tests are slow enough that it's worth calculating whether the patch
# touched any related files first. This list was manually generated, but for every
# run with --determine-from, we use another generated list based on this one and the
# previous test stats.
TARGET_DET_LIST = [
'distributions/test_distributions',
'test_nn',
'test_autograd',
'test_cpp_extensions_jit',
'test_jit_legacy',
'test_dataloader',
'test_overrides',
'test_linalg',
'test_jit',
'test_jit_profiling',
'test_torch',
'test_binary_ufuncs',
'test_numpy_interop',
'test_reductions',
'test_shape_ops',
'test_sort_and_select',
'test_testing',
'test_view_ops',
'distributed/nn/jit/test_instantiator',
'distributed/test_distributed_fork',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'distributed/algorithms/ddp_comm_hooks/test_ddp_hooks',
'distributed/test_distributed_spawn',
'test_cuda',
'test_cuda_primary_ctx',
'test_cpp_extensions_aot_ninja',
'test_cpp_extensions_aot_no_ninja',
'test_serialization',
'test_optim',
'test_utils',
'test_multiprocessing',
'test_tensorboard',
'distributed/test_c10d_common',
'distributed/test_c10d_gloo',
'distributed/test_c10d_nccl',
'distributed/test_jit_c10d',
'distributed/test_c10d_spawn_gloo',
'distributed/test_c10d_spawn_nccl',
'test_quantization',
'test_pruning_op',
'test_determination',
'test_futures',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
]
# the JSON file to store the S3 test stats
TEST_TIMES_FILE = '.pytorch-test-times'
# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST
SLOW_TEST_THRESHOLD = 300
_DEP_MODULES_CACHE: Dict[str, set] = {}
DISTRIBUTED_TESTS_CONFIG = {}
if dist.is_available():
DISTRIBUTED_TESTS_CONFIG['test'] = {
'WORLD_SIZE': '1'
}
if not TEST_WITH_ROCM and dist.is_mpi_available():
DISTRIBUTED_TESTS_CONFIG['mpi'] = {
'WORLD_SIZE': '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-mpi'
}
if dist.is_nccl_available():
DISTRIBUTED_TESTS_CONFIG['nccl'] = {
'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-nccl'
}
if dist.is_gloo_available():
DISTRIBUTED_TESTS_CONFIG['gloo'] = {
'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-gloo'
}
# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
SIGNALS_TO_NAMES_DICT = {getattr(signal, n): n for n in dir(signal)
if n.startswith('SIG') and '_' not in n}
CPP_EXTENSIONS_ERROR = """
Ninja (https://ninja-build.org) is required for some of the C++ extensions
tests, but it could not be found. Install ninja with `pip install ninja`
or `conda install ninja`. Alternatively, disable said tests with
`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.
"""
PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))
JIT_EXECUTOR_TESTS = [
'test_jit_cuda_fuser',
'test_jit_profiling',
'test_jit_legacy',
'test_jit_fuser_legacy',
]
def print_to_stderr(message):
print(message, file=sys.stderr)
# Convert something like pytorch_windows_vs2019_py36_cuda10.1_build to pytorch_windows_vs2019_py36_cuda10.1
def get_stripped_CI_job() -> str:
job = os.environ.get("CIRCLE_JOB", "").rstrip('0123456789')
if job.endswith('_slow_test'):
job = job[:len(job) - len('_slow_test')]
elif job.endswith('_test'):
job = job[:len(job) - len('_test')]
elif job.endswith('_build'):
job = job[:len(job) - len('_build')]
return job
def calculate_job_times(reports: List["Report"]) -> Dict[str, float]:
# an entry will be like ("test_file_name" -> (current_avg, # values))
jobs_to_times: Dict[str, Tuple[float, int]] = dict()
for report in reports:
assert report.get('format_version') == 2, "S3 format currently handled is version 2 only"
files: Dict[str, Any] = report['files']
for name, test_file in files.items():
if name not in jobs_to_times:
jobs_to_times[name] = (test_file['total_seconds'], 1)
else:
curr_avg, curr_count = jobs_to_times[name]
new_count = curr_count + 1
new_avg = (curr_avg * curr_count + test_file['total_seconds']) / new_count
jobs_to_times[name] = (new_avg, new_count)
# if there's 'test_cpp_extensions_aot' entry in jobs_to_times, add 'test_cpp_extensions_aot_ninja'
# and 'test_cpp_extensions_aot_no_ninja' duplicate entries to ease future computation since
# test_cpp_extensions_aot_no_ninja and test_cpp_extensions_aot_ninja are Python test jobs that
# both use the test_cpp_extensions_aot.py file.
if 'test_cpp_extensions_aot' in jobs_to_times:
jobs_to_times['test_cpp_extensions_aot_ninja'] = jobs_to_times['test_cpp_extensions_aot']
jobs_to_times['test_cpp_extensions_aot_no_ninja'] = jobs_to_times['test_cpp_extensions_aot']
return {job: time for job, (time, _) in jobs_to_times.items()}
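# Worked example of the incremental average above (illustrative numbers only):
# seeing total_seconds 10, 20 and 30 for the same file updates the entry as
#   (10, 1) -> ((10*1 + 20)/2, 2) = (15, 2) -> ((15*2 + 30)/3, 3) = (20, 3)
# which matches the plain mean of all three reports.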
def pull_job_times_from_S3() -> Dict[str, float]:
if HAVE_BOTO3:
ci_job_prefix = get_stripped_CI_job()
s3_reports: List["Report"] = get_previous_reports_for_branch('origin/nightly', ci_job_prefix)
else:
print('Uh oh, boto3 is not found. Either it is not installed or we failed to import s3_stat_parser.')
print('If not installed, please install boto3 for automatic sharding and test categorization.')
s3_reports = []
if len(s3_reports) == 0:
print('Gathered no reports from S3. Please proceed without them.')
return dict()
return calculate_job_times(s3_reports)
def get_past_job_times() -> Dict[str, float]:
if os.path.exists(TEST_TIMES_FILE):
with open(TEST_TIMES_FILE) as file:
test_times_json: JobTimeJSON = json.load(file)
curr_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip()
file_commit = test_times_json.get('commit', '')
curr_ci_job = get_stripped_CI_job()
file_ci_job = test_times_json.get('CIRCLE_JOB', 'N/A')
if curr_commit != file_commit:
print(f'Current test times file is from different commit {file_commit}.')
elif curr_ci_job != file_ci_job:
print(f'Current test times file is for different CI job {file_ci_job}.')
else:
print(f'Found stats for current commit: {curr_commit} and job: {curr_ci_job}. Proceeding with those values.')
return test_times_json.get('job_times', {})
# Found file, but commit or CI job in JSON doesn't match
print(f'Overwriting current file with stats based on current commit: {curr_commit} and CI job: {curr_ci_job}')
job_times = pull_job_times_from_S3()
print(f'Exporting S3 test stats to {TEST_TIMES_FILE}.')
export_S3_test_times(TEST_TIMES_FILE, job_times)
return job_times
class JobTimeJSON(TypedDict):
commit: str
job_times: Dict[str, float]
def get_job_times_json(job_times: Dict[str, float]) -> JobTimeJSON:
return {
'commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip(),
'CIRCLE_JOB': get_stripped_CI_job(),
'job_times': job_times,
}
def get_shard(which_shard: int, num_shards: int, tests: List[str]) -> List[str]:
jobs_to_times = get_past_job_times()
# Got no stats from S3, returning early to save runtime
if len(jobs_to_times) == 0:
print('Gathered no stats from S3. Proceeding with default sharding plan.')
return tests[which_shard - 1 :: num_shards]
shards = calculate_shards(num_shards, tests, jobs_to_times)
_, tests_from_shard = shards[which_shard - 1]
return tests_from_shard
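# Example of the fallback sharding above: with tests = ['a', 'b', 'c', 'd', 'e']
# and --shard 2 3, tests[2 - 1 :: 3] selects ['b', 'e'] -- a simple round-robin
# split used only when no S3 timing stats are available.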
def get_slow_tests_based_on_S3() -> List[str]:
jobs_to_times: Dict[str, float] = get_past_job_times()
# Got no stats from S3, returning early to save runtime
if len(jobs_to_times) == 0:
print('Gathered no stats from S3. No new slow tests calculated.')
return []
slow_tests: List[str] = []
for test in TESTS:
if test in jobs_to_times and test not in TARGET_DET_LIST:
if jobs_to_times[test] > SLOW_TEST_THRESHOLD:
slow_tests.append(test)
return slow_tests
def get_executable_command(options, allow_pytest, disable_coverage=False):
if options.coverage and not disable_coverage:
executable = ['coverage', 'run', '--parallel-mode', '--source=torch']
else:
executable = [sys.executable]
if options.pytest:
if allow_pytest:
executable += ['-m', 'pytest']
else:
print_to_stderr('Pytest cannot be used for this test. Falling back to unittest.')
return executable
def run_test(test_module, test_directory, options, launcher_cmd=None, extra_unittest_args=None):
unittest_args = options.additional_unittest_args.copy()
if options.verbose:
unittest_args.append(f'-{"v"*options.verbose}') # in case of pytest
if test_module in RUN_PARALLEL_BLOCKLIST:
unittest_args = [arg for arg in unittest_args if not arg.startswith('--run-parallel')]
if extra_unittest_args:
assert isinstance(extra_unittest_args, list)
unittest_args.extend(extra_unittest_args)
# If using pytest, replace -f with equivalent -x
if options.pytest:
unittest_args = [arg if arg != '-f' else '-x' for arg in unittest_args]
# Can't call `python -m unittest test_*` here because it doesn't run code
# in `if __name__ == '__main__': `. So call `python test_*.py` instead.
argv = [test_module + '.py'] + unittest_args
# Multiprocessing related tests cannot run with coverage.
# Tracking issue: https://github.com/pytorch/pytorch/issues/50661
disable_coverage = sys.platform == 'win32' and test_module in WINDOWS_COVERAGE_BLOCKLIST
# Extra arguments are not supported with pytest
executable = get_executable_command(options, allow_pytest=not extra_unittest_args,
disable_coverage=disable_coverage)
command = (launcher_cmd or []) + executable + argv
print_to_stderr('Executing {} ... [{}]'.format(command, datetime.now()))
return shell(command, test_directory)
def test_cuda_primary_ctx(test_module, test_directory, options):
return run_test(test_module, test_directory, options, extra_unittest_args=['--subprocess'])
def _test_cpp_extensions_aot(test_module, test_directory, options, use_ninja):
if use_ninja:
try:
cpp_extension.verify_ninja_availability()
except RuntimeError:
print(CPP_EXTENSIONS_ERROR)
return 1
# Wipe the build folder, if it exists already
cpp_extensions_test_dir = os.path.join(test_directory, 'cpp_extensions')
cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, 'build')
if os.path.exists(cpp_extensions_test_build_dir):
shutil.rmtree(cpp_extensions_test_build_dir)
# Build the test cpp extensions modules
shell_env = os.environ.copy()
shell_env['USE_NINJA'] = str(1 if use_ninja else 0)
cmd = [sys.executable, 'setup.py', 'install', '--root', './install']
return_code = shell(cmd, cwd=cpp_extensions_test_dir, env=shell_env)
if return_code != 0:
return return_code
if sys.platform != 'win32':
return_code = shell(cmd,
cwd=os.path.join(cpp_extensions_test_dir, 'no_python_abi_suffix_test'),
env=shell_env)
if return_code != 0:
return return_code
# "install" the test modules and run tests
python_path = os.environ.get('PYTHONPATH', '')
try:
cpp_extensions = os.path.join(test_directory, 'cpp_extensions')
install_directory = ''
# install directory is the one that is named site-packages
for root, directories, _ in os.walk(os.path.join(cpp_extensions, 'install')):
for directory in directories:
if '-packages' in directory:
install_directory = os.path.join(root, directory)
assert install_directory, 'install_directory must not be empty'
os.environ['PYTHONPATH'] = os.pathsep.join([install_directory, python_path])
return run_test(test_module, test_directory, options)
finally:
os.environ['PYTHONPATH'] = python_path
def test_cpp_extensions_aot_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot('test_cpp_extensions_aot', test_directory,
options, use_ninja=True)
def test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot('test_cpp_extensions_aot',
test_directory, options, use_ninja=False)
def test_distributed(test_module, test_directory, options):
# MPI tests are broken with Python-3.9
mpi_available = subprocess.call('command -v mpiexec', shell=True) == 0 and sys.version_info < (3, 9)
if options.verbose and not mpi_available:
print_to_stderr(
'MPI not available -- MPI backend tests will be skipped')
config = DISTRIBUTED_TESTS_CONFIG
for backend, env_vars in config.items():
if sys.platform == 'win32' and backend != 'gloo':
continue
if backend == 'mpi' and not mpi_available:
continue
for with_init_file in {True, False}:
if sys.platform == 'win32' and not with_init_file:
continue
tmp_dir = tempfile.mkdtemp()
if options.verbose:
init_str = "with {} init_method"
with_init = init_str.format("file" if with_init_file else "env")
print_to_stderr(
'Running distributed tests for the {} backend {}'.format(
backend, with_init))
os.environ['TEMP_DIR'] = tmp_dir
os.environ['BACKEND'] = backend
os.environ['INIT_METHOD'] = 'env://'
os.environ.update(env_vars)
if with_init_file:
if test_module in ["test_distributed_fork", "test_distributed_spawn"]:
init_method = f'{FILE_SCHEMA}{tmp_dir}/'
else:
init_method = f'{FILE_SCHEMA}{tmp_dir}/shared_init_file'
os.environ['INIT_METHOD'] = init_method
try:
os.mkdir(os.path.join(tmp_dir, 'barrier'))
os.mkdir(os.path.join(tmp_dir, 'test_dir'))
if backend == 'mpi':
# test mpiexec for --noprefix option
with open(os.devnull, 'w') as devnull:
allowrunasroot_opt = '--allow-run-as-root' if subprocess.call(
'mpiexec --allow-run-as-root -n 1 bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
noprefix_opt = '--noprefix' if subprocess.call(
f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
mpiexec = ['mpiexec', '-n', '3', noprefix_opt, allowrunasroot_opt]
return_code = run_test(test_module, test_directory, options,
launcher_cmd=mpiexec)
else:
return_code = run_test(test_module, test_directory, options)
if return_code != 0:
return return_code
finally:
shutil.rmtree(tmp_dir)
return 0
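# One iteration of the loop above (illustrative): for the gloo backend with an
# init file, the child test process is launched with roughly
#   TEMP_DIR=<tmp dir>  BACKEND=gloo  WORLD_SIZE=2 or 3
#   INIT_METHOD=file://<tmp dir>/shared_init_file
# (file://<tmp dir>/ for test_distributed_fork/spawn, env:// when no init file
# is used) before run_test() executes the module.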
CUSTOM_HANDLERS = {
'test_cuda_primary_ctx': test_cuda_primary_ctx,
'test_cpp_extensions_aot_no_ninja': test_cpp_extensions_aot_no_ninja,
'test_cpp_extensions_aot_ninja': test_cpp_extensions_aot_ninja,
'distributed/test_distributed_fork': test_distributed,
'distributed/test_distributed_spawn': test_distributed,
}
def parse_test_module(test):
return test.split('.')[0]
class TestChoices(list):
def __init__(self, *args, **kwargs):
super(TestChoices, self).__init__(args[0])
def __contains__(self, item):
return list.__contains__(self, parse_test_module(item))
def parse_args():
parser = argparse.ArgumentParser(
description='Run the PyTorch unit test suite',
epilog='where TESTS is any of: {}'.format(', '.join(TESTS)))
parser.add_argument(
'-v',
'--verbose',
action='count',
default=0,
help='print verbose information and test-by-test results')
parser.add_argument(
'--jit',
action='store_true',
help='run all jit tests')
parser.add_argument(
'-pt', '--pytest', action='store_true',
help='If true, use `pytest` to execute the tests. E.g., this runs '
'TestTorch with pytest in verbose and coverage mode: '
'python run_test.py -vci torch -pt')
parser.add_argument(
'-c', '--coverage', action='store_true', help='enable coverage',
default=PYTORCH_COLLECT_COVERAGE)
parser.add_argument(
'-i',
'--include',
nargs='+',
choices=TestChoices(TESTS),
default=TESTS,
metavar='TESTS',
help='select a set of tests to include (defaults to ALL tests).'
' tests can be specified with module name, module.TestClass'
' or module.TestClass.test_method')
parser.add_argument(
'-x',
'--exclude',
nargs='+',
choices=TESTS,
metavar='TESTS',
default=[],
help='select a set of tests to exclude')
parser.add_argument(
'-f',
'--first',
choices=TESTS,
metavar='TESTS',
help='select the test to start from (excludes previous tests)')
parser.add_argument(
'-l',
'--last',
choices=TESTS,
metavar='TESTS',
help='select the last test to run (excludes following tests)')
parser.add_argument(
'--bring-to-front',
nargs='+',
choices=TestChoices(TESTS),
default=[],
metavar='TESTS',
help='select a set of tests to run first. This can be used in situations'
' where you want to run all tests, but care more about some set, '
'e.g. after making a change to a specific component')
parser.add_argument(
'--ignore-win-blocklist',
action='store_true',
help='always run blocklisted windows tests')
parser.add_argument(
'--determine-from',
help='File of affected source filenames to determine which tests to run.')
parser.add_argument(
'--continue-through-error',
action='store_true',
help='Runs the full test suite despite one of the tests failing')
parser.add_argument(
'additional_unittest_args',
nargs='*',
help='additional arguments passed through to unittest, e.g., '
'python run_test.py -i sparse -- TestSparse.test_factory_size_check')
parser.add_argument(
'--export-past-test-times',
nargs='?',
type=str,
const=TEST_TIMES_FILE,
help='dumps test times from previous S3 stats into a file, format JSON',
)
parser.add_argument(
'--shard',
nargs=2,
type=int,
help='runs a shard of the tests (taking into account other selections), e.g., '
'--shard 2 3 will break up the selected tests into 3 shards and run the tests '
'in the 2nd shard (the first number should not exceed the second)',
)
parser.add_argument(
'--exclude-jit-executor',
action='store_true',
help='exclude tests that are run for a specific jit config'
)
return parser.parse_args()
def find_test_index(test, selected_tests, find_last_index=False):
"""Find the index of the first or last occurrence of a given test/test module in the list of selected tests.
This function is used to determine the indices when slicing the list of selected tests when
``options.first``(:attr:`find_last_index`=False) and/or ``options.last``(:attr:`find_last_index`=True) are used.
:attr:`selected_tests` can be a list that contains multiple consecutive occurrences of tests
as part of the same test module, e.g.:
```
selected_tests = ['autograd', 'cuda', **'torch.TestTorch.test_acos',
'torch.TestTorch.test_tan', 'torch.TestTorch.test_add'**, 'utils']
```
If :attr:`test`='torch' and :attr:`find_last_index`=False, result should be **2**.
If :attr:`test`='torch' and :attr:`find_last_index`=True, result should be **4**.
Args:
test (str): Name of test to lookup
selected_tests (list): List of tests
find_last_index (bool, optional): should we lookup the index of first or last
occurrence (first is default)
Returns:
index of the first or last occurrence of the given test
"""
idx = 0
found_idx = -1
for t in selected_tests:
if t.startswith(test):
found_idx = idx
if not find_last_index:
break
idx += 1
return found_idx
def exclude_tests(exclude_list, selected_tests, exclude_message=None):
for exclude_test in exclude_list:
tests_copy = selected_tests[:]
for test in tests_copy:
if test.startswith(exclude_test):
if exclude_message is not None:
print_to_stderr('Excluding {} {}'.format(test, exclude_message))
selected_tests.remove(test)
return selected_tests
def get_selected_tests(options):
selected_tests = options.include
if options.bring_to_front:
to_front = set(options.bring_to_front)
selected_tests = options.bring_to_front + list(filter(lambda name: name not in to_front,
selected_tests))
if options.first:
first_index = find_test_index(options.first, selected_tests)
selected_tests = selected_tests[first_index:]
if options.last:
last_index = find_test_index(options.last, selected_tests, find_last_index=True)
selected_tests = selected_tests[:last_index + 1]
if options.shard:
assert len(options.shard) == 2, "Unexpected shard format"
assert min(options.shard) > 0, "Shards must be positive numbers"
which_shard, num_shards = options.shard
        assert which_shard <= num_shards, "Selected shard must be less than or equal to the total number of shards"
        assert num_shards <= len(selected_tests), f"Number of shards must not exceed {len(selected_tests)}"
selected_tests = get_shard(which_shard, num_shards, selected_tests)
if options.exclude_jit_executor:
options.exclude.extend(JIT_EXECUTOR_TESTS)
selected_tests = exclude_tests(options.exclude, selected_tests)
if sys.platform == 'win32' and not options.ignore_win_blocklist:
target_arch = os.environ.get('VSCMD_ARG_TGT_ARCH')
if target_arch != 'x64':
WINDOWS_BLOCKLIST.append('cpp_extensions_aot_no_ninja')
WINDOWS_BLOCKLIST.append('cpp_extensions_aot_ninja')
WINDOWS_BLOCKLIST.append('cpp_extensions_jit')
WINDOWS_BLOCKLIST.append('jit')
WINDOWS_BLOCKLIST.append('jit_fuser')
selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, 'on Windows')
elif TEST_WITH_ROCM:
selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, 'on ROCm')
return selected_tests
def test_impact_of_file(filename):
"""Determine what class of impact this file has on test runs.
Possible values:
TORCH - torch python code
CAFFE2 - caffe2 python code
TEST - torch test code
UNKNOWN - may affect all tests
NONE - known to have no effect on test outcome
CI - CI configuration files
"""
parts = filename.split(os.sep)
if parts[0] in ['.jenkins', '.circleci']:
return 'CI'
if parts[0] in ['docs', 'scripts', 'CODEOWNERS', 'README.md']:
return 'NONE'
elif parts[0] == 'torch':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TORCH'
elif parts[0] == 'caffe2':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'CAFFE2'
elif parts[0] == 'test':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TEST'
return 'UNKNOWN'
def log_test_reason(file_type, filename, test, options):
if options.verbose:
print_to_stderr(
'Determination found {} file {} -- running {}'.format(
file_type,
filename,
test,
)
)
def get_dep_modules(test):
# Cache results in case of repetition
if test in _DEP_MODULES_CACHE:
return _DEP_MODULES_CACHE[test]
repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
test_location = os.path.join(repo_root, 'test', test + '.py')
finder = modulefinder.ModuleFinder(
# Ideally exclude all third party modules, to speed up calculation.
excludes=[
'scipy',
'numpy',
'numba',
'multiprocessing',
'sklearn',
'setuptools',
'hypothesis',
'llvmlite',
'joblib',
'email',
'importlib',
'unittest',
'urllib',
'json',
'collections',
# Modules below are excluded because they are hitting https://bugs.python.org/issue40350
# Trigger AttributeError: 'NoneType' object has no attribute 'is_package'
'mpl_toolkits',
'google',
'onnx',
# Triggers RecursionError
'mypy'
],
)
# HACK: some platforms default to ascii, so we can't just run_script :(
with open(test_location, 'r', encoding='utf-8') as fp:
finder.load_module('__main__', fp, test_location, ('', 'r', 1))
dep_modules = set(finder.modules.keys())
_DEP_MODULES_CACHE[test] = dep_modules
return dep_modules
def determine_target(target_det_list, test, touched_files, options):
test = parse_test_module(test)
# Some tests are faster to execute than to determine.
if test not in target_det_list:
if options.verbose:
print_to_stderr(f'Running {test} without determination')
return True
# HACK: "no_ninja" is not a real module
if test.endswith('_no_ninja'):
test = test[:(-1 * len('_no_ninja'))]
if test.endswith('_ninja'):
test = test[:(-1 * len('_ninja'))]
dep_modules = get_dep_modules(test)
for touched_file in touched_files:
file_type = test_impact_of_file(touched_file)
if file_type == 'NONE':
continue
elif file_type == 'CI':
# Force all tests to run if any change is made to the CI
# configurations.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type == 'UNKNOWN':
# Assume uncategorized source files can affect every test.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type in ['TORCH', 'CAFFE2', 'TEST']:
parts = os.path.splitext(touched_file)[0].split(os.sep)
touched_module = ".".join(parts)
# test/ path does not have a "test." namespace
if touched_module.startswith('test.'):
touched_module = touched_module.split('test.')[1]
if (
touched_module in dep_modules
or touched_module == test.replace('/', '.')
):
log_test_reason(file_type, touched_file, test, options)
return True
    # If nothing has determined that the test should run, don't run the test.
if options.verbose:
print_to_stderr(f'Determination is skipping {test}')
return False
def run_test_module(test: str, test_directory: str, options) -> Optional[str]:
test_module = parse_test_module(test)
# Printing the date here can help diagnose which tests are slow
print_to_stderr('Running {} ... [{}]'.format(test, datetime.now()))
handler = CUSTOM_HANDLERS.get(test_module, run_test)
return_code = handler(test_module, test_directory, options)
assert isinstance(return_code, int) and not isinstance(
return_code, bool), 'Return code should be an integer'
if return_code == 0:
return None
message = f'{test} failed!'
if return_code < 0:
# subprocess.Popen returns the child process' exit signal as
# return code -N, where N is the signal number.
signal_name = SIGNALS_TO_NAMES_DICT[-return_code]
message += f' Received signal: {signal_name}'
return message
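# Example of the signal mapping above: a child killed by SIGSEGV makes
# subprocess report return code -11, and SIGNALS_TO_NAMES_DICT[11] resolves to
# 'SIGSEGV' on Linux, so the failure message ends with "Received signal: SIGSEGV".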
def export_S3_test_times(test_times_filename: str, test_times: Dict[str, float]) -> None:
if os.path.exists(test_times_filename):
print(f'Overwriting existent file: {test_times_filename}')
with open(test_times_filename, 'w+') as file:
job_times_json = get_job_times_json(test_times)
json.dump(job_times_json, file, indent=' ', separators=(',', ': '))
file.write('\n')
def query_changed_test_files() -> List[str]:
cmd = ["git", "diff", "--name-only", "origin/master", "HEAD"]
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if proc.returncode != 0:
raise RuntimeError("Unable to get changed files")
lines = proc.stdout.decode().strip().split("\n")
lines = [line.strip() for line in lines]
return lines
def reorder_tests(tests: List[str]) -> List[str]:
try:
changed_files = query_changed_test_files()
except Exception:
# If unable to get changed files from git, quit without doing any sorting
return tests
prefix = f"test{os.path.sep}"
changed_tests = [f for f in changed_files if f.startswith(prefix) and f.endswith(".py")]
changed_tests = [f[len(prefix):] for f in changed_tests]
changed_tests = [f[:-len(".py")] for f in changed_tests]
bring_to_front = []
the_rest = []
for test in tests:
if test in changed_tests:
bring_to_front.append(test)
else:
the_rest.append(test)
sorted_tests = bring_to_front + the_rest
if len(sorted_tests) != len(tests):
# Something went wrong, bail out without doing any sorting
return tests
return sorted_tests
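# Example of the prioritisation above: if `git diff` reports test/test_torch.py
# as changed, the "test/" prefix and ".py" suffix are stripped to get
# 'test_torch', which is then moved to the front of the selected test list.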
def main():
options = parse_args()
test_times_filename = options.export_past_test_times
if test_times_filename:
print(f'Exporting past test times from S3 to {test_times_filename}, no tests will be run.')
export_S3_test_times(test_times_filename, pull_job_times_from_S3())
return
test_directory = os.path.dirname(os.path.abspath(__file__))
selected_tests = get_selected_tests(options)
if options.verbose:
print_to_stderr('Selected tests: {}'.format(', '.join(selected_tests)))
if options.coverage and not PYTORCH_COLLECT_COVERAGE:
shell(['coverage', 'erase'])
if options.jit:
selected_tests = filter(lambda test_name: "jit" in test_name, TESTS)
if options.determine_from is not None and os.path.exists(options.determine_from):
slow_tests = get_slow_tests_based_on_S3()
print('Added the following tests to target_det tests as calculated based on S3:')
print(slow_tests)
with open(options.determine_from, 'r') as fh:
touched_files = [
os.path.normpath(name.strip()) for name in fh.read().split('\n')
if len(name.strip()) > 0
]
# HACK: Ensure the 'test' paths can be traversed by Modulefinder
sys.path.append('test')
selected_tests = [
test for test in selected_tests
if determine_target(TARGET_DET_LIST + slow_tests, test, touched_files, options)
]
sys.path.remove('test')
selected_tests = reorder_tests(selected_tests)
has_failed = False
failure_messages = []
try:
for test in selected_tests:
options_clone = copy.deepcopy(options)
if test in USE_PYTEST_LIST:
options_clone.pytest = True
err_message = run_test_module(test, test_directory, options_clone)
if err_message is None:
continue
has_failed = True
failure_messages.append(err_message)
if not options_clone.continue_through_error:
raise RuntimeError(err_message)
print_to_stderr(err_message)
finally:
if options.coverage:
from coverage import Coverage
test_dir = os.path.dirname(os.path.abspath(__file__))
with set_cwd(test_dir):
cov = Coverage()
if PYTORCH_COLLECT_COVERAGE:
cov.load()
cov.combine(strict=False)
cov.save()
if not PYTORCH_COLLECT_COVERAGE:
cov.html_report()
if options.continue_through_error and has_failed:
for err in failure_messages:
print_to_stderr(err)
sys.exit(1)
if __name__ == '__main__':
main()
|
[] |
[] |
["TEMP_DIR", "INIT_METHOD", "CIRCLE_JOB", "BACKEND", "VSCMD_ARG_TGT_ARCH", "PYTORCH_COLLECT_COVERAGE", "PYTHONPATH"] |
[]
|
["TEMP_DIR", "INIT_METHOD", "CIRCLE_JOB", "BACKEND", "VSCMD_ARG_TGT_ARCH", "PYTORCH_COLLECT_COVERAGE", "PYTHONPATH"]
|
python
| 7 | 0 | |
flink-yarn/src/main/java/org/apache/flink/yarn/entrypoint/YarnJobClusterEntrypoint.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.yarn.entrypoint;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.entrypoint.ClusterEntrypoint;
import org.apache.flink.runtime.entrypoint.ClusterEntrypointUtils;
import org.apache.flink.runtime.entrypoint.DynamicParametersConfigurationParserFactory;
import org.apache.flink.runtime.entrypoint.JobClusterEntrypoint;
import org.apache.flink.runtime.entrypoint.component.DefaultDispatcherResourceManagerComponentFactory;
import org.apache.flink.runtime.entrypoint.component.FileJobGraphRetriever;
import org.apache.flink.runtime.util.EnvironmentInformation;
import org.apache.flink.runtime.util.JvmShutdownSafeguard;
import org.apache.flink.runtime.util.SignalHandler;
import org.apache.flink.util.Preconditions;
import org.apache.flink.yarn.configuration.YarnConfigOptions;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import java.io.IOException;
import java.util.Map;
/**
* Entry point for Yarn per-job clusters.
*/
/* TODO: Different deployment modes use different entry point classes: per-job mode, session mode, and application mode. */
public class YarnJobClusterEntrypoint extends JobClusterEntrypoint {
public YarnJobClusterEntrypoint(Configuration configuration) {
super(configuration);
}
@Override
protected String getRPCPortRange(Configuration configuration) {
return configuration.getString(YarnConfigOptions.APPLICATION_MASTER_PORT);
}
@Override
protected DefaultDispatcherResourceManagerComponentFactory createDispatcherResourceManagerComponentFactory(Configuration configuration) throws IOException {
return DefaultDispatcherResourceManagerComponentFactory.createJobComponentFactory(
YarnResourceManagerFactory.getInstance(),
FileJobGraphRetriever.createFrom(
configuration,
YarnEntrypointUtils.getUsrLibDir(configuration).orElse(null)));
}
// ------------------------------------------------------------------------
// The executable entry point for the Yarn Application Master Process
// for a single Flink job (per-job mode).
// ------------------------------------------------------------------------
    /* TODO: Whenever you see "EntryPoint" in a class name, know that it is an entry point class like this one. */
public static void main(String[] args) {
// startup checks and logging
EnvironmentInformation.logEnvironmentInfo(LOG, YarnJobClusterEntrypoint.class.getSimpleName(), args);
SignalHandler.register(LOG);
JvmShutdownSafeguard.installAsShutdownHook(LOG);
Map<String, String> env = System.getenv();
final String workingDirectory = env.get(ApplicationConstants.Environment.PWD.key());
Preconditions.checkArgument(
workingDirectory != null,
"Working directory variable (%s) not set",
ApplicationConstants.Environment.PWD.key());
try {
YarnEntrypointUtils.logYarnEnvironmentInformation(env, LOG);
} catch (IOException e) {
LOG.warn("Could not log YARN environment information.", e);
}
final Configuration dynamicParameters = ClusterEntrypointUtils.parseParametersOrExit(
args,
new DynamicParametersConfigurationParserFactory(),
YarnJobClusterEntrypoint.class);
        /* TODO: All of the preceding code exists only to build up this configuration object. */
final Configuration configuration = YarnEntrypointUtils.loadConfiguration(workingDirectory, dynamicParameters, env);
YarnJobClusterEntrypoint yarnJobClusterEntrypoint = new YarnJobClusterEntrypoint(configuration);
        /* TODO: With the entrypoint object assembled above, this is where the cluster is actually started. */
ClusterEntrypoint.runClusterEntrypoint(yarnJobClusterEntrypoint);
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
workflow/controller/workflowpod.go
|
package controller
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"time"
log "github.com/sirupsen/logrus"
apiv1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/utils/pointer"
"github.com/argoproj/argo-workflows/v3/config"
"github.com/argoproj/argo-workflows/v3/errors"
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
errorsutil "github.com/argoproj/argo-workflows/v3/util/errors"
"github.com/argoproj/argo-workflows/v3/util/intstr"
"github.com/argoproj/argo-workflows/v3/util/template"
"github.com/argoproj/argo-workflows/v3/workflow/common"
"github.com/argoproj/argo-workflows/v3/workflow/controller/indexes"
"github.com/argoproj/argo-workflows/v3/workflow/util"
)
var (
volumeVarArgo = apiv1.Volume{
Name: "var-run-argo",
VolumeSource: apiv1.VolumeSource{
EmptyDir: &apiv1.EmptyDirVolumeSource{},
},
}
volumeMountVarArgo = apiv1.VolumeMount{
Name: volumeVarArgo.Name,
MountPath: "/var/run/argo",
}
hostPathSocket = apiv1.HostPathSocket
)
func (woc *wfOperationCtx) getVolumeMountDockerSock(tmpl *wfv1.Template) apiv1.VolumeMount {
return apiv1.VolumeMount{
Name: common.DockerSockVolumeName,
MountPath: getDockerSockPath(tmpl),
ReadOnly: getDockerSockReadOnly(tmpl),
}
}
func getDockerSockReadOnly(tmpl *wfv1.Template) bool {
return !util.HasWindowsOSNodeSelector(tmpl.NodeSelector)
}
func getDockerSockPath(tmpl *wfv1.Template) string {
if util.HasWindowsOSNodeSelector(tmpl.NodeSelector) {
return "\\\\.\\pipe\\docker_engine"
}
return "/var/run/docker.sock"
}
func getVolumeHostPathType(tmpl *wfv1.Template) *apiv1.HostPathType {
if util.HasWindowsOSNodeSelector(tmpl.NodeSelector) {
return nil
}
return &hostPathSocket
}
func (woc *wfOperationCtx) getVolumeDockerSock(tmpl *wfv1.Template) apiv1.Volume {
dockerSockPath := getDockerSockPath(tmpl)
if woc.controller.Config.DockerSockPath != "" {
dockerSockPath = woc.controller.Config.DockerSockPath
}
// volumeDockerSock provides the wait container direct access to the minion's host docker daemon.
// The primary purpose of this is to make available `docker cp` to collect an output artifact
// from a container. Alternatively, we could use `kubectl cp`, but `docker cp` avoids the extra
// hop to the kube api server.
return apiv1.Volume{
Name: common.DockerSockVolumeName,
VolumeSource: apiv1.VolumeSource{
HostPath: &apiv1.HostPathVolumeSource{
Path: dockerSockPath,
Type: getVolumeHostPathType(tmpl),
},
},
}
}
func (woc *wfOperationCtx) hasPodSpecPatch(tmpl *wfv1.Template) bool {
return woc.execWf.Spec.HasPodSpecPatch() || tmpl.HasPodSpecPatch()
}
// scheduleOnDifferentHost adds affinity to prevent retry on the same host when
// retryStrategy.affinity.nodeAntiAffinity{} is specified
func (woc *wfOperationCtx) scheduleOnDifferentHost(node *wfv1.NodeStatus, pod *apiv1.Pod) error {
if node != nil && pod != nil {
if retryNode := FindRetryNode(woc.wf.Status.Nodes, node.ID); retryNode != nil {
// recover template for the retry node
tmplCtx, err := woc.createTemplateContext(retryNode.GetTemplateScope())
if err != nil {
return err
}
_, retryTmpl, _, err := tmplCtx.ResolveTemplate(retryNode)
if err != nil {
return err
}
if retryStrategy := woc.retryStrategy(retryTmpl); retryStrategy != nil {
RetryOnDifferentHost(retryNode.ID)(*retryStrategy, woc.wf.Status.Nodes, pod)
}
}
}
return nil
}
type createWorkflowPodOpts struct {
includeScriptOutput bool
onExitPod bool
executionDeadline time.Time
}
func (woc *wfOperationCtx) createWorkflowPod(ctx context.Context, nodeName string, mainCtrs []apiv1.Container, tmpl *wfv1.Template, opts *createWorkflowPodOpts) (*apiv1.Pod, error) {
nodeID := woc.wf.NodeID(nodeName)
	// We must check whether the pod exists rather than optimistically creating it and watching for an
	// `AlreadyExists` error, because that error is not returned when there are not enough resources.
// Performance enhancement: Code later in this func is expensive to execute, so return quickly if we can.
existing, exists, err := woc.podExists(nodeID)
if err != nil {
return nil, err
}
if exists {
woc.log.WithField("podPhase", existing.Status.Phase).Debugf("Skipped pod %s (%s) creation: already exists", nodeName, nodeID)
return existing, nil
}
if !woc.GetShutdownStrategy().ShouldExecute(opts.onExitPod) {
// Do not create pods if we are shutting down
woc.markNodePhase(nodeName, wfv1.NodeSkipped, fmt.Sprintf("workflow shutdown with strategy: %s", woc.GetShutdownStrategy()))
return nil, nil
}
tmpl = tmpl.DeepCopy()
wfSpec := woc.execWf.Spec.DeepCopy()
for i, c := range mainCtrs {
if c.Name == "" || tmpl.GetType() != wfv1.TemplateTypeContainerSet {
c.Name = common.MainContainerName
}
// Allow customization of main container resources.
if isResourcesSpecified(woc.controller.Config.MainContainer) {
c.Resources = *woc.controller.Config.MainContainer.Resources.DeepCopy()
}
		// Template resources in the workflow spec take precedence over the main container's configuration in the controller
switch tmpl.GetType() {
case wfv1.TemplateTypeContainer:
if isResourcesSpecified(tmpl.Container) && tmpl.Container.Name == common.MainContainerName {
c.Resources = *tmpl.Container.Resources.DeepCopy()
}
case wfv1.TemplateTypeScript:
if isResourcesSpecified(&tmpl.Script.Container) {
c.Resources = *tmpl.Script.Resources.DeepCopy()
}
case wfv1.TemplateTypeContainerSet:
}
mainCtrs[i] = c
}
var activeDeadlineSeconds *int64
wfDeadline := woc.getWorkflowDeadline()
tmplActiveDeadlineSeconds, err := intstr.Int64(tmpl.ActiveDeadlineSeconds)
if err != nil {
return nil, err
}
	if wfDeadline == nil || opts.onExitPod { // ignore the workflow deadline for exit handlers so they still run if the deadline has passed
activeDeadlineSeconds = tmplActiveDeadlineSeconds
} else {
wfActiveDeadlineSeconds := int64((*wfDeadline).Sub(time.Now().UTC()).Seconds())
if wfActiveDeadlineSeconds <= 0 {
return nil, nil
} else if tmpl.ActiveDeadlineSeconds == nil || wfActiveDeadlineSeconds < *tmplActiveDeadlineSeconds {
activeDeadlineSeconds = &wfActiveDeadlineSeconds
} else {
activeDeadlineSeconds = tmplActiveDeadlineSeconds
}
}
pod := &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: util.PodName(woc.wf.Name, nodeName, tmpl.Name, nodeID),
Namespace: woc.wf.ObjectMeta.Namespace,
Labels: map[string]string{
common.LabelKeyWorkflow: woc.wf.ObjectMeta.Name, // Allows filtering by pods related to specific workflow
common.LabelKeyCompleted: "false", // Allows filtering by incomplete workflow pods
},
Annotations: map[string]string{
common.AnnotationKeyNodeName: nodeName,
common.AnnotationKeyNodeID: nodeID,
},
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(woc.wf, wfv1.SchemeGroupVersion.WithKind(workflow.WorkflowKind)),
},
},
Spec: apiv1.PodSpec{
RestartPolicy: apiv1.RestartPolicyNever,
Volumes: woc.createVolumes(tmpl),
ActiveDeadlineSeconds: activeDeadlineSeconds,
ImagePullSecrets: woc.execWf.Spec.ImagePullSecrets,
},
}
if opts.onExitPod {
// This pod is part of an onExit handler, label it so
pod.ObjectMeta.Labels[common.LabelKeyOnExit] = "true"
}
if woc.execWf.Spec.HostNetwork != nil {
pod.Spec.HostNetwork = *woc.execWf.Spec.HostNetwork
}
if woc.execWf.Spec.DNSPolicy != nil {
pod.Spec.DNSPolicy = *woc.execWf.Spec.DNSPolicy
}
if woc.execWf.Spec.DNSConfig != nil {
pod.Spec.DNSConfig = woc.execWf.Spec.DNSConfig
}
if woc.controller.Config.InstanceID != "" {
pod.ObjectMeta.Labels[common.LabelKeyControllerInstanceID] = woc.controller.Config.InstanceID
}
if woc.getContainerRuntimeExecutor() == common.ContainerRuntimeExecutorPNS {
pod.Spec.ShareProcessNamespace = pointer.BoolPtr(true)
}
woc.addArchiveLocation(tmpl)
err = woc.setupServiceAccount(ctx, pod, tmpl)
if err != nil {
return nil, err
}
if tmpl.GetType() != wfv1.TemplateTypeResource && tmpl.GetType() != wfv1.TemplateTypeData {
// we do not need the wait container for resource templates because
// argoexec runs as the main container and will perform the job of
// annotating the outputs or errors, making the wait container redundant.
waitCtr := woc.newWaitContainer(tmpl)
pod.Spec.Containers = append(pod.Spec.Containers, *waitCtr)
}
// NOTE: the order of the container list is significant. kubelet will pull, create, and start
// each container sequentially in the order that they appear in this list. For PNS we want the
// wait container to start before the main, so that it always has the chance to see the main
// container's PID and root filesystem.
pod.Spec.Containers = append(pod.Spec.Containers, mainCtrs...)
	// Configure the default container to be used with commands like "kubectl exec/logs".
	// Select the "main" container if it's available; otherwise use the last container (which can happen when the pod is created from a ContainerSet).
defaultContainer := pod.Spec.Containers[len(pod.Spec.Containers)-1].Name
for _, c := range pod.Spec.Containers {
if c.Name == common.MainContainerName {
defaultContainer = common.MainContainerName
break
}
}
pod.ObjectMeta.Annotations[common.AnnotationKeyDefaultContainer] = defaultContainer
// Add init container only if it needs input artifacts. This is also true for
// script templates (which needs to populate the script)
if len(tmpl.Inputs.Artifacts) > 0 || tmpl.GetType() == wfv1.TemplateTypeScript || woc.getContainerRuntimeExecutor() == common.ContainerRuntimeExecutorEmissary {
initCtr := woc.newInitContainer(tmpl)
pod.Spec.InitContainers = []apiv1.Container{initCtr}
}
addSchedulingConstraints(pod, wfSpec, tmpl)
woc.addMetadata(pod, tmpl)
err = addVolumeReferences(pod, woc.volumes, tmpl, woc.wf.Status.PersistentVolumeClaims)
if err != nil {
return nil, err
}
err = woc.addInputArtifactsVolumes(pod, tmpl)
if err != nil {
return nil, err
}
if tmpl.GetType() == wfv1.TemplateTypeScript {
addScriptStagingVolume(pod)
}
// addInitContainers, addSidecars and addOutputArtifactsVolumes should be called after all
// volumes have been manipulated in the main container since volumeMounts are mirrored
addInitContainers(pod, tmpl)
addSidecars(pod, tmpl)
addOutputArtifactsVolumes(pod, tmpl)
for i, c := range pod.Spec.InitContainers {
c.VolumeMounts = append(c.VolumeMounts, volumeMountVarArgo)
pod.Spec.InitContainers[i] = c
}
for i, c := range pod.Spec.Containers {
if woc.getContainerRuntimeExecutor() == common.ContainerRuntimeExecutorEmissary && c.Name != common.WaitContainerName {
// https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes
if len(c.Command) == 0 {
x := woc.getImage(c.Image)
c.Command = x.Command
if c.Args == nil { // check nil rather than length, as zero-length is valid args
c.Args = x.Args
}
}
if len(c.Command) == 0 {
return nil, fmt.Errorf("when using the emissary executor you must either explicitly specify the command, or list the image's command in the index: https://argoproj.github.io/argo-workflows/workflow-executors/#emissary-emissary")
}
c.Command = append([]string{"/var/run/argo/argoexec", "emissary", "--"}, c.Command...)
}
c.VolumeMounts = append(c.VolumeMounts, volumeMountVarArgo)
pod.Spec.Containers[i] = c
}
// Add standard environment variables, making pod spec larger
envVars := []apiv1.EnvVar{
{Name: common.EnvVarTemplate, Value: wfv1.MustMarshallJSON(tmpl)},
{Name: common.EnvVarIncludeScriptOutput, Value: strconv.FormatBool(opts.includeScriptOutput)},
{Name: common.EnvVarDeadline, Value: woc.getDeadline(opts).Format(time.RFC3339)},
{Name: common.EnvVarProgressFile, Value: common.ArgoProgressPath},
}
// only set tick durations if progress is enabled. The EnvVarProgressFile is always set (user convenience) but the
// progress is only monitored if the tick durations are >0.
if woc.controller.progressPatchTickDuration != 0 && woc.controller.progressFileTickDuration != 0 {
envVars = append(envVars,
apiv1.EnvVar{
Name: common.EnvVarProgressPatchTickDuration,
Value: woc.controller.progressPatchTickDuration.String(),
},
apiv1.EnvVar{
Name: common.EnvVarProgressFileTickDuration,
Value: woc.controller.progressFileTickDuration.String(),
},
)
}
for i, c := range pod.Spec.InitContainers {
c.Env = append(c.Env, apiv1.EnvVar{Name: common.EnvVarContainerName, Value: c.Name})
c.Env = append(c.Env, envVars...)
pod.Spec.InitContainers[i] = c
}
for i, c := range pod.Spec.Containers {
c.Env = append(c.Env, apiv1.EnvVar{Name: common.EnvVarContainerName, Value: c.Name})
c.Env = append(c.Env, envVars...)
pod.Spec.Containers[i] = c
}
	// Perform one last variable substitution here. Some variables come from the workflow
	// configmap (e.g. archive location) or the volumes attribute, and were not substituted
// in executeTemplate.
pod, err = substitutePodParams(pod, woc.globalParams, tmpl)
if err != nil {
return nil, err
}
// One final check to verify all variables are resolvable for select fields. We are choosing
// only to check ArchiveLocation for now, since everything else should have been substituted
// earlier (i.e. in executeTemplate). But archive location is unique in that the variables
// are formulated from the configmap. We can expand this to other fields as necessary.
for _, c := range pod.Spec.Containers {
for _, e := range c.Env {
if e.Name == common.EnvVarTemplate {
err = json.Unmarshal([]byte(e.Value), tmpl)
if err != nil {
return nil, err
}
for _, obj := range []interface{}{tmpl.ArchiveLocation} {
err = verifyResolvedVariables(obj)
if err != nil {
return nil, err
}
}
}
}
}
// Apply the patch string from template
if woc.hasPodSpecPatch(tmpl) {
jsonstr, err := json.Marshal(pod.Spec)
if err != nil {
return nil, errors.Wrap(err, "", "Failed to marshal the Pod spec")
}
tmpl.PodSpecPatch, err = util.PodSpecPatchMerge(woc.wf, tmpl)
if err != nil {
return nil, errors.Wrap(err, "", "Failed to merge the workflow PodSpecPatch with the template PodSpecPatch due to invalid format")
}
// Final substitution for workflow level PodSpecPatch
localParams := make(map[string]string)
if tmpl.IsPodType() {
localParams[common.LocalVarPodName] = pod.Name
}
tmpl, err := common.ProcessArgs(tmpl, &wfv1.Arguments{}, woc.globalParams, localParams, false, woc.wf.Namespace, woc.controller.configMapInformer)
if err != nil {
return nil, errors.Wrap(err, "", "Failed to substitute the PodSpecPatch variables")
}
if err := json.Unmarshal([]byte(tmpl.PodSpecPatch), &apiv1.PodSpec{}); err != nil {
return nil, fmt.Errorf("invalid podSpecPatch %q: %w", tmpl.PodSpecPatch, err)
}
modJson, err := strategicpatch.StrategicMergePatch(jsonstr, []byte(tmpl.PodSpecPatch), apiv1.PodSpec{})
if err != nil {
return nil, errors.Wrap(err, "", "Error occurred during strategic merge patch")
}
pod.Spec = apiv1.PodSpec{} // zero out the pod spec so we cannot get conflicts
err = json.Unmarshal(modJson, &pod.Spec)
if err != nil {
return nil, errors.Wrap(err, "", "Error in Unmarshalling after merge the patch")
}
}
	// Check if the template has exceeded its timeout duration. If it hasn't, set the applicable activeDeadlineSeconds.
node := woc.wf.GetNodeByName(nodeName)
templateDeadline, err := woc.checkTemplateTimeout(tmpl, node)
if err != nil {
return nil, err
}
if err := woc.scheduleOnDifferentHost(node, pod); err != nil {
return nil, err
}
if templateDeadline != nil && (pod.Spec.ActiveDeadlineSeconds == nil || time.Since(*templateDeadline).Seconds() < float64(*pod.Spec.ActiveDeadlineSeconds)) {
newActiveDeadlineSeconds := int64(time.Until(*templateDeadline).Seconds())
if newActiveDeadlineSeconds <= 1 {
return nil, fmt.Errorf("%s exceeded its deadline", nodeName)
}
woc.log.Debugf("Setting new activeDeadlineSeconds %d for pod %s/%s due to templateDeadline", newActiveDeadlineSeconds, pod.Namespace, pod.Name)
pod.Spec.ActiveDeadlineSeconds = &newActiveDeadlineSeconds
}
if !woc.controller.rateLimiter.Allow() {
return nil, ErrResourceRateLimitReached
}
woc.log.Debugf("Creating Pod: %s (%s)", nodeName, pod.Name)
created, err := woc.controller.kubeclientset.CoreV1().Pods(woc.wf.ObjectMeta.Namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
if apierr.IsAlreadyExists(err) {
// workflow pod names are deterministic. We can get here if the
// controller fails to persist the workflow after creating the pod.
woc.log.Infof("Failed pod %s (%s) creation: already exists", nodeName, pod.Name)
return created, nil
}
if errorsutil.IsTransientErr(err) {
return nil, err
}
woc.log.Infof("Failed to create pod %s (%s): %v", nodeName, pod.Name, err)
return nil, errors.InternalWrapError(err)
}
woc.log.Infof("Created pod: %s (%s)", nodeName, created.Name)
woc.activePods++
return created, nil
}
func (woc *wfOperationCtx) podExists(nodeID string) (existing *apiv1.Pod, exists bool, err error) {
objs, err := woc.controller.podInformer.GetIndexer().ByIndex(indexes.NodeIDIndex, woc.wf.Namespace+"/"+nodeID)
if err != nil {
return nil, false, fmt.Errorf("failed to get pod from informer store: %w", err)
}
objectCount := len(objs)
if objectCount == 0 {
return nil, false, nil
}
if objectCount > 1 {
return nil, false, fmt.Errorf("expected < 2 pods, got %d - this is a bug", len(objs))
}
if existing, ok := objs[0].(*apiv1.Pod); ok {
return existing, true, nil
}
return nil, false, nil
}
func (woc *wfOperationCtx) getDeadline(opts *createWorkflowPodOpts) *time.Time {
deadline := time.Time{}
if woc.workflowDeadline != nil {
deadline = *woc.workflowDeadline
}
if !opts.executionDeadline.IsZero() && (deadline.IsZero() || opts.executionDeadline.Before(deadline)) {
deadline = opts.executionDeadline
}
return &deadline
}
func (woc *wfOperationCtx) getImage(image string) config.Image {
if woc.controller.Config.Images == nil {
return config.Image{}
}
return woc.controller.Config.Images[image]
}
// substitutePodParams returns a pod spec with parameter references substituted as well as pod.name
func substitutePodParams(pod *apiv1.Pod, globalParams common.Parameters, tmpl *wfv1.Template) (*apiv1.Pod, error) {
podParams := globalParams.DeepCopy()
for _, inParam := range tmpl.Inputs.Parameters {
podParams["inputs.parameters."+inParam.Name] = inParam.Value.String()
}
podParams[common.LocalVarPodName] = pod.Name
specBytes, err := json.Marshal(pod)
if err != nil {
return nil, err
}
newSpecBytes, err := template.Replace(string(specBytes), podParams, true)
if err != nil {
return nil, err
}
var newSpec apiv1.Pod
err = json.Unmarshal([]byte(newSpecBytes), &newSpec)
if err != nil {
return nil, errors.InternalWrapError(err)
}
return &newSpec, nil
}
func (woc *wfOperationCtx) newInitContainer(tmpl *wfv1.Template) apiv1.Container {
ctr := woc.newExecContainer(common.InitContainerName, tmpl)
ctr.Command = []string{"argoexec", "init", "--loglevel", getExecutorLogLevel()}
return *ctr
}
func (woc *wfOperationCtx) newWaitContainer(tmpl *wfv1.Template) *apiv1.Container {
ctr := woc.newExecContainer(common.WaitContainerName, tmpl)
ctr.Command = []string{"argoexec", "wait", "--loglevel", getExecutorLogLevel()}
switch woc.getContainerRuntimeExecutor() {
case common.ContainerRuntimeExecutorPNS:
ctr.SecurityContext = &apiv1.SecurityContext{
Capabilities: &apiv1.Capabilities{
Add: []apiv1.Capability{
// necessary to access main's root filesystem when run with a different user id
apiv1.Capability("SYS_PTRACE"),
apiv1.Capability("SYS_CHROOT"),
},
},
}
// PNS_PRIVILEGED allows you to always set privileged on for PNS; this seems to be needed for certain systems
// https://github.com/argoproj/argo-workflows/issues/1256
if hasPrivilegedContainers(tmpl) || os.Getenv("PNS_PRIVILEGED") == "true" {
// if the main or sidecar is privileged, the wait sidecar must also run privileged,
// in order to SIGTERM/SIGKILL the pid
ctr.SecurityContext.Privileged = pointer.BoolPtr(true)
}
case common.ContainerRuntimeExecutorDocker:
ctr.VolumeMounts = append(ctr.VolumeMounts, woc.getVolumeMountDockerSock(tmpl))
}
return ctr
}
func getExecutorLogLevel() string {
return log.GetLevel().String()
}
// hasPrivilegedContainers tests whether the main container or any sidecar is privileged
func hasPrivilegedContainers(tmpl *wfv1.Template) bool {
if containerIsPrivileged(tmpl.Container) {
return true
}
for _, side := range tmpl.Sidecars {
if containerIsPrivileged(&side.Container) {
return true
}
}
return false
}
func containerIsPrivileged(ctr *apiv1.Container) bool {
if ctr != nil && ctr.SecurityContext != nil && ctr.SecurityContext.Privileged != nil && *ctr.SecurityContext.Privileged {
return true
}
return false
}
func (woc *wfOperationCtx) createEnvVars() []apiv1.EnvVar {
execEnvVars := []apiv1.EnvVar{
{
Name: common.EnvVarPodName,
ValueFrom: &apiv1.EnvVarSource{
FieldRef: &apiv1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
},
{
Name: common.EnvVarContainerRuntimeExecutor,
Value: woc.getContainerRuntimeExecutor(),
},
// This flag was introduced in Go 1.15 and will be removed in Go 1.16.
// x509: cannot validate certificate for ... because it doesn't contain any IP SANs
// https://github.com/argoproj/argo-workflows/issues/5563 - Upgrade to Go 1.16
// https://github.com/golang/go/issues/39568
{
Name: "GODEBUG",
Value: "x509ignoreCN=0",
},
{
Name: common.EnvVarWorkflowName,
Value: woc.wf.Name,
},
}
if woc.controller.Config.Executor != nil {
execEnvVars = append(execEnvVars, woc.controller.Config.Executor.Env...)
}
switch woc.getContainerRuntimeExecutor() {
case common.ContainerRuntimeExecutorKubelet:
execEnvVars = append(execEnvVars,
apiv1.EnvVar{
Name: common.EnvVarDownwardAPINodeIP,
ValueFrom: &apiv1.EnvVarSource{
FieldRef: &apiv1.ObjectFieldSelector{
FieldPath: "status.hostIP",
},
},
},
apiv1.EnvVar{
Name: common.EnvVarKubeletPort,
Value: strconv.Itoa(woc.controller.Config.KubeletPort),
},
apiv1.EnvVar{
Name: common.EnvVarKubeletInsecure,
Value: strconv.FormatBool(woc.controller.Config.KubeletInsecure),
},
)
}
return execEnvVars
}
func (woc *wfOperationCtx) createVolumes(tmpl *wfv1.Template) []apiv1.Volume {
var volumes []apiv1.Volume
if woc.controller.Config.KubeConfig != nil {
name := woc.controller.Config.KubeConfig.VolumeName
if name == "" {
name = common.KubeConfigDefaultVolumeName
}
volumes = append(volumes, apiv1.Volume{
Name: name,
VolumeSource: apiv1.VolumeSource{
Secret: &apiv1.SecretVolumeSource{
SecretName: woc.controller.Config.KubeConfig.SecretName,
},
},
})
}
volumes = append(volumes, volumeVarArgo)
switch woc.getContainerRuntimeExecutor() {
case common.ContainerRuntimeExecutorDocker:
volumes = append(volumes, woc.getVolumeDockerSock(tmpl))
}
volumes = append(volumes, tmpl.Volumes...)
return volumes
}
func (woc *wfOperationCtx) newExecContainer(name string, tmpl *wfv1.Template) *apiv1.Container {
exec := apiv1.Container{
Name: name,
Image: woc.controller.executorImage(),
ImagePullPolicy: woc.controller.executorImagePullPolicy(),
Env: woc.createEnvVars(),
}
if woc.controller.Config.Executor != nil {
exec.Args = woc.controller.Config.Executor.Args
if woc.controller.Config.Executor.SecurityContext != nil {
exec.SecurityContext = woc.controller.Config.Executor.SecurityContext.DeepCopy()
}
}
if isResourcesSpecified(woc.controller.Config.Executor) {
exec.Resources = *woc.controller.Config.Executor.Resources.DeepCopy()
} else if woc.controller.Config.ExecutorResources != nil {
exec.Resources = *woc.controller.Config.ExecutorResources.DeepCopy()
}
if woc.controller.Config.KubeConfig != nil {
path := woc.controller.Config.KubeConfig.MountPath
if path == "" {
path = common.KubeConfigDefaultMountPath
}
name := woc.controller.Config.KubeConfig.VolumeName
if name == "" {
name = common.KubeConfigDefaultVolumeName
}
exec.VolumeMounts = append(exec.VolumeMounts, apiv1.VolumeMount{
Name: name,
MountPath: path,
ReadOnly: true,
SubPath: woc.controller.Config.KubeConfig.SecretKey,
})
exec.Args = append(exec.Args, "--kubeconfig="+path)
}
executorServiceAccountName := ""
if tmpl.Executor != nil && tmpl.Executor.ServiceAccountName != "" {
executorServiceAccountName = tmpl.Executor.ServiceAccountName
} else if woc.execWf.Spec.Executor != nil && woc.execWf.Spec.Executor.ServiceAccountName != "" {
executorServiceAccountName = woc.execWf.Spec.Executor.ServiceAccountName
}
if executorServiceAccountName != "" {
exec.VolumeMounts = append(exec.VolumeMounts, apiv1.VolumeMount{
Name: common.ServiceAccountTokenVolumeName,
MountPath: common.ServiceAccountTokenMountPath,
ReadOnly: true,
})
}
return &exec
}
func isResourcesSpecified(ctr *apiv1.Container) bool {
return ctr != nil && (len(ctr.Resources.Limits) != 0 || len(ctr.Resources.Requests) != 0)
}
// addMetadata applies metadata specified in the template
func (woc *wfOperationCtx) addMetadata(pod *apiv1.Pod, tmpl *wfv1.Template) {
if woc.execWf.Spec.PodMetadata != nil {
// add workflow-level pod annotations and labels
for k, v := range woc.execWf.Spec.PodMetadata.Annotations {
pod.ObjectMeta.Annotations[k] = v
}
for k, v := range woc.execWf.Spec.PodMetadata.Labels {
pod.ObjectMeta.Labels[k] = v
}
}
for k, v := range tmpl.Metadata.Annotations {
pod.ObjectMeta.Annotations[k] = v
}
for k, v := range tmpl.Metadata.Labels {
pod.ObjectMeta.Labels[k] = v
}
}
// addSchedulingConstraints applies any node selectors or affinity rules to the pod, either set in the workflow or the template
func addSchedulingConstraints(pod *apiv1.Pod, wfSpec *wfv1.WorkflowSpec, tmpl *wfv1.Template) {
// Set nodeSelector (if specified)
if len(tmpl.NodeSelector) > 0 {
pod.Spec.NodeSelector = tmpl.NodeSelector
} else if len(wfSpec.NodeSelector) > 0 {
pod.Spec.NodeSelector = wfSpec.NodeSelector
}
// Set affinity (if specified)
if tmpl.Affinity != nil {
pod.Spec.Affinity = tmpl.Affinity
} else if wfSpec.Affinity != nil {
pod.Spec.Affinity = wfSpec.Affinity
}
// Set tolerations (if specified)
if len(tmpl.Tolerations) > 0 {
pod.Spec.Tolerations = tmpl.Tolerations
} else if len(wfSpec.Tolerations) > 0 {
pod.Spec.Tolerations = wfSpec.Tolerations
}
// Set scheduler name (if specified)
if tmpl.SchedulerName != "" {
pod.Spec.SchedulerName = tmpl.SchedulerName
} else if wfSpec.SchedulerName != "" {
pod.Spec.SchedulerName = wfSpec.SchedulerName
}
// Set priorityClass (if specified)
if tmpl.PriorityClassName != "" {
pod.Spec.PriorityClassName = tmpl.PriorityClassName
} else if wfSpec.PodPriorityClassName != "" {
pod.Spec.PriorityClassName = wfSpec.PodPriorityClassName
}
// Set priority (if specified)
if tmpl.Priority != nil {
pod.Spec.Priority = tmpl.Priority
} else if wfSpec.PodPriority != nil {
pod.Spec.Priority = wfSpec.PodPriority
}
// set hostaliases
pod.Spec.HostAliases = append(pod.Spec.HostAliases, wfSpec.HostAliases...)
pod.Spec.HostAliases = append(pod.Spec.HostAliases, tmpl.HostAliases...)
// set pod security context
if tmpl.SecurityContext != nil {
pod.Spec.SecurityContext = tmpl.SecurityContext
} else if wfSpec.SecurityContext != nil {
pod.Spec.SecurityContext = wfSpec.SecurityContext
}
}
// addVolumeReferences adds any volumeMounts that a container/sidecar is referencing, to the pod.spec.volumes
// These are either specified in the workflow.spec.volumes or the workflow.spec.volumeClaimTemplate section
func addVolumeReferences(pod *apiv1.Pod, vols []apiv1.Volume, tmpl *wfv1.Template, pvcs []apiv1.Volume) error {
switch tmpl.GetType() {
case wfv1.TemplateTypeContainer, wfv1.TemplateTypeContainerSet, wfv1.TemplateTypeScript, wfv1.TemplateTypeData:
default:
return nil
}
// getVolByName is a helper to retrieve a volume by its name, either from the volumes or claims section
getVolByName := func(name string) *apiv1.Volume {
// Find a volume from template-local volumes.
for _, vol := range tmpl.Volumes {
if vol.Name == name {
return &vol
}
}
// Find a volume from global volumes.
for _, vol := range vols {
if vol.Name == name {
return &vol
}
}
// Find a volume from pvcs.
for _, pvc := range pvcs {
if pvc.Name == name {
return &pvc
}
}
return nil
}
addVolumeRef := func(volMounts []apiv1.VolumeMount) error {
for _, volMnt := range volMounts {
vol := getVolByName(volMnt.Name)
if vol == nil {
return errors.Errorf(errors.CodeBadRequest, "volume '%s' not found in workflow spec", volMnt.Name)
}
found := false
for _, v := range pod.Spec.Volumes {
if v.Name == vol.Name {
found = true
break
}
}
if !found {
if pod.Spec.Volumes == nil {
pod.Spec.Volumes = make([]apiv1.Volume, 0)
}
pod.Spec.Volumes = append(pod.Spec.Volumes, *vol)
}
}
return nil
}
err := addVolumeRef(tmpl.GetVolumeMounts())
if err != nil {
return err
}
for _, container := range tmpl.InitContainers {
err := addVolumeRef(container.VolumeMounts)
if err != nil {
return err
}
}
for _, sidecar := range tmpl.Sidecars {
err := addVolumeRef(sidecar.VolumeMounts)
if err != nil {
return err
}
}
volumes, volumeMounts := createSecretVolumes(tmpl)
pod.Spec.Volumes = append(pod.Spec.Volumes, volumes...)
for idx, container := range pod.Spec.Containers {
if container.Name == common.WaitContainerName {
pod.Spec.Containers[idx].VolumeMounts = append(pod.Spec.Containers[idx].VolumeMounts, volumeMounts...)
break
}
}
for idx, container := range pod.Spec.InitContainers {
if container.Name == common.InitContainerName {
pod.Spec.InitContainers[idx].VolumeMounts = append(pod.Spec.InitContainers[idx].VolumeMounts, volumeMounts...)
break
}
}
if tmpl.Data != nil {
for idx, container := range pod.Spec.Containers {
if container.Name == common.MainContainerName {
pod.Spec.Containers[idx].VolumeMounts = append(pod.Spec.Containers[idx].VolumeMounts, volumeMounts...)
break
}
}
}
return nil
}
// addInputArtifactsVolumes sets up the artifacts volume on the pod to support input artifacts to containers.
// In order to support input artifacts, the init container shares an emptyDir volume with the main container.
// It is the responsibility of the init container to load all artifacts to the mounted emptyDir location.
// (e.g. /inputs/artifacts/CODE). The shared emptyDir is mapped to the user's desired location in the main
// container.
//
// It is possible that a user specifies overlapping paths of an artifact path with a volume mount,
// (e.g. user wants an external volume mounted at /src, while simultaneously wanting an input artifact
// placed at /src/some/subdirectory). When this occurs, we need to prevent the duplicate bind mounting of
// overlapping volumes, since the outer volume will not see the changes made in the artifact emptyDir.
//
// To prevent overlapping bind mounts, both the controller and executor will recognize the overlap between
// the explicit volume mount and the artifact emptyDir and prevent all uses of the emptyDir for purposes of
// loading data. The controller will omit mounting the emptyDir to the artifact path, and the executor
// will load the artifact into the user's volume (as opposed to the emptyDir). An illustrative sketch of the
// overlap check follows this function.
func (woc *wfOperationCtx) addInputArtifactsVolumes(pod *apiv1.Pod, tmpl *wfv1.Template) error {
if len(tmpl.Inputs.Artifacts) == 0 {
return nil
}
artVol := apiv1.Volume{
Name: "input-artifacts",
VolumeSource: apiv1.VolumeSource{
EmptyDir: &apiv1.EmptyDirVolumeSource{},
},
}
pod.Spec.Volumes = append(pod.Spec.Volumes, artVol)
for i, initCtr := range pod.Spec.InitContainers {
if initCtr.Name == common.InitContainerName {
volMount := apiv1.VolumeMount{
Name: artVol.Name,
MountPath: common.ExecutorArtifactBaseDir,
}
initCtr.VolumeMounts = append(initCtr.VolumeMounts, volMount)
// We also add the user supplied mount paths to the init container,
// in case the executor needs to load artifacts to this volume
// instead of the artifacts volume
for _, mnt := range tmpl.GetVolumeMounts() {
if util.IsWindowsUNCPath(mnt.MountPath, tmpl) {
continue
}
mnt.MountPath = filepath.Join(common.ExecutorMainFilesystemDir, mnt.MountPath)
initCtr.VolumeMounts = append(initCtr.VolumeMounts, mnt)
}
pod.Spec.InitContainers[i] = initCtr
break
}
}
for i, c := range pod.Spec.Containers {
if c.Name != common.MainContainerName {
continue
}
for _, art := range tmpl.Inputs.Artifacts {
if art.Path == "" {
return errors.Errorf(errors.CodeBadRequest, "inputs.artifacts.%s did not specify a path", art.Name)
}
if !art.HasLocationOrKey() && art.Optional {
woc.log.Infof("skip volume mount of %s (%s): optional artifact was not provided",
art.Name, art.Path)
continue
}
overlap := common.FindOverlappingVolume(tmpl, art.Path)
if overlap != nil {
// artifact path overlaps with a mounted volume. do not mount the
// artifacts emptydir to the main container. init would have copied
// the artifact to the user's volume instead
woc.log.Debugf("skip volume mount of %s (%s): overlaps with mount %s at %s",
art.Name, art.Path, overlap.Name, overlap.MountPath)
continue
}
volMount := apiv1.VolumeMount{
Name: artVol.Name,
MountPath: art.Path,
SubPath: art.Name,
}
c.VolumeMounts = append(c.VolumeMounts, volMount)
}
pod.Spec.Containers[i] = c
}
return nil
}
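// exampleArtifactMountOverlap is a hedged, illustrative sketch and is not part of the
// original file. It restates the overlap rule described above using only path handling:
// an artifact destined for a path underneath an existing volume mount must not get its
// own emptyDir mount, because the outer volume would hide the artifact data. The real
// check is common.FindOverlappingVolume (see the call site above); this stand-in simply
// walks the artifact path upwards and compares it against the explicit mount paths.
func exampleArtifactMountOverlap(artifactPath string, mounts []apiv1.VolumeMount) *apiv1.VolumeMount {
	for p := artifactPath; ; p = filepath.Dir(p) {
		for i := range mounts {
			if mounts[i].MountPath == p {
				// e.g. an artifact at /src/some/subdirectory overlaps a mount at /src
				return &mounts[i]
			}
		}
		if p == filepath.Dir(p) {
			// reached the root ("/" or "."); no overlap found
			return nil
		}
	}
}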
// addOutputArtifactsVolumes mirrors any volume mounts in the main container to the wait sidecar.
// For any output artifacts that were produced in mounted volumes (e.g. PVCs, emptyDirs), the
// wait container will collect the artifacts directly from volumeMount instead of `docker cp`-ing
// them to the wait sidecar. In order for this to work, we mirror all volume mounts in the main
// container under a well-known path.
func addOutputArtifactsVolumes(pod *apiv1.Pod, tmpl *wfv1.Template) {
if tmpl.GetType() == wfv1.TemplateTypeResource || tmpl.GetType() == wfv1.TemplateTypeData {
return
}
waitCtrIndex, err := util.FindWaitCtrIndex(pod)
if err != nil {
log.Info("Could not find wait container in pod spec")
return
}
waitCtr := &pod.Spec.Containers[waitCtrIndex]
for _, c := range pod.Spec.Containers {
if c.Name != common.MainContainerName {
continue
}
for _, mnt := range c.VolumeMounts {
if util.IsWindowsUNCPath(mnt.MountPath, tmpl) {
continue
}
mnt.MountPath = filepath.Join(common.ExecutorMainFilesystemDir, mnt.MountPath)
// ReadOnly is needed to be false for overlapping volume mounts
mnt.ReadOnly = false
waitCtr.VolumeMounts = append(waitCtr.VolumeMounts, mnt)
}
}
pod.Spec.Containers[waitCtrIndex] = *waitCtr
}
// addArchiveLocation conditionally updates the template with the default artifact repository
// information configured in the controller, for the purposes of archiving outputs. This is skipped
// for templates which do not need to archive anything, or have explicitly set an archive location
// in the template.
func (woc *wfOperationCtx) addArchiveLocation(tmpl *wfv1.Template) {
if tmpl.ArchiveLocation.HasLocation() {
// User explicitly set the location. nothing else to do.
return
}
archiveLogs := woc.IsArchiveLogs(tmpl)
needLocation := archiveLogs
for _, art := range append(tmpl.Inputs.Artifacts, tmpl.Outputs.Artifacts...) {
if !art.HasLocation() {
needLocation = true
}
}
woc.log.WithField("needLocation", needLocation).Debug()
if !needLocation {
return
}
tmpl.ArchiveLocation = woc.artifactRepository.ToArtifactLocation()
tmpl.ArchiveLocation.ArchiveLogs = &archiveLogs
}
// IsArchiveLogs determines whether the container should archive logs
// priorities: controller(on) > template > workflow > controller(off)
func (woc *wfOperationCtx) IsArchiveLogs(tmpl *wfv1.Template) bool {
archiveLogs := woc.artifactRepository.IsArchiveLogs()
if !archiveLogs {
if woc.execWf.Spec.ArchiveLogs != nil {
archiveLogs = *woc.execWf.Spec.ArchiveLogs
}
if tmpl.ArchiveLocation != nil && tmpl.ArchiveLocation.ArchiveLogs != nil {
archiveLogs = *tmpl.ArchiveLocation.ArchiveLogs
}
}
return archiveLogs
}
// setupServiceAccount sets up service account and token.
func (woc *wfOperationCtx) setupServiceAccount(ctx context.Context, pod *apiv1.Pod, tmpl *wfv1.Template) error {
if tmpl.ServiceAccountName != "" {
pod.Spec.ServiceAccountName = tmpl.ServiceAccountName
} else if woc.execWf.Spec.ServiceAccountName != "" {
pod.Spec.ServiceAccountName = woc.execWf.Spec.ServiceAccountName
}
var automountServiceAccountToken *bool
if tmpl.AutomountServiceAccountToken != nil {
automountServiceAccountToken = tmpl.AutomountServiceAccountToken
} else if woc.execWf.Spec.AutomountServiceAccountToken != nil {
automountServiceAccountToken = woc.execWf.Spec.AutomountServiceAccountToken
}
if automountServiceAccountToken != nil && !*automountServiceAccountToken {
pod.Spec.AutomountServiceAccountToken = automountServiceAccountToken
}
executorServiceAccountName := ""
if tmpl.Executor != nil && tmpl.Executor.ServiceAccountName != "" {
executorServiceAccountName = tmpl.Executor.ServiceAccountName
} else if woc.execWf.Spec.Executor != nil && woc.execWf.Spec.Executor.ServiceAccountName != "" {
executorServiceAccountName = woc.execWf.Spec.Executor.ServiceAccountName
}
if executorServiceAccountName != "" {
tokenName, err := common.GetServiceAccountTokenName(ctx, woc.controller.kubeclientset, pod.Namespace, executorServiceAccountName)
if err != nil {
return err
}
pod.Spec.Volumes = append(pod.Spec.Volumes, apiv1.Volume{
Name: common.ServiceAccountTokenVolumeName,
VolumeSource: apiv1.VolumeSource{
Secret: &apiv1.SecretVolumeSource{
SecretName: tokenName,
},
},
})
} else if automountServiceAccountToken != nil && !*automountServiceAccountToken {
return errors.Errorf(errors.CodeBadRequest, "executor.serviceAccountName must not be empty if automountServiceAccountToken is false")
}
return nil
}
// addScriptStagingVolume sets up a shared staging volume between the init container
// and main container for the purpose of holding the script source code for script templates
func addScriptStagingVolume(pod *apiv1.Pod) {
volName := "argo-staging"
stagingVol := apiv1.Volume{
Name: volName,
VolumeSource: apiv1.VolumeSource{
EmptyDir: &apiv1.EmptyDirVolumeSource{},
},
}
pod.Spec.Volumes = append(pod.Spec.Volumes, stagingVol)
for i, initCtr := range pod.Spec.InitContainers {
if initCtr.Name == common.InitContainerName {
volMount := apiv1.VolumeMount{
Name: volName,
MountPath: common.ExecutorStagingEmptyDir,
}
initCtr.VolumeMounts = append(initCtr.VolumeMounts, volMount)
pod.Spec.InitContainers[i] = initCtr
break
}
}
found := false
for i, ctr := range pod.Spec.Containers {
if ctr.Name == common.MainContainerName {
volMount := apiv1.VolumeMount{
Name: volName,
MountPath: common.ExecutorStagingEmptyDir,
}
ctr.VolumeMounts = append(ctr.VolumeMounts, volMount)
pod.Spec.Containers[i] = ctr
found = true
break
}
}
if !found {
panic("Unable to locate main container")
}
}
// addInitContainers adds all init containers to the pod spec of the step.
// Optionally mirrors volume mounts from the main container to the init containers
func addInitContainers(pod *apiv1.Pod, tmpl *wfv1.Template) {
mainCtr := findMainContainer(pod)
for _, ctr := range tmpl.InitContainers {
log.Debugf("Adding init container %s", ctr.Name)
if mainCtr != nil && ctr.MirrorVolumeMounts != nil && *ctr.MirrorVolumeMounts {
mirrorVolumeMounts(mainCtr, &ctr.Container)
}
pod.Spec.InitContainers = append(pod.Spec.InitContainers, ctr.Container)
}
}
// addSidecars adds all sidecars to the pod spec of the step.
// Optionally mirrors volume mounts from the main container to the sidecars
func addSidecars(pod *apiv1.Pod, tmpl *wfv1.Template) {
mainCtr := findMainContainer(pod)
for _, sidecar := range tmpl.Sidecars {
log.Debugf("Adding sidecar container %s", sidecar.Name)
if mainCtr != nil && sidecar.MirrorVolumeMounts != nil && *sidecar.MirrorVolumeMounts {
mirrorVolumeMounts(mainCtr, &sidecar.Container)
}
pod.Spec.Containers = append(pod.Spec.Containers, sidecar.Container)
}
}
// verifyResolvedVariables is a helper to ensure all {{variables}} have been resolved for an object
func verifyResolvedVariables(obj interface{}) error {
str, err := json.Marshal(obj)
if err != nil {
return err
}
return template.Validate(string(str), func(tag string) error {
return errors.Errorf(errors.CodeBadRequest, "failed to resolve {{%s}}", tag)
})
}
// createSecretVolumes retrieves and creates the Volume and VolumeMount objects for the Pod (an illustrative usage sketch follows createSecretVal below)
func createSecretVolumes(tmpl *wfv1.Template) ([]apiv1.Volume, []apiv1.VolumeMount) {
allVolumesMap := make(map[string]apiv1.Volume)
uniqueKeyMap := make(map[string]bool)
var secretVolumes []apiv1.Volume
var secretVolMounts []apiv1.VolumeMount
createArchiveLocationSecret(tmpl, allVolumesMap, uniqueKeyMap)
for _, art := range tmpl.Outputs.Artifacts {
createSecretVolume(allVolumesMap, art, uniqueKeyMap)
}
for _, art := range tmpl.Inputs.Artifacts {
createSecretVolume(allVolumesMap, art, uniqueKeyMap)
}
if tmpl.Data != nil {
if art, needed := tmpl.Data.Source.GetArtifactIfNeeded(); needed {
createSecretVolume(allVolumesMap, *art, uniqueKeyMap)
}
}
for volMountName, val := range allVolumesMap {
secretVolumes = append(secretVolumes, val)
secretVolMounts = append(secretVolMounts, apiv1.VolumeMount{
Name: volMountName,
MountPath: common.SecretVolMountPath + "/" + val.Name,
ReadOnly: true,
})
}
return secretVolumes, secretVolMounts
}
func createArchiveLocationSecret(tmpl *wfv1.Template, volMap map[string]apiv1.Volume, uniqueKeyMap map[string]bool) {
if tmpl.ArchiveLocation == nil {
return
}
if s3ArtRepo := tmpl.ArchiveLocation.S3; s3ArtRepo != nil {
createSecretVal(volMap, s3ArtRepo.AccessKeySecret, uniqueKeyMap)
createSecretVal(volMap, s3ArtRepo.SecretKeySecret, uniqueKeyMap)
} else if hdfsArtRepo := tmpl.ArchiveLocation.HDFS; hdfsArtRepo != nil {
createSecretVal(volMap, hdfsArtRepo.KrbKeytabSecret, uniqueKeyMap)
createSecretVal(volMap, hdfsArtRepo.KrbCCacheSecret, uniqueKeyMap)
} else if artRepo := tmpl.ArchiveLocation.Artifactory; artRepo != nil {
createSecretVal(volMap, artRepo.UsernameSecret, uniqueKeyMap)
createSecretVal(volMap, artRepo.PasswordSecret, uniqueKeyMap)
} else if gitRepo := tmpl.ArchiveLocation.Git; gitRepo != nil {
createSecretVal(volMap, gitRepo.UsernameSecret, uniqueKeyMap)
createSecretVal(volMap, gitRepo.PasswordSecret, uniqueKeyMap)
createSecretVal(volMap, gitRepo.SSHPrivateKeySecret, uniqueKeyMap)
} else if ossRepo := tmpl.ArchiveLocation.OSS; ossRepo != nil {
createSecretVal(volMap, ossRepo.AccessKeySecret, uniqueKeyMap)
createSecretVal(volMap, ossRepo.SecretKeySecret, uniqueKeyMap)
} else if gcsRepo := tmpl.ArchiveLocation.GCS; gcsRepo != nil {
createSecretVal(volMap, gcsRepo.ServiceAccountKeySecret, uniqueKeyMap)
}
}
func createSecretVolume(volMap map[string]apiv1.Volume, art wfv1.Artifact, keyMap map[string]bool) {
if art.S3 != nil {
createSecretVal(volMap, art.S3.AccessKeySecret, keyMap)
createSecretVal(volMap, art.S3.SecretKeySecret, keyMap)
} else if art.Git != nil {
createSecretVal(volMap, art.Git.UsernameSecret, keyMap)
createSecretVal(volMap, art.Git.PasswordSecret, keyMap)
createSecretVal(volMap, art.Git.SSHPrivateKeySecret, keyMap)
} else if art.Artifactory != nil {
createSecretVal(volMap, art.Artifactory.UsernameSecret, keyMap)
createSecretVal(volMap, art.Artifactory.PasswordSecret, keyMap)
} else if art.HDFS != nil {
createSecretVal(volMap, art.HDFS.KrbCCacheSecret, keyMap)
createSecretVal(volMap, art.HDFS.KrbKeytabSecret, keyMap)
} else if art.OSS != nil {
createSecretVal(volMap, art.OSS.AccessKeySecret, keyMap)
createSecretVal(volMap, art.OSS.SecretKeySecret, keyMap)
} else if art.GCS != nil {
createSecretVal(volMap, art.GCS.ServiceAccountKeySecret, keyMap)
}
}
func createSecretVal(volMap map[string]apiv1.Volume, secret *apiv1.SecretKeySelector, keyMap map[string]bool) {
if secret == nil || secret.Name == "" || secret.Key == "" {
return
}
if vol, ok := volMap[secret.Name]; ok {
key := apiv1.KeyToPath{
Key: secret.Key,
Path: secret.Key,
}
if val := keyMap[secret.Name+"-"+secret.Key]; !val {
keyMap[secret.Name+"-"+secret.Key] = true
vol.Secret.Items = append(vol.Secret.Items, key)
}
} else {
volume := apiv1.Volume{
Name: secret.Name,
VolumeSource: apiv1.VolumeSource{
Secret: &apiv1.SecretVolumeSource{
SecretName: secret.Name,
Items: []apiv1.KeyToPath{
{
Key: secret.Key,
Path: secret.Key,
},
},
},
},
}
keyMap[secret.Name+"-"+secret.Key] = true
volMap[secret.Name] = volume
}
}
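// exampleSharedSecretVolume is a hedged, illustrative sketch and is not part of the
// original file. It shows how createSecretVal de-duplicates keys of the same secret:
// two selectors naming the same (hypothetical) secret "creds" end up as one volume whose
// Secret.Items carries one KeyToPath per referenced key, because the second call appends
// to the shared *SecretVolumeSource instead of adding another volume.
func exampleSharedSecretVolume() apiv1.Volume {
	volMap := make(map[string]apiv1.Volume)
	keyMap := make(map[string]bool)
	createSecretVal(volMap, &apiv1.SecretKeySelector{LocalObjectReference: apiv1.LocalObjectReference{Name: "creds"}, Key: "accesskey"}, keyMap)
	createSecretVal(volMap, &apiv1.SecretKeySelector{LocalObjectReference: apiv1.LocalObjectReference{Name: "creds"}, Key: "secretkey"}, keyMap)
	// volMap["creds"].Secret.Items now lists both "accesskey" and "secretkey".
	return volMap["creds"]
}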
// findMainContainer finds the main container
func findMainContainer(pod *apiv1.Pod) *apiv1.Container {
for _, ctr := range pod.Spec.Containers {
if common.MainContainerName == ctr.Name {
return &ctr
}
}
return nil
}
// mirrorVolumeMounts mirrors volumeMounts of source container to target container
func mirrorVolumeMounts(sourceContainer, targetContainer *apiv1.Container) {
for _, volMnt := range sourceContainer.VolumeMounts {
if targetContainer.VolumeMounts == nil {
targetContainer.VolumeMounts = make([]apiv1.VolumeMount, 0)
}
log.Debugf("Adding volume mount %v to container %v", volMnt.Name, targetContainer.Name)
targetContainer.VolumeMounts = append(targetContainer.VolumeMounts, volMnt)
}
}
| ["\"PNS_PRIVILEGED\""] | [] | ["PNS_PRIVILEGED"] | [] | ["PNS_PRIVILEGED"] | go | 1 | 0 |
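For context on the podSpecPatch handling near the top of the file above, the following standalone sketch replays the marshal, strategic-merge, and unmarshal round trip in isolation. It assumes the k8s.io/api and k8s.io/apimachinery modules are available, the patch value is purely hypothetical, and the sketch is an illustration rather than part of the repository above.

// podspecpatch_example.go — hedged sketch of applying a pod spec patch via a strategic merge.
package main

import (
	"encoding/json"
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	spec := apiv1.PodSpec{
		Containers: []apiv1.Container{{Name: "main", Image: "alpine:3.14"}},
	}
	original, err := json.Marshal(spec)
	if err != nil {
		panic(err)
	}

	// Hypothetical patch; field names follow the PodSpec JSON schema.
	patch := []byte(`{"activeDeadlineSeconds": 300}`)

	merged, err := strategicpatch.StrategicMergePatch(original, patch, apiv1.PodSpec{})
	if err != nil {
		panic(err)
	}

	// Start from a zero value before unmarshalling, mirroring the controller code above.
	spec = apiv1.PodSpec{}
	if err := json.Unmarshal(merged, &spec); err != nil {
		panic(err)
	}
	fmt.Println(*spec.ActiveDeadlineSeconds) // 300
}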
reana_workflow_engine_snakemake/config.py
|
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2021 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA Workflow Engine Snakemake configuration."""
import os
MOUNT_CVMFS = os.getenv("REANA_MOUNT_CVMFS", "false")
LOGGING_MODULE = "reana-workflow-engine-snakemake"
"""REANA Workflow Engine Snakemake logging module."""
DEFAULT_SNAKEMAKE_REPORT_FILENAME = "report.html"
"""Snakemake report default filename."""
| [] | [] | ["REANA_MOUNT_CVMFS"] | [] | ["REANA_MOUNT_CVMFS"] | python | 1 | 0 |
node/config/def.go
|
package config
import (
"encoding"
"os"
"strconv"
"time"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
)
const (
// RetrievalPricingDefault configures the node to use the default retrieval pricing policy.
RetrievalPricingDefaultMode = "default"
// RetrievalPricingExternal configures the node to use the external retrieval pricing script
// configured by the user.
RetrievalPricingExternalMode = "external"
)
// MaxTraversalLinks configures the maximum number of links to traverse in a DAG while calculating
// CommP and traversing a DAG with graphsync; invokes a budget on DAG depth and density.
var MaxTraversalLinks uint64 = 32 * (1 << 20)
func init() {
if envMaxTraversal, err := strconv.ParseUint(os.Getenv("LOTUS_MAX_TRAVERSAL_LINKS"), 10, 64); err == nil {
MaxTraversalLinks = envMaxTraversal
}
}
func (b *BatchFeeConfig) FeeForSectors(nSectors int) abi.TokenAmount {
return big.Add(big.Int(b.Base), big.Mul(big.NewInt(int64(nSectors)), big.Int(b.PerSector)))
}
func defCommon() Common {
return Common{
API: API{
ListenAddress: "/ip4/127.0.0.1/tcp/1234/http",
Timeout: Duration(30 * time.Second),
},
Libp2p: Libp2p{
ListenAddresses: []string{
"/ip4/0.0.0.0/tcp/0",
"/ip6/::/tcp/0",
},
AnnounceAddresses: []string{},
NoAnnounceAddresses: []string{},
ConnMgrLow: 150,
ConnMgrHigh: 180,
ConnMgrGrace: Duration(20 * time.Second),
},
Pubsub: Pubsub{
Bootstrapper: false,
DirectPeers: nil,
},
}
}
var DefaultDefaultMaxFee = types.MustParseFIL("0.07")
var DefaultSimultaneousTransfers = uint64(20)
// DefaultFullNode returns the default config
func DefaultFullNode() *FullNode {
return &FullNode{
Common: defCommon(),
Fees: FeeConfig{
DefaultMaxFee: DefaultDefaultMaxFee,
},
Client: Client{
SimultaneousTransfersForStorage: DefaultSimultaneousTransfers,
SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers,
},
Chainstore: Chainstore{
EnableSplitstore: false,
Splitstore: Splitstore{
ColdStoreType: "universal",
HotStoreType: "badger",
MarkSetType: "map",
HotStoreFullGCFrequency: 20,
},
},
}
}
func DefaultStorageMiner() *StorageMiner {
cfg := &StorageMiner{
Common: defCommon(),
Sealing: SealingConfig{
MaxWaitDealsSectors: 2, // 64G with 32G sectors
MaxSealingSectors: 0,
MaxSealingSectorsForDeals: 0,
WaitDealsDelay: Duration(time.Hour * 6),
AlwaysKeepUnsealedCopy: true,
FinalizeEarly: false,
CollateralFromMinerBalance: false,
AvailableBalanceBuffer: types.FIL(big.Zero()),
DisableCollateralFallback: false,
BatchPreCommits: true,
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket
PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
CommittedCapacitySectorLifetime: Duration(builtin.EpochDurationSeconds * uint64(policy.GetMaxSectorExpirationExtension()) * uint64(time.Second)),
AggregateCommits: true,
MinCommitBatch: miner5.MinAggregatedSectors, // per FIP13, we must have at least four proofs to aggregate, where 4 is the cross over point where aggregation wins out on single provecommit gas costs
MaxCommitBatch: miner5.MaxAggregatedSectors, // maximum 819 sectors, this is the maximum aggregation per FIP13
CommitBatchWait: Duration(24 * time.Hour), // this can be up to 30 days
CommitBatchSlack: Duration(1 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
BatchPreCommitAboveBaseFee: types.FIL(types.BigMul(types.PicoFil, types.NewInt(320))), // 0.32 nFIL
AggregateAboveBaseFee: types.FIL(types.BigMul(types.PicoFil, types.NewInt(320))), // 0.32 nFIL
TerminateBatchMin: 1,
TerminateBatchMax: 100,
TerminateBatchWait: Duration(5 * time.Minute),
},
Storage: sectorstorage.SealerConfig{
AllowAddPiece: true,
AllowPreCommit1: true,
AllowPreCommit2: true,
AllowCommit: true,
AllowUnseal: true,
// Default to 10 - tcp should still be able to figure this out, and
// it's the ratio between 10gbit / 1gbit
ParallelFetchLimit: 10,
// By default use the hardware resource filtering strategy.
ResourceFiltering: sectorstorage.ResourceFilteringHardware,
},
Dealmaking: DealmakingConfig{
ConsiderOnlineStorageDeals: true,
ConsiderOfflineStorageDeals: true,
ConsiderOnlineRetrievalDeals: true,
ConsiderOfflineRetrievalDeals: true,
ConsiderVerifiedStorageDeals: true,
ConsiderUnverifiedStorageDeals: true,
PieceCidBlocklist: []cid.Cid{},
// TODO: It'd be nice to set this based on sector size
MaxDealStartDelay: Duration(time.Hour * 24 * 14),
ExpectedSealDuration: Duration(time.Hour * 24),
PublishMsgPeriod: Duration(time.Hour),
MaxDealsPerPublishMsg: 8,
MaxProviderCollateralMultiplier: 2,
SimultaneousTransfersForStorage: DefaultSimultaneousTransfers,
SimultaneousTransfersForStoragePerClient: 0,
SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers,
StartEpochSealingBuffer: 480, // 480 epochs buffer == 4 hours from adding deal to sector to sector being sealed
RetrievalPricing: &RetrievalPricing{
Strategy: RetrievalPricingDefaultMode,
Default: &RetrievalPricingDefault{
VerifiedDealsFreeTransfer: true,
},
External: &RetrievalPricingExternal{
Path: "",
},
},
},
Subsystems: MinerSubsystemConfig{
EnableMining: true,
EnableSealing: true,
EnableSectorStorage: true,
EnableMarkets: true,
},
Fees: MinerFeeConfig{
MaxPreCommitGasFee: types.MustParseFIL("0.025"),
MaxCommitGasFee: types.MustParseFIL("0.05"),
MaxPreCommitBatchGasFee: BatchFeeConfig{
Base: types.MustParseFIL("0"),
PerSector: types.MustParseFIL("0.02"),
},
MaxCommitBatchGasFee: BatchFeeConfig{
Base: types.MustParseFIL("0"),
PerSector: types.MustParseFIL("0.03"), // enough for 6 agg and 1nFIL base fee
},
MaxTerminateGasFee: types.MustParseFIL("0.5"),
MaxWindowPoStGasFee: types.MustParseFIL("5"),
MaxPublishDealsFee: types.MustParseFIL("0.05"),
MaxMarketBalanceAddFee: types.MustParseFIL("0.007"),
},
Addresses: MinerAddressConfig{
PreCommitControl: []string{},
CommitControl: []string{},
TerminateControl: []string{},
DealPublishControl: []string{},
},
DAGStore: DAGStoreConfig{
MaxConcurrentIndex: 5,
MaxConcurrencyStorageCalls: 100,
GCInterval: Duration(1 * time.Minute),
},
}
cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http"
cfg.Common.API.RemoteListenAddress = "127.0.0.1:2345"
return cfg
}
var _ encoding.TextMarshaler = (*Duration)(nil)
var _ encoding.TextUnmarshaler = (*Duration)(nil)
// Duration is a wrapper type for time.Duration
// for decoding and encoding from/to TOML
type Duration time.Duration
// UnmarshalText implements interface for TOML decoding
func (dur *Duration) UnmarshalText(text []byte) error {
d, err := time.ParseDuration(string(text))
if err != nil {
return err
}
*dur = Duration(d)
return err
}
func (dur Duration) MarshalText() ([]byte, error) {
d := time.Duration(dur)
return []byte(d.String()), nil
}
| ["\"LOTUS_MAX_TRAVERSAL_LINKS\""] | [] | ["LOTUS_MAX_TRAVERSAL_LINKS"] | [] | ["LOTUS_MAX_TRAVERSAL_LINKS"] | go | 1 | 0 |
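The MaxTraversalLinks default in def.go above can be overridden through the LOTUS_MAX_TRAVERSAL_LINKS environment variable. The standalone sketch below replays that parse-or-keep-default pattern so it can be run on its own; the value in the comment is hypothetical and the sketch is not part of the repository above.

// maxtraversal_example.go — hedged sketch of an environment-variable override with a compiled-in default.
package main

import (
	"fmt"
	"os"
	"strconv"
)

// Default mirrors the 32Mi-links value used in def.go above.
var maxTraversalLinks uint64 = 32 * (1 << 20)

func main() {
	// e.g. LOTUS_MAX_TRAVERSAL_LINKS=1048576 ./example
	if v, err := strconv.ParseUint(os.Getenv("LOTUS_MAX_TRAVERSAL_LINKS"), 10, 64); err == nil {
		maxTraversalLinks = v
	}
	fmt.Println("max traversal links:", maxTraversalLinks)
}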
installer2/LinuxInfo.java
|
import java.io.File;
public class LinuxInfo {
public static void main(String[] args) {
String[] files = new String[]
{
"tModLoader.exe",
"tModLoaderServer.exe",
"tModLoaderServer",
"Terraria",
"tModLoader",
"tModLoader-kick",
"tModLoader-mono",
"I18N.dll",
"I18N.West.dll"
};
String[] filesToDelete = new String[]
{
"Terraria.exe.config",
"MP3Sharp.dll",
"Ionic.Zip.Reduced.dll",
"Mono.Cecil.dll"
};
Installer.tryInstall(files, filesToDelete, getInstallDir(), false);
}
private static File getInstallDir() {
File installDir;
String xdgHome = System.getenv("XDG_DATA_HOME");
if (xdgHome != null) {
installDir = new File(xdgHome + "/Steam/steamapps/common/Terraria");
if (installDir.isDirectory()) {
return installDir;
}
}
String home = System.getenv("HOME");
if (home != null) {
installDir = new File(home + "/.local/share/Steam/steamapps/common/Terraria");
if (installDir.isDirectory()) {
return installDir;
}
installDir = new File(home + "/.steam/steam/steamapps/common/Terraria");
if (installDir.isDirectory()) {
return installDir;
}
}
return null;
}
}
| ["\"XDG_DATA_HOME\"", "\"HOME\""] | [] | ["HOME", "XDG_DATA_HOME"] | [] | ["HOME", "XDG_DATA_HOME"] | java | 2 | 0 |
force_displaying_website/force_displaying_website/wsgi.py
|
"""
WSGI config for force_displaying_website project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'force_displaying_website.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 |
nipype/workflows/fmri/fsl/preprocess.py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import nipype.interfaces.fsl as fsl # fsl
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.interfaces.freesurfer as fs # freesurfer
import nipype.interfaces.spm as spm
from ...smri.freesurfer.utils import create_getmask_flow
def getthreshop(thresh):
return ['-thr %.10f -Tmin -bin'%(0.1*val[1]) for val in thresh]
def pickfirst(files):
if isinstance(files, list):
return files[0]
else:
return files
def pickmiddle(files):
from nibabel import load
import numpy as np
middlevol = []
for f in files:
middlevol.append(int(np.ceil(load(f).get_shape()[3]/2)))
return middlevol
def pickvol(filenames, fileidx, which):
from nibabel import load
import numpy as np
if which.lower() == 'first':
idx = 0
elif which.lower() == 'middle':
idx = int(np.ceil(load(filenames[fileidx]).get_shape()[3]/2))
else:
raise Exception('unknown value for volume selection : %s'%which)
return idx
def getbtthresh(medianvals):
return [0.75*val for val in medianvals]
def chooseindex(fwhm):
if fwhm<1:
return [0]
else:
return [1]
def getmeanscale(medianvals):
return ['-mul %.10f'%(10000./val) for val in medianvals]
def getusans(x):
return [[tuple([val[0],0.75*val[1]])] for val in x]
tolist = lambda x: [x]
highpass_operand = lambda x:'-bptf %.10f -1'%x
def create_parallelfeat_preproc(name='featpreproc', highpass=True):
"""Preprocess each run with FSL independently of the others
Parameters
----------
::
name : name of workflow (default: featpreproc)
highpass : boolean (default: True)
Inputs::
inputspec.func : functional runs (filename or list of filenames)
inputspec.fwhm : fwhm for smoothing with SUSAN
inputspec.highpass : HWHM in TRs (if created with highpass=True)
Outputs::
outputspec.reference : volume to which runs are realigned
outputspec.motion_parameters : motion correction parameters
outputspec.realigned_files : motion corrected files
outputspec.motion_plots : plots of motion correction parameters
outputspec.mask : mask file used to mask the brain
outputspec.smoothed_files : smoothed functional data
outputspec.highpassed_files : highpassed functional data (if highpass=True)
outputspec.mean : mean file
Example
-------
>>> preproc = create_parallelfeat_preproc()
>>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii']
>>> preproc.inputs.inputspec.fwhm = 5
>>> preproc.inputs.inputspec.highpass = 128./(2*2.5)
>>> preproc.base_dir = '/tmp'
>>> preproc.run() # doctest: +SKIP
>>> preproc = create_parallelfeat_preproc(highpass=False)
>>> preproc.inputs.inputspec.func = 'f3.nii'
>>> preproc.inputs.inputspec.fwhm = 5
>>> preproc.base_dir = '/tmp'
>>> preproc.run() # doctest: +SKIP
"""
featpreproc = pe.Workflow(name=name)
"""
Set up a node to define all inputs required for the preprocessing workflow
"""
if highpass:
inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
'fwhm',
'highpass']),
name='inputspec')
outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference',
'motion_parameters',
'realigned_files',
'motion_plots',
'mask',
'smoothed_files',
'highpassed_files',
'mean']),
name='outputspec')
else:
inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
'fwhm']),
name='inputspec')
outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference',
'motion_parameters',
'realigned_files',
'motion_plots',
'mask',
'smoothed_files',
'mean']),
name='outputspec')
"""
Set up a node to define outputs for the preprocessing workflow
"""
"""
Convert functional images to float representation. Since there can
be more than one functional run we use a MapNode to convert each
run.
"""
img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float',
op_string = '',
suffix='_dtype'),
iterfield=['in_file'],
name='img2float')
featpreproc.connect(inputnode, 'func', img2float, 'in_file')
"""
Extract the first volume of the first run as the reference
"""
extract_ref = pe.MapNode(interface=fsl.ExtractROI(t_size=1),
iterfield=['in_file', 't_min'],
name = 'extractref')
featpreproc.connect(img2float, 'out_file', extract_ref, 'in_file')
featpreproc.connect(img2float, ('out_file', pickmiddle), extract_ref, 't_min')
featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference')
"""
Realign the functional runs to the reference (1st volume of first run)
"""
motion_correct = pe.MapNode(interface=fsl.MCFLIRT(save_mats = True,
save_plots = True),
name='realign',
iterfield = ['in_file', 'ref_file'])
featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file')
featpreproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file')
featpreproc.connect(motion_correct, 'par_file', outputnode, 'motion_parameters')
featpreproc.connect(motion_correct, 'out_file', outputnode, 'realigned_files')
"""
Plot the estimated motion parameters
"""
plot_motion = pe.MapNode(interface=fsl.PlotMotionParams(in_source='fsl'),
name='plot_motion',
iterfield=['in_file'])
plot_motion.iterables = ('plot_type', ['rotations', 'translations'])
featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file')
featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots')
"""
Extract the mean volume of the first functional run
"""
meanfunc = pe.MapNode(interface=fsl.ImageMaths(op_string = '-Tmean',
suffix='_mean'),
iterfield=['in_file'],
name='meanfunc')
featpreproc.connect(motion_correct, 'out_file', meanfunc, 'in_file')
"""
Strip the skull from the mean functional to generate a mask
"""
meanfuncmask = pe.MapNode(interface=fsl.BET(mask = True,
no_output=True,
frac = 0.3),
iterfield=['in_file'],
name = 'meanfuncmask')
featpreproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file')
"""
Mask the functional runs with the extracted mask
"""
maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet',
op_string='-mas'),
iterfield=['in_file', 'in_file2'],
name = 'maskfunc')
featpreproc.connect(motion_correct, 'out_file', maskfunc, 'in_file')
featpreproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2')
"""
Determine the 2nd and 98th percentile intensities of each functional run
"""
getthresh = pe.MapNode(interface=fsl.ImageStats(op_string='-p 2 -p 98'),
iterfield = ['in_file'],
name='getthreshold')
featpreproc.connect(maskfunc, 'out_file', getthresh, 'in_file')
"""
Threshold the first run of the functional data at 10% of the 98th percentile
"""
threshold = pe.MapNode(interface=fsl.ImageMaths(out_data_type='char',
suffix='_thresh'),
iterfield=['in_file', 'op_string'],
name='threshold')
featpreproc.connect(maskfunc, 'out_file', threshold, 'in_file')
"""
Define a function to get 10% of the intensity
"""
featpreproc.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string')
"""
Determine the median value of the functional runs using the mask
"""
medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
iterfield = ['in_file', 'mask_file'],
name='medianval')
featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file')
featpreproc.connect(threshold, 'out_file', medianval, 'mask_file')
"""
Dilate the mask
"""
dilatemask = pe.MapNode(interface=fsl.ImageMaths(suffix='_dil',
op_string='-dilF'),
iterfield=['in_file'],
name='dilatemask')
featpreproc.connect(threshold, 'out_file', dilatemask, 'in_file')
featpreproc.connect(dilatemask, 'out_file', outputnode, 'mask')
"""
Mask the motion corrected functional runs with the dilated mask
"""
maskfunc2 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
op_string='-mas'),
iterfield=['in_file', 'in_file2'],
name='maskfunc2')
featpreproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file')
featpreproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2')
"""
Smooth each run using SUSAN with the brightness threshold set to 75%
of the median value for each run and a mask constituting the mean
functional
"""
smooth = create_susan_smooth()
featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm')
featpreproc.connect(maskfunc2, 'out_file', smooth, 'inputnode.in_files')
featpreproc.connect(dilatemask, 'out_file', smooth, 'inputnode.mask_file')
"""
Mask the smoothed data with the dilated mask
"""
maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
op_string='-mas'),
iterfield=['in_file', 'in_file2'],
name='maskfunc3')
featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3, 'in_file')
featpreproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2')
concatnode = pe.Node(interface=util.Merge(2),
name='concat')
featpreproc.connect(maskfunc2,('out_file', tolist), concatnode, 'in1')
featpreproc.connect(maskfunc3,('out_file', tolist), concatnode, 'in2')
"""
The following nodes select smooth or unsmoothed data depending on the
fwhm. This is because SUSAN defaults to smoothing the data with about the
voxel size of the input data if the fwhm parameter is less than 1/3 of the
voxel size.
"""
selectnode = pe.Node(interface=util.Select(),name='select')
featpreproc.connect(concatnode, 'out', selectnode, 'inlist')
featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index')
featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files')
"""
Scale each run so that its median value is set to 10000
"""
meanscale = pe.MapNode(interface=fsl.ImageMaths(suffix='_gms'),
iterfield=['in_file','op_string'],
name='meanscale')
featpreproc.connect(selectnode, 'out', meanscale, 'in_file')
"""
Define a function to get the scaling factor for intensity normalization
"""
featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale, 'op_string')
"""
Perform temporal highpass filtering on the data
"""
if highpass:
highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'),
iterfield=['in_file'],
name='highpass')
featpreproc.connect(inputnode, ('highpass', highpass_operand), highpass, 'op_string')
featpreproc.connect(meanscale, 'out_file', highpass, 'in_file')
featpreproc.connect(highpass, 'out_file', outputnode, 'highpassed_files')
"""
Generate a mean functional image from the first run
"""
meanfunc3 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
suffix='_mean'),
iterfield=['in_file'],
name='meanfunc3')
if highpass:
featpreproc.connect(highpass, 'out_file', meanfunc3, 'in_file')
else:
featpreproc.connect(meanscale, 'out_file', meanfunc3, 'in_file')
featpreproc.connect(meanfunc3, 'out_file', outputnode, 'mean')
return featpreproc
def create_featreg_preproc(name='featpreproc', highpass=True, whichvol='middle'):
"""Create a FEAT preprocessing workflow with registration to one volume of the first run
Parameters
----------
::
name : name of workflow (default: featpreproc)
highpass : boolean (default: True)
whichvol : which volume of the first run to register to ('first', 'middle', 'mean')
Inputs::
inputspec.func : functional runs (filename or list of filenames)
inputspec.fwhm : fwhm for smoothing with SUSAN
inputspec.highpass : HWHM in TRs (if created with highpass=True)
Outputs::
outputspec.reference : volume to which runs are realigned
outputspec.motion_parameters : motion correction parameters
outputspec.realigned_files : motion corrected files
outputspec.motion_plots : plots of motion correction parameters
outputspec.mask : mask file used to mask the brain
outputspec.smoothed_files : smoothed functional data
outputspec.highpassed_files : highpassed functional data (if highpass=True)
outputspec.mean : mean file
Example
-------
>>> preproc = create_featreg_preproc()
>>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii']
>>> preproc.inputs.inputspec.fwhm = 5
>>> preproc.inputs.inputspec.highpass = 128./(2*2.5)
>>> preproc.base_dir = '/tmp'
>>> preproc.run() # doctest: +SKIP
>>> preproc = create_featreg_preproc(highpass=False, whichvol='mean')
>>> preproc.inputs.inputspec.func = 'f3.nii'
>>> preproc.inputs.inputspec.fwhm = 5
>>> preproc.base_dir = '/tmp'
>>> preproc.run() # doctest: +SKIP
"""
featpreproc = pe.Workflow(name=name)
"""
Set up a node to define all inputs required for the preprocessing workflow
"""
if highpass:
inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
'fwhm',
'highpass']),
name='inputspec')
outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference',
'motion_parameters',
'realigned_files',
'motion_plots',
'mask',
'smoothed_files',
'highpassed_files',
'mean']),
name='outputspec')
else:
inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
'fwhm']),
name='inputspec')
outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference',
'motion_parameters',
'realigned_files',
'motion_plots',
'mask',
'smoothed_files',
'mean']),
name='outputspec')
"""
Set up a node to define outputs for the preprocessing workflow
"""
"""
Convert functional images to float representation. Since there can
be more than one functional run we use a MapNode to convert each
run.
"""
img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float',
op_string = '',
suffix='_dtype'),
iterfield=['in_file'],
name='img2float')
featpreproc.connect(inputnode, 'func', img2float, 'in_file')
"""
Extract the first volume of the first run as the reference
"""
if whichvol != 'mean':
extract_ref = pe.Node(interface=fsl.ExtractROI(t_size=1),
iterfield=['in_file'],
name = 'extractref')
featpreproc.connect(img2float, ('out_file', pickfirst), extract_ref, 'in_file')
featpreproc.connect(img2float, ('out_file', pickvol, 0, whichvol), extract_ref, 't_min')
featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference')
"""
Realign the functional runs to the reference (1st volume of first run)
"""
motion_correct = pe.MapNode(interface=fsl.MCFLIRT(save_mats = True,
save_plots = True,
interpolation = 'spline'),
name='realign',
iterfield = ['in_file'])
featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file')
if whichvol != 'mean':
featpreproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file')
else:
motion_correct.inputs.mean_vol = True
featpreproc.connect(motion_correct, ('mean_img', pickfirst), outputnode, 'reference')
featpreproc.connect(motion_correct, 'par_file', outputnode, 'motion_parameters')
featpreproc.connect(motion_correct, 'out_file', outputnode, 'realigned_files')
"""
Plot the estimated motion parameters
"""
plot_motion = pe.MapNode(interface=fsl.PlotMotionParams(in_source='fsl'),
name='plot_motion',
iterfield=['in_file'])
plot_motion.iterables = ('plot_type', ['rotations', 'translations'])
featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file')
featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots')
"""
Extract the mean volume of the first functional run
"""
meanfunc = pe.Node(interface=fsl.ImageMaths(op_string = '-Tmean',
suffix='_mean'),
name='meanfunc')
featpreproc.connect(motion_correct, ('out_file', pickfirst), meanfunc, 'in_file')
"""
Strip the skull from the mean functional to generate a mask
"""
meanfuncmask = pe.Node(interface=fsl.BET(mask = True,
no_output=True,
frac = 0.3),
name = 'meanfuncmask')
featpreproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file')
"""
Mask the functional runs with the extracted mask
"""
maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet',
op_string='-mas'),
iterfield=['in_file'],
name = 'maskfunc')
featpreproc.connect(motion_correct, 'out_file', maskfunc, 'in_file')
featpreproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2')
"""
Determine the 2nd and 98th percentile intensities of each functional run
"""
getthresh = pe.MapNode(interface=fsl.ImageStats(op_string='-p 2 -p 98'),
iterfield = ['in_file'],
name='getthreshold')
featpreproc.connect(maskfunc, 'out_file', getthresh, 'in_file')
"""
Threshold the first run of the functional data at 10% of the 98th percentile
"""
threshold = pe.MapNode(interface=fsl.ImageMaths(out_data_type='char',
suffix='_thresh'),
iterfield=['in_file', 'op_string'],
name='threshold')
featpreproc.connect(maskfunc, 'out_file', threshold, 'in_file')
"""
Define a function to get 10% of the intensity
"""
featpreproc.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string')
"""
Determine the median value of the functional runs using the mask
"""
medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
iterfield = ['in_file', 'mask_file'],
name='medianval')
featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file')
featpreproc.connect(threshold, 'out_file', medianval, 'mask_file')
"""
Dilate the mask
"""
dilatemask = pe.MapNode(interface=fsl.ImageMaths(suffix='_dil',
op_string='-dilF'),
iterfield=['in_file'],
name='dilatemask')
featpreproc.connect(threshold, 'out_file', dilatemask, 'in_file')
featpreproc.connect(dilatemask, 'out_file', outputnode, 'mask')
"""
Mask the motion corrected functional runs with the dilated mask
"""
maskfunc2 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
op_string='-mas'),
iterfield=['in_file', 'in_file2'],
name='maskfunc2')
featpreproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file')
featpreproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2')
"""
Smooth each run using SUSAN with the brightness threshold set to 75%
of the median value for each run and a mask constituting the mean
functional
"""
smooth = create_susan_smooth()
featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm')
featpreproc.connect(maskfunc2, 'out_file', smooth, 'inputnode.in_files')
featpreproc.connect(dilatemask, 'out_file', smooth, 'inputnode.mask_file')
"""
Mask the smoothed data with the dilated mask
"""
maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
op_string='-mas'),
iterfield=['in_file', 'in_file2'],
name='maskfunc3')
featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3, 'in_file')
featpreproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2')
concatnode = pe.Node(interface=util.Merge(2),
name='concat')
featpreproc.connect(maskfunc2,('out_file', tolist), concatnode, 'in1')
featpreproc.connect(maskfunc3,('out_file', tolist), concatnode, 'in2')
"""
The following nodes select smooth or unsmoothed data depending on the
fwhm. This is because SUSAN defaults to smoothing the data with about the
voxel size of the input data if the fwhm parameter is less than 1/3 of the
voxel size.
"""
selectnode = pe.Node(interface=util.Select(),name='select')
featpreproc.connect(concatnode, 'out', selectnode, 'inlist')
featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index')
featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files')
"""
Scale each run so that its median value is set to 10000
"""
meanscale = pe.MapNode(interface=fsl.ImageMaths(suffix='_gms'),
iterfield=['in_file','op_string'],
name='meanscale')
featpreproc.connect(selectnode, 'out', meanscale, 'in_file')
"""
Define a function to get the scaling factor for intensity normalization
"""
featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale, 'op_string')
"""
Perform temporal highpass filtering on the data
"""
if highpass:
highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'),
iterfield=['in_file'],
name='highpass')
featpreproc.connect(inputnode, ('highpass', highpass_operand), highpass, 'op_string')
featpreproc.connect(meanscale, 'out_file', highpass, 'in_file')
featpreproc.connect(highpass, 'out_file', outputnode, 'highpassed_files')
"""
Generate a mean functional image from the first run
"""
meanfunc3 = pe.Node(interface=fsl.ImageMaths(op_string='-Tmean',
suffix='_mean'),
iterfield=['in_file'],
name='meanfunc3')
if highpass:
featpreproc.connect(highpass, ('out_file', pickfirst), meanfunc3, 'in_file')
else:
featpreproc.connect(meanscale, ('out_file', pickfirst), meanfunc3, 'in_file')
featpreproc.connect(meanfunc3, 'out_file', outputnode, 'mean')
return featpreproc
def create_susan_smooth(name="susan_smooth", separate_masks=True):
"""Create a SUSAN smoothing workflow
Parameters
----------
::
name : name of workflow (default: susan_smooth)
separate_masks : separate masks for each run
Inputs::
inputnode.in_files : functional runs (filename or list of filenames)
inputnode.fwhm : fwhm for smoothing with SUSAN
inputnode.mask_file : mask used for estimating SUSAN thresholds (but not for smoothing)
Outputs::
outputnode.smoothed_files : functional runs (filename or list of filenames)
Example
-------
>>> smooth = create_susan_smooth()
>>> smooth.inputs.inputnode.in_files = 'f3.nii'
>>> smooth.inputs.inputnode.fwhm = 5
>>> smooth.inputs.inputnode.mask_file = 'mask.nii'
>>> smooth.run() # doctest: +SKIP
"""
susan_smooth = pe.Workflow(name=name)
"""
Set up a node to define all inputs required for the preprocessing workflow
"""
inputnode = pe.Node(interface=util.IdentityInterface(fields=['in_files',
'fwhm',
'mask_file']),
name='inputnode')
"""
Smooth each run using SUSAN with the brightness threshold set to 75%
of the median value for each run and a mask constituting the mean
functional
"""
smooth = pe.MapNode(interface=fsl.SUSAN(),
iterfield=['in_file', 'brightness_threshold','usans'],
name='smooth')
"""
Determine the median value of the functional runs using the mask
"""
if separate_masks:
median = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
iterfield = ['in_file', 'mask_file'],
name='median')
else:
median = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
iterfield = ['in_file'],
name='median')
susan_smooth.connect(inputnode, 'in_files', median, 'in_file')
susan_smooth.connect(inputnode, 'mask_file', median, 'mask_file')
"""
Mask the motion corrected functional runs with the dilated mask
"""
if separate_masks:
mask = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
op_string='-mas'),
iterfield=['in_file', 'in_file2'],
name='mask')
else:
mask = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
op_string='-mas'),
iterfield=['in_file'],
name='mask')
susan_smooth.connect(inputnode, 'in_files', mask, 'in_file')
susan_smooth.connect(inputnode, 'mask_file', mask, 'in_file2')
"""
Determine the mean image from each functional run
"""
meanfunc = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
suffix='_mean'),
iterfield=['in_file'],
name='meanfunc2')
susan_smooth.connect(mask, 'out_file', meanfunc, 'in_file')
"""
Merge the median values with the mean functional images into a coupled list
"""
merge = pe.Node(interface=util.Merge(2, axis='hstack'),
name='merge')
susan_smooth.connect(meanfunc, 'out_file', merge, 'in1')
susan_smooth.connect(median, 'out_stat', merge, 'in2')
"""
Define a function to get the brightness threshold for SUSAN
"""
susan_smooth.connect(inputnode, 'fwhm', smooth, 'fwhm')
susan_smooth.connect(inputnode, 'in_files', smooth, 'in_file')
susan_smooth.connect(median, ('out_stat', getbtthresh), smooth, 'brightness_threshold')
susan_smooth.connect(merge, ('out', getusans), smooth, 'usans')
outputnode = pe.Node(interface=util.IdentityInterface(fields=['smoothed_files']),
name='outputnode')
susan_smooth.connect(smooth, 'smoothed_file', outputnode, 'smoothed_files')
return susan_smooth
def create_fsl_fs_preproc(name='preproc', highpass=True, whichvol='middle'):
"""Create a FEAT preprocessing workflow together with freesurfer
Parameters
----------
::
name : name of workflow (default: preproc)
highpass : boolean (default: True)
whichvol : which volume of the first run to register to ('first', 'middle', 'mean')
Inputs::
inputspec.func : functional runs (filename or list of filenames)
inputspec.fwhm : fwhm for smoothing with SUSAN
inputspec.highpass : HWHM in TRs (if created with highpass=True)
inputspec.subject_id : freesurfer subject id
inputspec.subjects_dir : freesurfer subjects dir
Outputs::
outputspec.reference : volume to which runs are realigned
outputspec.motion_parameters : motion correction parameters
outputspec.realigned_files : motion corrected files
outputspec.motion_plots : plots of motion correction parameters
outputspec.mask_file : mask file used to mask the brain
outputspec.smoothed_files : smoothed functional data
outputspec.highpassed_files : highpassed functional data (if highpass=True)
outputspec.reg_file : bbregister registration files
outputspec.reg_cost : bbregister registration cost files
Example
-------
>>> preproc = create_fsl_fs_preproc(whichvol='first')
>>> preproc.inputs.inputspec.highpass = 128./(2*2.5)
>>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii']
>>> preproc.inputs.inputspec.subjects_dir = '.'
>>> preproc.inputs.inputspec.subject_id = 's1'
>>> preproc.inputs.inputspec.fwhm = 6
>>> preproc.run() # doctest: +SKIP
"""
featpreproc = pe.Workflow(name=name)
"""
Set up a node to define all inputs required for the preprocessing workflow
"""
if highpass:
inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
'fwhm',
'subject_id',
'subjects_dir',
'highpass']),
name='inputspec')
outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference',
'motion_parameters',
'realigned_files',
'motion_plots',
'mask_file',
'smoothed_files',
'highpassed_files',
'reg_file',
'reg_cost'
]),
name='outputspec')
else:
inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
'fwhm',
'subject_id',
'subjects_dir'
]),
name='inputspec')
outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference',
'motion_parameters',
'realigned_files',
'motion_plots',
'mask_file',
'smoothed_files',
'reg_file',
'reg_cost'
]),
name='outputspec')
"""
Set up a node to define outputs for the preprocessing workflow
"""
"""
Convert functional images to float representation. Since there can
be more than one functional run we use a MapNode to convert each
run.
"""
img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float',
op_string = '',
suffix='_dtype'),
iterfield=['in_file'],
name='img2float')
featpreproc.connect(inputnode, 'func', img2float, 'in_file')
"""
Extract the selected volume (whichvol) of the first run as the reference
"""
if whichvol != 'mean':
extract_ref = pe.Node(interface=fsl.ExtractROI(t_size=1),
iterfield=['in_file'],
name = 'extractref')
featpreproc.connect(img2float, ('out_file', pickfirst), extract_ref, 'in_file')
featpreproc.connect(img2float, ('out_file', pickvol, 0, whichvol), extract_ref, 't_min')
featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference')
"""
Realign the functional runs to the reference (1st volume of first run)
"""
motion_correct = pe.MapNode(interface=fsl.MCFLIRT(save_mats = True,
save_plots = True,
interpolation = 'sinc'),
name='realign',
iterfield = ['in_file'])
featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file')
if whichvol != 'mean':
featpreproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file')
else:
motion_correct.inputs.mean_vol = True
featpreproc.connect(motion_correct, 'mean_img', outputnode, 'reference')
featpreproc.connect(motion_correct, 'par_file', outputnode, 'motion_parameters')
featpreproc.connect(motion_correct, 'out_file', outputnode, 'realigned_files')
"""
Plot the estimated motion parameters
"""
plot_motion = pe.MapNode(interface=fsl.PlotMotionParams(in_source='fsl'),
name='plot_motion',
iterfield=['in_file'])
plot_motion.iterables = ('plot_type', ['rotations', 'translations'])
featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file')
featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots')
"""Get the mask from subject for each run
"""
maskflow = create_getmask_flow()
featpreproc.connect([(inputnode, maskflow, [('subject_id','inputspec.subject_id'),
('subjects_dir', 'inputspec.subjects_dir')])])
maskflow.inputs.inputspec.contrast_type = 't2'
if whichvol != 'mean':
featpreproc.connect(extract_ref, 'roi_file', maskflow, 'inputspec.source_file')
else:
featpreproc.connect(motion_correct, ('mean_img', pickfirst), maskflow, 'inputspec.source_file')
"""
Mask the functional runs with the extracted mask
"""
maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet',
op_string='-mas'),
iterfield=['in_file'],
name = 'maskfunc')
featpreproc.connect(motion_correct, 'out_file', maskfunc, 'in_file')
featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), maskfunc, 'in_file2')
"""
Smooth each run using SUSAN with the brightness threshold set to 75%
of the median value for each run and a mask constituting the mean
functional
"""
smooth = create_susan_smooth(separate_masks=False)
featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm')
featpreproc.connect(maskfunc, 'out_file', smooth, 'inputnode.in_files')
featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), smooth, 'inputnode.mask_file')
"""
Mask the smoothed data with the dilated mask
"""
maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
op_string='-mas'),
iterfield=['in_file'],
name='maskfunc3')
featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3, 'in_file')
featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), maskfunc3, 'in_file2')
concatnode = pe.Node(interface=util.Merge(2),
name='concat')
featpreproc.connect(maskfunc, ('out_file', tolist), concatnode, 'in1')
featpreproc.connect(maskfunc3, ('out_file', tolist), concatnode, 'in2')
"""
The following nodes select smooth or unsmoothed data depending on the
fwhm. This is because SUSAN defaults to smoothing the data with about the
voxel size of the input data if the fwhm parameter is less than 1/3 of the
voxel size.
"""
selectnode = pe.Node(interface=util.Select(), name='select')
featpreproc.connect(concatnode, 'out', selectnode, 'inlist')
featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index')
featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files')
"""
Scale each run so that its median value is set to 10000
"""
meanscale = pe.MapNode(interface=fsl.ImageMaths(suffix='_gms'),
iterfield=['in_file', 'op_string'],
name='meanscale')
featpreproc.connect(selectnode, 'out', meanscale, 'in_file')
"""
Determine the median value of the functional runs using the mask
"""
medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
iterfield = ['in_file'],
name='medianval')
featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file')
featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), medianval, 'mask_file')
"""
Define a function to get the scaling factor for intensity normalization
"""
featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale, 'op_string')
"""
Perform temporal highpass filtering on the data
"""
if highpass:
highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'),
iterfield=['in_file'],
name='highpass')
featpreproc.connect(inputnode, ('highpass', highpass_operand), highpass, 'op_string')
featpreproc.connect(meanscale, 'out_file', highpass, 'in_file')
featpreproc.connect(highpass, 'out_file', outputnode, 'highpassed_files')
featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), outputnode, 'mask_file')
featpreproc.connect(maskflow, 'outputspec.reg_file', outputnode, 'reg_file')
featpreproc.connect(maskflow, 'outputspec.reg_cost', outputnode, 'reg_cost')
return featpreproc
def create_reg_workflow(name='registration'):
"""Create a FEAT preprocessing workflow together with freesurfer
Parameters
----------
::
name : name of workflow (default: 'registration')
Inputs::
inputspec.source_files : files (filename or list of filenames to register)
inputspec.mean_image : reference image to use
inputspec.anatomical_image : anatomical image to coregister to
inputspec.target_image : registration target
Outputs::
outputspec.func2anat_transform : FLIRT transform
outputspec.anat2target_transform : FLIRT+FNIRT transform
outputspec.transformed_files : transformed files in target space
outputspec.transformed_mean : mean image in target space
Example
-------
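The following is an illustrative sketch only (the file names are placeholders
and are not part of the original example):
>>> reg = create_reg_workflow()
>>> reg.inputs.inputspec.source_files = ['f3.nii']
>>> reg.inputs.inputspec.mean_image = 'mean.nii'
>>> reg.inputs.inputspec.anatomical_image = 'struct.nii'
>>> reg.inputs.inputspec.target_image = 'mni_template.nii'
>>> reg.run() # doctest: +SKIP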
"""
register = pe.Workflow(name=name)
inputnode = pe.Node(interface=util.IdentityInterface(fields=['source_files',
'mean_image',
'anatomical_image',
'target_image']),
name='inputspec')
outputnode = pe.Node(interface=util.IdentityInterface(fields=['func2anat_transform',
'anat2target_transform',
'transformed_files',
'transformed_mean',
]),
name='outputspec')
"""
Estimate the tissue classes from the anatomical image (done here with FSL's
BET and FAST).
"""
stripper = pe.Node(fsl.BET(), name='stripper')
register.connect(inputnode, 'anatomical_image', stripper, 'in_file')
fast = pe.Node(fsl.FAST(), name='fast')
register.connect(stripper, 'out_file', fast, 'in_files')
"""
Binarize the segmentation
"""
binarize = pe.Node(fsl.ImageMaths(op_string='-nan -thr 0.5 -bin'),
name='binarize')
pickindex = lambda x, i: x[i]
register.connect(fast, ('partial_volume_files', pickindex, 2),
binarize, 'in_file')
"""
Calculate rigid transform from mean image to anatomical image
"""
mean2anat = pe.Node(fsl.FLIRT(), name='mean2anat')
mean2anat.inputs.dof = 6
register.connect(inputnode, 'mean_image', mean2anat, 'in_file')
register.connect(inputnode, 'anatomical_image', mean2anat, 'reference')
"""
Now use bbr cost function to improve the transform
"""
mean2anatbbr = pe.Node(fsl.FLIRT(), name='mean2anatbbr')
mean2anatbbr.inputs.dof = 6
mean2anatbbr.inputs.cost = 'bbr'
mean2anatbbr.inputs.schedule = os.path.join(os.getenv('FSLDIR'),
'etc/flirtsch/bbr.sch')
register.connect(inputnode, 'mean_image', mean2anatbbr, 'in_file')
register.connect(binarize, 'out_file', mean2anatbbr, 'wm_seg')
register.connect(inputnode, 'anatomical_image', mean2anatbbr, 'reference')
register.connect(mean2anat, 'out_matrix_file', mean2anatbbr, 'in_matrix_file')
"""
Calculate affine transform from anatomical to target
"""
anat2target_affine = pe.Node(fsl.FLIRT(), name='anat2target_linear')
register.connect(inputnode, 'anatomical_image', anat2target_affine, 'in_file')
register.connect(inputnode, 'target_image', anat2target_affine, 'reference')
"""
Calculate nonlinear transform from anatomical to target
"""
anat2target_nonlinear = pe.Node(fsl.FNIRT(), name='anat2target_nonlinear')
register.connect(anat2target_affine, 'out_matrix_file',
anat2target_nonlinear, 'affine_file')
anat2target_nonlinear.inputs.warp_resolution = (8, 8, 8)
register.connect(inputnode, 'anatomical_image', anat2target_nonlinear, 'in_file')
register.connect(inputnode, 'target_image',
anat2target_nonlinear, 'ref_file')
"""
Transform the mean image. First to anatomical and then to target
"""
warp2anat = pe.Node(fsl.ApplyWarp(interp='spline'), name='warp2anat')
register.connect(inputnode, 'mean_image', warp2anat, 'in_file')
register.connect(inputnode, 'anatomical_image', warp2anat, 'ref_file')
register.connect(mean2anatbbr, 'out_matrix_file', warp2anat, 'premat')
warpmean = warp2anat.clone(name='warpmean')
register.connect(warp2anat, 'out_file', warpmean, 'in_file')
register.connect(inputnode, 'target_image', warpmean, 'ref_file')
register.connect(anat2target_affine, 'out_matrix_file', warpmean, 'premat')
register.connect(anat2target_nonlinear, 'field_file',
warpmean, 'field_file')
"""
Transform the remaining images. First to anatomical and then to target
"""
warpall2anat = pe.MapNode(fsl.ApplyWarp(interp='spline'),
iterfield=['in_file'],
name='warpall2anat')
register.connect(inputnode, 'source_files', warpall2anat, 'in_file')
register.connect(inputnode, 'anatomical_image', warpall2anat, 'ref_file')
register.connect(mean2anatbbr, 'out_matrix_file', warpall2anat, 'premat')
warpall = warpall2anat.clone(name='warpall')
register.connect(warpall2anat, 'out_file', warpall, 'in_file')
register.connect(inputnode, 'target_image', warpall, 'ref_file')
register.connect(anat2target_affine, 'out_matrix_file', warpall, 'premat')
register.connect(anat2target_nonlinear, 'field_file',
warpall, 'field_file')
"""
Assign all the output files
"""
register.connect(warpmean, 'out_file', outputnode, 'transformed_mean')
register.connect(warpall, 'out_file', outputnode, 'transformed_files')
register.connect(mean2anatbbr, 'out_matrix_file',
outputnode, 'func2anat_transform')
register.connect(anat2target_nonlinear, 'field_file',
outputnode, 'anat2target_transform')
return register
|
[] |
[] |
[
"FSLDIR"
] |
[]
|
["FSLDIR"]
|
python
| 1 | 0 | |
spirl/configs/few_shot_imitation_learning/maze_right/hierarchical_cl_state_4M_B1024_only_demos/conf.py
|
import os
from spirl.models.closed_loop_spirl_mdl import ClSPiRLMdl
from spirl.models.skill_prior_mdl import SkillSpaceLogger
from spirl.utils.general_utils import AttrDict
from spirl.configs.default_data_configs.maze import data_spec
from spirl.components.evaluator import TopOfNSequenceEvaluator
from spirl.data.maze.src.maze_data_loader import MazeStateSequenceDataset
from spirl.components.fsil import FewshotDataset
NUM_IL_DEMO = 10
subseq_len = 10
fewshot_dataset = FewshotDataset(
'data/maze/right/demos.pkl',
num_demo=NUM_IL_DEMO,
subseq_len=subseq_len,
)
current_dir = os.path.dirname(os.path.realpath(__file__))
configuration = {
'model': ClSPiRLMdl,
'logger': SkillSpaceLogger,
'data_dir': '.',
'epoch_cycles_train': 1,
'evaluator': TopOfNSequenceEvaluator,
'top_of_n_eval': 100,
'top_comp_metric': 'mse',
'batch_size': 128,
'num_epochs': 100,
'lr': 1e-4,
'fewshot_data': fewshot_dataset,
'fewshot_batch_size': 128,
'finetune_vae': False,
'rst_data_path': 'data/maze/right/rsts.npy',
}
configuration = AttrDict(configuration)
model_config = AttrDict(
state_dim=data_spec.state_dim,
action_dim=data_spec.n_actions,
n_rollout_steps=subseq_len,
kl_div_weight=1e-2,
nz_enc=32,
nz_mid=32,
n_processing_layers=3,
cond_decode=True,
# checkpt_path=f'{os.environ["EXP_DIR"]}/skill_prior_learning/maze_right/hierarchical_cl_state_4M_B1024'
)
# Dataset
data_config = AttrDict()
data_config.dataset_spec = data_spec
data_config['dataset_spec']['dataset_class'] = MazeStateSequenceDataset
data_config['dataset_spec']['env_name'] = 'maze2d-large-v1'
data_config['dataset_spec']['dataset_path'] = './data/maze/right/blocked-4M.hdf5'
data_config.dataset_spec.subseq_len = model_config.n_rollout_steps + 1
|
[] |
[] |
[
"EXP_DIR"
] |
[]
|
["EXP_DIR"]
|
python
| 1 | 0 | |
backend/src/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geo2.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
main_test.go
|
// Copyright © 2019 Banzai Cloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package objectmatch
import (
"flag"
"fmt"
"log"
"os"
"reflect"
"testing"
"github.com/banzaicloud/k8s-objectmatcher/patch"
"github.com/goph/emperror"
"github.com/pkg/errors"
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/autoscaling/v2beta1"
v1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextension "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
)
var (
integration = flag.Bool("integration", false, "run integration tests")
kubeconfig = flag.String("kubeconfig", "", "kubernetes config to use for tests")
kubecontext = flag.String("kubecontext", "", "kubernetes context to use in tests")
keepnamespace = flag.Bool("keepnamespace", false, "keep the kubernetes namespace that was used for the tests")
failonerror = flag.Bool("failonerror", false, "fail on error to be able to debug invalid state")
testContext = &IntegrationTestContext{}
)
func TestMain(m *testing.M) {
flag.Parse()
if testing.Verbose() {
klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(klogFlags)
err := klogFlags.Set("v", "3")
if err != nil {
fmt.Printf("Failed to set log level, moving on")
}
}
if *integration {
err := testContext.Setup()
if err != nil {
panic("Failed to setup test namespace: " + err.Error())
}
}
result := m.Run()
if *integration {
if !*keepnamespace {
err := testContext.DeleteNamespace()
if err != nil {
panic("Failed to delete test namespace")
}
}
}
os.Exit(result)
}
type IntegrationTestContext struct {
Client kubernetes.Interface
DynamicClient dynamic.Interface
ExtensionsClient apiextension.Interface
Namespace string
}
func (ctx *IntegrationTestContext) CreateNamespace() error {
ns := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "integration-",
},
}
namespace, err := ctx.Client.CoreV1().Namespaces().Create(ns)
if err != nil {
return err
}
ctx.Namespace = namespace.Name
return nil
}
func (ctx *IntegrationTestContext) Setup() error {
kubeconfigOverride := os.Getenv("KUBECONFIG")
if *kubeconfig != "" {
kubeconfigOverride = *kubeconfig
}
config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigOverride},
&clientcmd.ConfigOverrides{CurrentContext: *kubecontext},
).ClientConfig()
if err != nil {
return err
}
ctx.Client, err = kubernetes.NewForConfig(config)
if err != nil {
return emperror.Wrap(err, "Failed to create kubernetes client")
}
ctx.DynamicClient, err = dynamic.NewForConfig(config)
if err != nil {
return emperror.Wrap(err, "Failed to create dynamic client")
}
ctx.ExtensionsClient, err = apiextension.NewForConfig(config)
if err != nil {
return emperror.Wrap(err, "Failed to create apiextensions client")
}
err = testContext.CreateNamespace()
if err != nil {
return emperror.Wrap(err, "Failed to create test namespace")
}
return err
}
func (ctx *IntegrationTestContext) DeleteNamespace() error {
err := ctx.Client.CoreV1().Namespaces().Delete(ctx.Namespace, &metav1.DeleteOptions{
GracePeriodSeconds: new(int64),
})
return err
}
type TestItem struct {
name string
object metav1.Object
shouldMatch bool
gvr *schema.GroupVersionResource
remoteChange func(interface{})
localChange func(interface{})
ignoreVersions []string
}
func NewTestMatch(name string, object metav1.Object) *TestItem {
return &TestItem{
name: name,
object: object,
shouldMatch: true,
}
}
func NewTestDiff(name string, object metav1.Object) *TestItem {
return &TestItem{
name: name,
object: object,
shouldMatch: false,
}
}
func (t *TestItem) withRemoteChange(extender func(interface{})) *TestItem {
t.remoteChange = extender
return t
}
func (t *TestItem) withLocalChange(extender func(interface{})) *TestItem {
t.localChange = extender
return t
}
func (t *TestItem) withGroupVersionResource(gvr *schema.GroupVersionResource) *TestItem {
t.gvr = gvr
return t
}
func (t *TestItem) withIgnoreVersions(v []string) *TestItem {
t.ignoreVersions = v
return t
}
func testMatchOnObject(testItem *TestItem) error {
var existing metav1.Object
var err error
opts := []patch.CalculateOption{
patch.IgnoreStatusFields(),
}
newObject := testItem.object
err = patch.DefaultAnnotator.SetLastAppliedAnnotation(newObject.(runtime.Object))
if err != nil {
return err
}
deleteOptions := &metav1.DeleteOptions{
GracePeriodSeconds: new(int64),
}
switch newObject.(type) {
default:
return emperror.With(errors.New("Unsupported type"), "type", reflect.TypeOf(newObject), "object", newObject)
case *rbacv1.ClusterRole:
existing, err = testContext.Client.RbacV1().ClusterRoles().Create(newObject.(*rbacv1.ClusterRole))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.RbacV1().ClusterRoles().Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *rbacv1.Role:
existing, err = testContext.Client.RbacV1().Roles(newObject.GetNamespace()).Create(newObject.(*rbacv1.Role))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.RbacV1().Roles(newObject.GetNamespace()).Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *rbacv1.ClusterRoleBinding:
existing, err = testContext.Client.RbacV1().ClusterRoleBindings().Create(newObject.(*rbacv1.ClusterRoleBinding))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.RbacV1().ClusterRoleBindings().Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *rbacv1.RoleBinding:
existing, err = testContext.Client.RbacV1().RoleBindings(newObject.GetNamespace()).Create(newObject.(*rbacv1.RoleBinding))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.RbacV1().RoleBindings(newObject.GetNamespace()).Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *v1.Pod:
existing, err = testContext.Client.CoreV1().Pods(newObject.GetNamespace()).Create(newObject.(*v1.Pod))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.CoreV1().Pods(newObject.GetNamespace()).Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *v1.Service:
existing, err = testContext.Client.CoreV1().Services(newObject.GetNamespace()).Create(newObject.(*v1.Service))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.CoreV1().Services(newObject.GetNamespace()).Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *v1.ConfigMap:
existing, err = testContext.Client.CoreV1().ConfigMaps(newObject.GetNamespace()).Create(newObject.(*v1.ConfigMap))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.CoreV1().ConfigMaps(newObject.GetNamespace()).Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *v1.Secret:
existing, err = testContext.Client.CoreV1().Secrets(newObject.GetNamespace()).Create(newObject.(*v1.Secret))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.CoreV1().Secrets(newObject.GetNamespace()).Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *v1beta1.CustomResourceDefinition:
existing, err = testContext.ExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(newObject.(*v1beta1.CustomResourceDefinition))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.ExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *appsv1.DaemonSet:
existing, err = testContext.Client.AppsV1().DaemonSets(newObject.GetNamespace()).Create(newObject.(*appsv1.DaemonSet))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.AppsV1().DaemonSets(newObject.GetNamespace()).Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *appsv1.Deployment:
existing, err = testContext.Client.AppsV1().Deployments(newObject.GetNamespace()).Create(newObject.(*appsv1.Deployment))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.AppsV1().Deployments(newObject.GetNamespace()).Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *v2beta1.HorizontalPodAutoscaler:
existing, err = testContext.Client.AutoscalingV2beta1().HorizontalPodAutoscalers(newObject.GetNamespace()).Create(newObject.(*v2beta1.HorizontalPodAutoscaler))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.AutoscalingV2beta1().HorizontalPodAutoscalers(newObject.GetNamespace()).Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *admissionregistrationv1beta1.MutatingWebhookConfiguration:
existing, err = testContext.Client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(newObject.(*admissionregistrationv1beta1.MutatingWebhookConfiguration))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *policyv1beta1.PodDisruptionBudget:
existing, err = testContext.Client.PolicyV1beta1().PodDisruptionBudgets(newObject.GetNamespace()).Create(newObject.(*policyv1beta1.PodDisruptionBudget))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.PolicyV1beta1().PodDisruptionBudgets(newObject.GetNamespace()).Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *v1.PersistentVolumeClaim:
existing, err = testContext.Client.CoreV1().PersistentVolumeClaims(newObject.GetNamespace()).Create(newObject.(*v1.PersistentVolumeClaim))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.CoreV1().PersistentVolumeClaims(newObject.GetNamespace()).Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *v1.ServiceAccount:
existing, err = testContext.Client.CoreV1().ServiceAccounts(newObject.GetNamespace()).Create(newObject.(*v1.ServiceAccount))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.CoreV1().ServiceAccounts(newObject.GetNamespace()).Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *unstructured.Unstructured:
existing, err = testContext.DynamicClient.Resource(*testItem.gvr).Namespace(testContext.Namespace).
Create(newObject.(*unstructured.Unstructured), metav1.CreateOptions{})
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.DynamicClient.Resource(*testItem.gvr).Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *v1.Node:
existing, err = testContext.Client.CoreV1().Nodes().Create(newObject.(*v1.Node))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.CoreV1().Nodes().Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
case *appsv1.StatefulSet:
opts = append(opts, patch.IgnoreVolumeClaimTemplateTypeMetaAndStatus())
existing, err = testContext.Client.AppsV1().StatefulSets(newObject.GetNamespace()).Create(newObject.(*appsv1.StatefulSet))
if err != nil {
return emperror.WrapWith(err, "failed to create object", "object", newObject)
}
defer func() {
err = testContext.Client.AppsV1().StatefulSets(newObject.GetNamespace()).Delete(existing.GetName(), deleteOptions)
if err != nil {
log.Printf("Failed to remove object %s", existing.GetName())
}
}()
}
if testItem.remoteChange != nil {
testItem.remoteChange(existing)
}
if testItem.localChange != nil {
testItem.localChange(newObject)
}
patchResult, err := patch.DefaultPatchMaker.Calculate(existing.(runtime.Object), newObject.(runtime.Object), opts...)
if err != nil {
return err
}
matched := patchResult.IsEmpty()
if testItem.shouldMatch && !matched {
return emperror.With(errors.New("Objects did not match"), "patch", patchResult)
}
if !testItem.shouldMatch && matched {
return emperror.With(errors.New("Objects matched although they should not"), "patch", patchResult)
}
return nil
}
func standardObjectMeta() metav1.ObjectMeta {
return metav1.ObjectMeta{
GenerateName: "test-",
Namespace: testContext.Namespace,
}
}
func metaWithLabels(labels map[string]string) metav1.ObjectMeta {
meta := standardObjectMeta()
meta.Labels = labels
return meta
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
pkg/util/k8sutil/k8sutil.go
|
// Copyright 2016 The etcd-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8sutil
import (
"encoding/json"
"fmt"
"net"
"net/url"
"os"
"strconv"
"strings"
"time"
api "github.com/coreos/etcd-operator/pkg/apis/etcd/v1beta2"
"github.com/coreos/etcd-operator/pkg/util/etcdutil"
"github.com/coreos/etcd-operator/pkg/util/retryutil"
"github.com/pborman/uuid"
appsv1beta1 "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // for gcp auth
"k8s.io/client-go/rest"
)
const (
// EtcdClientPort is the client port on client service and etcd nodes.
EtcdClientPort = 2379
etcdVolumeMountDir = "/var/etcd"
dataDir = etcdVolumeMountDir + "/data"
backupFile = "/var/etcd/latest.backup"
etcdVersionAnnotationKey = "etcd.version"
peerTLSDir = "/etc/etcdtls/member/peer-tls"
peerTLSVolume = "member-peer-tls"
serverTLSDir = "/etc/etcdtls/member/server-tls"
serverTLSVolume = "member-server-tls"
operatorEtcdTLSDir = "/etc/etcdtls/operator/etcd-tls"
operatorEtcdTLSVolume = "etcd-client-tls"
randomSuffixLength = 10
// k8s object name has a maximum length
MaxNameLength = 63 - randomSuffixLength - 1
defaultBusyboxImage = "busybox:1.28.0-glibc"
// AnnotationScope annotation name for defining instance scope. Used for specifying cluster wide clusters.
AnnotationScope = "etcd.database.coreos.com/scope"
//AnnotationClusterWide annotation value for cluster wide clusters.
AnnotationClusterWide = "clusterwide"
// defaultDNSTimeout is the default maximum allowed time for the init container of the etcd pod
// to reverse DNS lookup its IP. The default behavior is to wait forever and has a value of 0.
defaultDNSTimeout = int64(0)
)
const TolerateUnreadyEndpointsAnnotation = "service.alpha.kubernetes.io/tolerate-unready-endpoints"
func GetEtcdVersion(pod *v1.Pod) string {
return pod.Annotations[etcdVersionAnnotationKey]
}
func SetEtcdVersion(pod *v1.Pod, version string) {
pod.Annotations[etcdVersionAnnotationKey] = version
}
func GetPodNames(pods []*v1.Pod) []string {
if len(pods) == 0 {
return nil
}
res := []string{}
for _, p := range pods {
res = append(res, p.Name)
}
return res
}
// PVCNameFromMember returns the PVC name derived from the member name
func PVCNameFromMember(memberName string) string {
return memberName
}
func makeRestoreInitContainers(backupURL *url.URL, token, repo, version string, m *etcdutil.Member) []v1.Container {
return []v1.Container{
{
Name: "fetch-backup",
Image: "tutum/curl",
Command: []string{
"/bin/bash", "-ec",
fmt.Sprintf(`
httpcode=$(curl --write-out %%\{http_code\} --silent --output %[1]s %[2]s)
if [[ "$httpcode" != "200" ]]; then
echo "http status code: ${httpcode}" >> /dev/termination-log
cat %[1]s >> /dev/termination-log
exit 1
fi
`, backupFile, backupURL.String()),
},
VolumeMounts: etcdVolumeMounts(),
},
{
Name: "restore-datadir",
Image: ImageName(repo, version),
Command: []string{
"/bin/sh", "-ec",
fmt.Sprintf("ETCDCTL_API=3 etcdctl snapshot restore %[1]s"+
" --name %[2]s"+
" --initial-cluster %[2]s=%[3]s"+
" --initial-cluster-token %[4]s"+
" --initial-advertise-peer-urls %[3]s"+
" --data-dir %[5]s 2>/dev/termination-log", backupFile, m.Name, m.PeerURL(), token, dataDir),
},
VolumeMounts: etcdVolumeMounts(),
},
}
}
func ImageName(repo, version string) string {
return fmt.Sprintf("%s:v%v", repo, version)
}
// imageNameBusybox returns the default image for busybox init container, or the image specified in the PodPolicy
func imageNameBusybox(policy *api.PodPolicy) string {
if policy != nil && len(policy.BusyboxImage) > 0 {
return policy.BusyboxImage
}
return defaultBusyboxImage
}
func PodWithNodeSelector(p *v1.Pod, ns map[string]string) *v1.Pod {
p.Spec.NodeSelector = ns
return p
}
func CreateClientService(kubecli kubernetes.Interface, clusterName, ns string, owner metav1.OwnerReference) error {
ports := []v1.ServicePort{{
Name: "client",
Port: EtcdClientPort,
TargetPort: intstr.FromInt(EtcdClientPort),
Protocol: v1.ProtocolTCP,
}}
return createService(kubecli, ClientServiceName(clusterName), clusterName, ns, "", ports, owner, false)
}
func ClientServiceName(clusterName string) string {
return clusterName + "-client"
}
func CreatePeerService(kubecli kubernetes.Interface, clusterName, ns string, owner metav1.OwnerReference) error {
ports := []v1.ServicePort{{
Name: "client",
Port: EtcdClientPort,
TargetPort: intstr.FromInt(EtcdClientPort),
Protocol: v1.ProtocolTCP,
}, {
Name: "peer",
Port: 2380,
TargetPort: intstr.FromInt(2380),
Protocol: v1.ProtocolTCP,
}}
return createService(kubecli, clusterName, clusterName, ns, v1.ClusterIPNone, ports, owner, true)
}
func createService(kubecli kubernetes.Interface, svcName, clusterName, ns, clusterIP string, ports []v1.ServicePort, owner metav1.OwnerReference, publishNotReadyAddresses bool) error {
svc := newEtcdServiceManifest(svcName, clusterName, clusterIP, ports, publishNotReadyAddresses)
addOwnerRefToObject(svc.GetObjectMeta(), owner)
_, err := kubecli.CoreV1().Services(ns).Create(svc)
if err != nil && !apierrors.IsAlreadyExists(err) {
return err
}
return nil
}
// CreateAndWaitPod creates a pod and waits until it is running
func CreateAndWaitPod(kubecli kubernetes.Interface, ns string, pod *v1.Pod, timeout time.Duration) (*v1.Pod, error) {
_, err := kubecli.CoreV1().Pods(ns).Create(pod)
if err != nil {
return nil, err
}
interval := 5 * time.Second
var retPod *v1.Pod
err = retryutil.Retry(interval, int(timeout/(interval)), func() (bool, error) {
retPod, err = kubecli.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
switch retPod.Status.Phase {
case v1.PodRunning:
return true, nil
case v1.PodPending:
return false, nil
default:
return false, fmt.Errorf("unexpected pod status.phase: %v", retPod.Status.Phase)
}
})
if err != nil {
if retryutil.IsRetryFailure(err) {
return nil, fmt.Errorf("failed to wait pod running, it is still pending: %v", err)
}
return nil, fmt.Errorf("failed to wait pod running: %v", err)
}
return retPod, nil
}
func newEtcdServiceManifest(svcName, clusterName, clusterIP string, ports []v1.ServicePort, publishNotReadyAddresses bool) *v1.Service {
labels := LabelsForCluster(clusterName)
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: svcName,
Labels: labels,
Annotations: map[string]string{
TolerateUnreadyEndpointsAnnotation: strconv.FormatBool(publishNotReadyAddresses),
},
},
Spec: v1.ServiceSpec{
Ports: ports,
Selector: labels,
ClusterIP: clusterIP,
// PublishNotReadyAddresses: publishNotReadyAddresses, // TODO(ckoehn): Activate once TolerateUnreadyEndpointsAnnotation is deprecated.
},
}
return svc
}
// AddEtcdVolumeToPod abstracts the process of appending the etcd volume spec to the pod spec
func AddEtcdVolumeToPod(pod *v1.Pod, pvc *v1.PersistentVolumeClaim) {
vol := v1.Volume{Name: etcdVolumeName}
if pvc != nil {
vol.VolumeSource = v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvc.Name},
}
} else {
vol.VolumeSource = v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}
}
pod.Spec.Volumes = append(pod.Spec.Volumes, vol)
}
func addRecoveryToPod(pod *v1.Pod, token string, m *etcdutil.Member, cs api.ClusterSpec, backupURL *url.URL) {
pod.Spec.InitContainers = append(pod.Spec.InitContainers,
makeRestoreInitContainers(backupURL, token, cs.Repository, cs.Version, m)...)
}
func addOwnerRefToObject(o metav1.Object, r metav1.OwnerReference) {
o.SetOwnerReferences(append(o.GetOwnerReferences(), r))
}
// NewSeedMemberPod returns a Pod manifest for a seed member.
// It is special in that it carries a new cluster token and might need recovery init containers
func NewSeedMemberPod(clusterName string, ms etcdutil.MemberSet, m *etcdutil.Member, cs api.ClusterSpec, owner metav1.OwnerReference, backupURL *url.URL) *v1.Pod {
token := uuid.New()
pod := newEtcdPod(m, ms.PeerURLPairs(), clusterName, "new", token, cs)
// TODO: PVC datadir support for restore process
AddEtcdVolumeToPod(pod, nil)
if backupURL != nil {
addRecoveryToPod(pod, token, m, cs, backupURL)
}
applyPodPolicy(clusterName, pod, cs.Pod)
addOwnerRefToObject(pod.GetObjectMeta(), owner)
return pod
}
// NewEtcdPodPVC creates a PVC object from the etcd pod's PVC spec
func NewEtcdPodPVC(m *etcdutil.Member, pvcSpec v1.PersistentVolumeClaimSpec, clusterName, namespace string, owner metav1.OwnerReference) *v1.PersistentVolumeClaim {
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: PVCNameFromMember(m.Name),
Namespace: namespace,
Labels: LabelsForCluster(clusterName),
},
Spec: pvcSpec,
}
addOwnerRefToObject(pvc.GetObjectMeta(), owner)
return pvc
}
func newEtcdPod(m *etcdutil.Member, initialCluster []string, clusterName, state, token string, cs api.ClusterSpec) *v1.Pod {
commands := fmt.Sprintf("/usr/local/bin/etcd --data-dir=%s --name=%s --initial-advertise-peer-urls=%s "+
"--listen-peer-urls=%s --listen-client-urls=%s --advertise-client-urls=%s "+
"--initial-cluster=%s --initial-cluster-state=%s",
dataDir, m.Name, m.PeerURL(), m.ListenPeerURL(), m.ListenClientURL(), m.ClientURL(), strings.Join(initialCluster, ","), state)
if m.SecurePeer {
commands += fmt.Sprintf(" --peer-client-cert-auth=true --peer-trusted-ca-file=%[1]s/peer-ca.crt --peer-cert-file=%[1]s/peer.crt --peer-key-file=%[1]s/peer.key", peerTLSDir)
}
if m.SecureClient {
commands += fmt.Sprintf(" --client-cert-auth=true --trusted-ca-file=%[1]s/server-ca.crt --cert-file=%[1]s/server.crt --key-file=%[1]s/server.key", serverTLSDir)
}
if state == "new" {
commands = fmt.Sprintf("%s --initial-cluster-token=%s", commands, token)
}
labels := map[string]string{
"app": "etcd",
"etcd_node": m.Name,
"etcd_cluster": clusterName,
}
livenessProbe := newEtcdProbe(cs.TLS.IsSecureClient())
readinessProbe := newEtcdProbe(cs.TLS.IsSecureClient())
readinessProbe.InitialDelaySeconds = 1
readinessProbe.TimeoutSeconds = 5
readinessProbe.PeriodSeconds = 5
readinessProbe.FailureThreshold = 3
container := containerWithProbes(
etcdContainer(strings.Split(commands, " "), cs.Repository, cs.Version),
livenessProbe,
readinessProbe)
volumes := []v1.Volume{}
if m.SecurePeer {
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
MountPath: peerTLSDir,
Name: peerTLSVolume,
})
volumes = append(volumes, v1.Volume{Name: peerTLSVolume, VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{SecretName: cs.TLS.Static.Member.PeerSecret},
}})
}
if m.SecureClient {
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
MountPath: serverTLSDir,
Name: serverTLSVolume,
}, v1.VolumeMount{
MountPath: operatorEtcdTLSDir,
Name: operatorEtcdTLSVolume,
})
volumes = append(volumes, v1.Volume{Name: serverTLSVolume, VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{SecretName: cs.TLS.Static.Member.ServerSecret},
}}, v1.Volume{Name: operatorEtcdTLSVolume, VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{SecretName: cs.TLS.Static.OperatorSecret},
}})
}
DNSTimeout := defaultDNSTimeout
if cs.Pod != nil {
DNSTimeout = cs.Pod.DNSTimeoutInSecond
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name,
Labels: labels,
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{{
// busybox:latest uses uclibc which contains a bug that sometimes prevents name resolution
// More info: https://github.com/docker-library/busybox/issues/27
//Image default: "busybox:1.28.0-glibc",
Image: imageNameBusybox(cs.Pod),
Name: "check-dns",
// In etcd 3.2, TLS listener will do a reverse-DNS lookup for pod IP -> hostname.
// If DNS entry is not warmed up, it will return empty result and peer connection will be rejected.
// In some cases the DNS is not created correctly so we need to time out after a given period.
Command: []string{"/bin/sh", "-c", fmt.Sprintf(`
TIMEOUT_READY=%d
while ( ! nslookup %s )
do
# If TIMEOUT_READY is 0 we should never time out and exit
TIMEOUT_READY=$(( TIMEOUT_READY-1 ))
if [ $TIMEOUT_READY -eq 0 ];
then
echo "Timed out waiting for DNS entry"
exit 1
fi
sleep 1
done`, DNSTimeout, m.Addr())},
}},
Containers: []v1.Container{container},
RestartPolicy: v1.RestartPolicyNever,
Volumes: volumes,
// DNS A record: `[m.Name].[clusterName].Namespace.svc`
// For example, etcd-795649v9kq in the default namespace will have DNS name
// `etcd-795649v9kq.etcd.default.svc`.
Hostname: m.Name,
Subdomain: clusterName,
AutomountServiceAccountToken: func(b bool) *bool { return &b }(false),
SecurityContext: podSecurityContext(cs.Pod),
},
}
if cs.Pod.DNSPolicy != "" {
pod.Spec.DNSPolicy = cs.Pod.DNSPolicy
}
if cs.Pod.HostNetwork {
pod.Spec.HostNetwork = true
}
SetEtcdVersion(pod, cs.Version)
return pod
}
func podSecurityContext(podPolicy *api.PodPolicy) *v1.PodSecurityContext {
if podPolicy == nil {
return nil
}
return podPolicy.SecurityContext
}
func NewEtcdPod(m *etcdutil.Member, initialCluster []string, clusterName, state, token string, cs api.ClusterSpec, owner metav1.OwnerReference) *v1.Pod {
pod := newEtcdPod(m, initialCluster, clusterName, state, token, cs)
applyPodPolicy(clusterName, pod, cs.Pod)
addOwnerRefToObject(pod.GetObjectMeta(), owner)
return pod
}
func MustNewKubeClient() kubernetes.Interface {
cfg, err := InClusterConfig()
if err != nil {
panic(err)
}
return kubernetes.NewForConfigOrDie(cfg)
}
func InClusterConfig() (*rest.Config, error) {
// Work around https://github.com/kubernetes/kubernetes/issues/40973
// See https://github.com/coreos/etcd-operator/issues/731#issuecomment-283804819
if len(os.Getenv("KUBERNETES_SERVICE_HOST")) == 0 {
addrs, err := net.LookupHost("kubernetes.default.svc")
if err != nil {
panic(err)
}
os.Setenv("KUBERNETES_SERVICE_HOST", addrs[0])
}
if len(os.Getenv("KUBERNETES_SERVICE_PORT")) == 0 {
os.Setenv("KUBERNETES_SERVICE_PORT", "443")
}
cfg, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
return cfg, nil
}
func IsKubernetesResourceAlreadyExistError(err error) bool {
return apierrors.IsAlreadyExists(err)
}
func IsKubernetesResourceNotFoundError(err error) bool {
return apierrors.IsNotFound(err)
}
// We are using internal API types for cluster-related objects.
func ClusterListOpt(clusterName string) metav1.ListOptions {
return metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(LabelsForCluster(clusterName)).String(),
}
}
func LabelsForCluster(clusterName string) map[string]string {
return map[string]string{
"etcd_cluster": clusterName,
"app": "etcd",
}
}
func CreatePatch(o, n, datastruct interface{}) ([]byte, error) {
oldData, err := json.Marshal(o)
if err != nil {
return nil, err
}
newData, err := json.Marshal(n)
if err != nil {
return nil, err
}
return strategicpatch.CreateTwoWayMergePatch(oldData, newData, datastruct)
}
func PatchDeployment(kubecli kubernetes.Interface, namespace, name string, updateFunc func(*appsv1beta1.Deployment)) error {
od, err := kubecli.AppsV1beta1().Deployments(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return err
}
nd := od.DeepCopy()
updateFunc(nd)
patchData, err := CreatePatch(od, nd, appsv1beta1.Deployment{})
if err != nil {
return err
}
_, err = kubecli.AppsV1beta1().Deployments(namespace).Patch(name, types.StrategicMergePatchType, patchData)
return err
}
func CascadeDeleteOptions(gracePeriodSeconds int64) *metav1.DeleteOptions {
return &metav1.DeleteOptions{
GracePeriodSeconds: func(t int64) *int64 { return &t }(gracePeriodSeconds),
PropagationPolicy: func() *metav1.DeletionPropagation {
foreground := metav1.DeletePropagationForeground
return &foreground
}(),
}
}
// mergeLabels merges l2 into l1. Conflicting label will be skipped.
func mergeLabels(l1, l2 map[string]string) {
for k, v := range l2 {
if _, ok := l1[k]; ok {
continue
}
l1[k] = v
}
}
func UniqueMemberName(clusterName string) string {
suffix := utilrand.String(randomSuffixLength)
if len(clusterName) > MaxNameLength {
clusterName = clusterName[:MaxNameLength]
}
return clusterName + "-" + suffix
}
|
[
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
] |
[] |
[
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT"
] |
[]
|
["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"]
|
go
| 2 | 0 | |
discordbot.py
|
from discord.ext import commands
import os
import traceback
bot = commands.Bot(command_prefix='!')
token = os.environ['DISCORD_BOT_TOKEN']
# Define the event handler that runs whenever a message is posted
# (the bot instance is named `bot`, so the decorator must use it)
@bot.event
async def on_message(message):
if message.content.startswith('/mkch'):
# Run the channel-creating coroutine and get the resulting Channel object
new_channel = await create_channel(message, channel_name='new')
# Send a message containing a link to the newly created channel
text = f'Created {new_channel.mention}'
await message.channel.send(text)
# Overriding on_message suppresses discord.py's automatic command handling,
# so pass every message on to the commands extension (this call should run
# for all messages, outside the '/mkch' branch above) or !ping will not work.
await bot.process_commands(message)
@bot.event
async def on_command_error(ctx, error):
orig_error = getattr(error, "original", error)
error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
await ctx.send(error_msg)
@bot.command()
async def ping(ctx):
await ctx.send('pong')
async def create_channel(message, channel_name):
category_id = message.channel.category_id
category = message.guild.get_channel(category_id)
new_channel = await category.create_text_channel(name=channel_name)
return new_channel
bot.run(token)
|
[] |
[] |
[
"DISCORD_BOT_TOKEN"
] |
[]
|
["DISCORD_BOT_TOKEN"]
|
python
| 1 | 0 | |
tests/test_getObjectRoutes.py
|
from TikTokApi import TikTokApi
import os
api = TikTokApi.get_instance(custom_verifyFp=os.environ.get("verifyFp", None))
def test_tiktok_object():
assert len(api.getTikTokById("6829267836783971589")) > 0
assert (
len(
api.getTikTokByUrl(
"https://www.tiktok.com/@therock/video/6829267836783971589"
)
)
> 0
)
def test_user_object():
assert len(api.getUserObject("therock")) > 0
def test_music_object():
assert len(api.getMusicObject("6820695018429253633")) > 0
def test_hashtag_object():
assert len(api.getHashtagObject("funny")) > 0
|
[] |
[] |
[
"verifyFp"
] |
[]
|
["verifyFp"]
|
python
| 1 | 0 | |
pkg/cmd/cli/restic/server.go
|
/*
Copyright 2019 the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
import (
"context"
"fmt"
"os"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
kubeinformers "k8s.io/client-go/informers"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"github.com/heptio/velero/pkg/buildinfo"
"github.com/heptio/velero/pkg/client"
"github.com/heptio/velero/pkg/cmd"
"github.com/heptio/velero/pkg/cmd/util/signals"
"github.com/heptio/velero/pkg/controller"
clientset "github.com/heptio/velero/pkg/generated/clientset/versioned"
informers "github.com/heptio/velero/pkg/generated/informers/externalversions"
"github.com/heptio/velero/pkg/restic"
"github.com/heptio/velero/pkg/util/filesystem"
"github.com/heptio/velero/pkg/util/logging"
)
func NewServerCommand(f client.Factory) *cobra.Command {
logLevelFlag := logging.LogLevelFlag(logrus.InfoLevel)
formatFlag := logging.NewFormatFlag()
command := &cobra.Command{
Use: "server",
Short: "Run the velero restic server",
Long: "Run the velero restic server",
Hidden: true,
Run: func(c *cobra.Command, args []string) {
logLevel := logLevelFlag.Parse()
logrus.Infof("Setting log-level to %s", strings.ToUpper(logLevel.String()))
logger := logging.DefaultLogger(logLevel, formatFlag.Parse())
logger.Infof("Starting Velero restic server %s (%s)", buildinfo.Version, buildinfo.FormattedGitSHA())
s, err := newResticServer(logger, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()))
cmd.CheckError(err)
s.run()
},
}
command.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("the level at which to log. Valid values are %s.", strings.Join(logLevelFlag.AllowedValues(), ", ")))
command.Flags().Var(formatFlag, "log-format", fmt.Sprintf("the format for log output. Valid values are %s.", strings.Join(formatFlag.AllowedValues(), ", ")))
return command
}
type resticServer struct {
kubeClient kubernetes.Interface
veleroClient clientset.Interface
veleroInformerFactory informers.SharedInformerFactory
kubeInformerFactory kubeinformers.SharedInformerFactory
podInformer cache.SharedIndexInformer
secretInformer cache.SharedIndexInformer
logger logrus.FieldLogger
ctx context.Context
cancelFunc context.CancelFunc
fileSystem filesystem.Interface
}
func newResticServer(logger logrus.FieldLogger, baseName string) (*resticServer, error) {
clientConfig, err := client.Config("", "", baseName)
if err != nil {
return nil, err
}
kubeClient, err := kubernetes.NewForConfig(clientConfig)
if err != nil {
return nil, errors.WithStack(err)
}
veleroClient, err := clientset.NewForConfig(clientConfig)
if err != nil {
return nil, errors.WithStack(err)
}
// use a stand-alone pod informer because we want to use a field selector to
// filter to only pods scheduled on this node.
podInformer := corev1informers.NewFilteredPodInformer(
kubeClient,
metav1.NamespaceAll,
0,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
func(opts *metav1.ListOptions) {
opts.FieldSelector = fmt.Sprintf("spec.nodeName=%s", os.Getenv("NODE_NAME"))
},
)
// use a stand-alone secrets informer so we can filter to only the restic credentials
// secret(s) within the velero namespace
//
// note: using an informer to access the single secret for all velero-managed
// restic repositories is overkill for now, but will be useful when we move
// to fully-encrypted backups and have unique keys per repository.
secretInformer := corev1informers.NewFilteredSecretInformer(
kubeClient,
os.Getenv("VELERO_NAMESPACE"),
0,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
func(opts *metav1.ListOptions) {
opts.FieldSelector = fmt.Sprintf("metadata.name=%s", restic.CredentialsSecretName)
},
)
ctx, cancelFunc := context.WithCancel(context.Background())
s := &resticServer{
kubeClient: kubeClient,
veleroClient: veleroClient,
veleroInformerFactory: informers.NewFilteredSharedInformerFactory(veleroClient, 0, os.Getenv("VELERO_NAMESPACE"), nil),
kubeInformerFactory: kubeinformers.NewSharedInformerFactory(kubeClient, 0),
podInformer: podInformer,
secretInformer: secretInformer,
logger: logger,
ctx: ctx,
cancelFunc: cancelFunc,
fileSystem: filesystem.NewFileSystem(),
}
if err := s.validatePodVolumesHostPath(); err != nil {
return nil, err
}
return s, nil
}
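// run starts the pod volume backup and restore controllers and blocks until the
// server's context is cancelled.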
func (s *resticServer) run() {
signals.CancelOnShutdown(s.cancelFunc, s.logger)
s.logger.Info("Starting controllers")
var wg sync.WaitGroup
backupController := controller.NewPodVolumeBackupController(
s.logger,
s.veleroInformerFactory.Velero().V1().PodVolumeBackups(),
s.veleroClient.VeleroV1(),
s.podInformer,
s.secretInformer,
s.kubeInformerFactory.Core().V1().PersistentVolumeClaims(),
s.kubeInformerFactory.Core().V1().PersistentVolumes(),
s.veleroInformerFactory.Velero().V1().BackupStorageLocations(),
os.Getenv("NODE_NAME"),
)
wg.Add(1)
go func() {
defer wg.Done()
backupController.Run(s.ctx, 1)
}()
restoreController := controller.NewPodVolumeRestoreController(
s.logger,
s.veleroInformerFactory.Velero().V1().PodVolumeRestores(),
s.veleroClient.VeleroV1(),
s.podInformer,
s.secretInformer,
s.kubeInformerFactory.Core().V1().PersistentVolumeClaims(),
s.kubeInformerFactory.Core().V1().PersistentVolumes(),
s.veleroInformerFactory.Velero().V1().BackupStorageLocations(),
os.Getenv("NODE_NAME"),
)
wg.Add(1)
go func() {
defer wg.Done()
restoreController.Run(s.ctx, 1)
}()
go s.veleroInformerFactory.Start(s.ctx.Done())
go s.kubeInformerFactory.Start(s.ctx.Done())
go s.podInformer.Run(s.ctx.Done())
go s.secretInformer.Run(s.ctx.Done())
s.logger.Info("Controllers started successfully")
<-s.ctx.Done()
s.logger.Info("Waiting for all controllers to shut down gracefully")
wg.Wait()
}
// validatePodVolumesHostPath validates that the pod volumes path contains a
// directory for each Pod running on this node
func (s *resticServer) validatePodVolumesHostPath() error {
files, err := s.fileSystem.ReadDir("/host_pods/")
if err != nil {
return errors.Wrap(err, "could not read pod volumes host path")
}
// create a map of directory names inside the pod volumes path
dirs := sets.NewString()
for _, f := range files {
if f.IsDir() {
dirs.Insert(f.Name())
}
}
pods, err := s.kubeClient.CoreV1().Pods("").List(metav1.ListOptions{FieldSelector: fmt.Sprintf("spec.nodeName=%s,status.phase=Running", os.Getenv("NODE_NAME"))})
if err != nil {
return errors.WithStack(err)
}
valid := true
for _, pod := range pods.Items {
dirName := string(pod.GetUID())
// if the pod is a mirror pod, the directory name is the hash value of the
// mirror pod annotation
if hash, ok := pod.GetAnnotations()[v1.MirrorPodAnnotationKey]; ok {
dirName = hash
}
if !dirs.Has(dirName) {
valid = false
s.logger.WithFields(logrus.Fields{
"pod": fmt.Sprintf("%s/%s", pod.GetNamespace(), pod.GetName()),
"path": "/host_pods/" + dirName,
}).Debug("could not find volumes for pod in host path")
}
}
if !valid {
return errors.New("unexpected directory structure for host-pods volume, ensure that the host-pods volume corresponds to the pods subdirectory of the kubelet root directory")
}
return nil
}
|
[
"\"NODE_NAME\"",
"\"VELERO_NAMESPACE\"",
"\"VELERO_NAMESPACE\"",
"\"NODE_NAME\"",
"\"NODE_NAME\"",
"\"NODE_NAME\""
] |
[] |
[
"VELERO_NAMESPACE",
"NODE_NAME"
] |
[]
|
["VELERO_NAMESPACE", "NODE_NAME"]
|
go
| 2 | 0 | |
pkg/controller/status/status.go
|
package status
import (
"context"
"encoding/json"
"fmt"
"os"
"reflect"
"sort"
"strings"
"sync"
"time"
"golang.org/x/time/rate"
configv1 "github.com/openshift/api/config/v1"
configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
"github.com/openshift/insights-operator/pkg/config"
"github.com/openshift/insights-operator/pkg/controllerstatus"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/klog/v2"
)
// How many upload failures in a row we tolerate before we start reporting
// the operator as UploadDegraded
const uploadFailuresCountThreshold = 5
// GatherFailuresCountThreshold is how many gatherings can fail in a row before we
// report the operator as disabled
const GatherFailuresCountThreshold = 5
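// Reported is persisted in the ClusterOperator status extension and records the
// time of the last report.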
type Reported struct {
LastReportTime metav1.Time `json:"lastReportTime"`
}
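// Configurator exposes the operator's current configuration.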
type Configurator interface {
Config() *config.Controller
}
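// Controller aggregates the status of the registered sources and reports it on
// the insights ClusterOperator resource.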
type Controller struct {
name string
namespace string
client configv1client.ConfigV1Interface
coreClient corev1client.CoreV1Interface
statusCh chan struct{}
configurator Configurator
lock sync.Mutex
sources []controllerstatus.Interface
reported Reported
start time.Time
}
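// NewController returns a status Controller for the insights ClusterOperator in the given namespace.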
func NewController(client configv1client.ConfigV1Interface, coreClient corev1client.CoreV1Interface, configurator Configurator, namespace string) *Controller {
c := &Controller{
name: "insights",
client: client,
coreClient: coreClient,
statusCh: make(chan struct{}, 1),
configurator: configurator,
namespace: namespace,
}
return c
}
func (c *Controller) triggerStatusUpdate() {
select {
case c.statusCh <- struct{}{}:
default:
}
}
func (c *Controller) controllerStartTime() time.Time {
c.lock.Lock()
defer c.lock.Unlock()
if c.start.IsZero() {
c.start = time.Now()
}
return c.start
}
func (c *Controller) LastReportedTime() time.Time {
c.lock.Lock()
defer c.lock.Unlock()
return c.reported.LastReportTime.Time
}
func (c *Controller) SetLastReportedTime(at time.Time) {
c.lock.Lock()
defer c.lock.Unlock()
if c.reported.LastReportTime.IsZero() {
klog.V(2).Infof("Initializing last reported time to %s", at.UTC().Format(time.RFC3339))
}
c.reported.LastReportTime.Time = at
c.triggerStatusUpdate()
}
func (c *Controller) AddSources(sources ...controllerstatus.Interface) {
c.lock.Lock()
defer c.lock.Unlock()
c.sources = append(c.sources, sources...)
}
func (c *Controller) Sources() []controllerstatus.Interface {
c.lock.Lock()
defer c.lock.Unlock()
return c.sources
}
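// merge computes the desired ClusterOperator conditions from the current source
// statuses and applies them to a copy of the existing object.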
func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.ClusterOperator {
// prime the object if it does not exist
if existing == nil {
existing = &configv1.ClusterOperator{
ObjectMeta: metav1.ObjectMeta{
Name: c.name,
},
}
}
// calculate the current controller state
var last time.Time
var reason string
var errors []string
var uploadErrorReason, uploadErrorMessage, disabledReason, disabledMessage string
allReady := true
for i, source := range c.Sources() {
summary, ready := source.CurrentStatus()
if !ready {
klog.V(4).Infof("Source %d %T is not ready", i, source)
allReady = false
continue
}
if summary.Healthy {
continue
}
if len(summary.Message) == 0 {
klog.Errorf("Programmer error: status source %d %T reported an empty message: %#v", i, source, summary)
continue
}
degradingFailure := true
if summary.Operation == controllerstatus.Uploading {
if summary.Count < uploadFailuresCountThreshold {
klog.V(4).Infof("Number of last upload failures %d lower than threshold %d. Not marking as degraded.", summary.Count, uploadFailuresCountThreshold)
degradingFailure = false
} else {
klog.V(4).Infof("Number of last upload failures %d exceeded than threshold %d. Marking as degraded.", summary.Count, uploadFailuresCountThreshold)
}
uploadErrorReason = summary.Reason
uploadErrorMessage = summary.Message
// NotAuthorized is a special case where we want to disable the operator
if isNotAuthorizedReason(summary.Reason) {
degradingFailure = false
disabledReason = summary.Reason
disabledMessage = summary.Message
}
} else if summary.Operation == controllerstatus.GatheringReport {
degradingFailure = false
if summary.Count < GatherFailuresCountThreshold {
klog.V(5).Infof("Number of last gather failures %d lower than threshold %d. Not marking as disabled.", summary.Count, GatherFailuresCountThreshold)
} else {
klog.V(3).Infof("Number of last gather failures %d exceeded the threshold %d. Marking as disabled.", summary.Count, GatherFailuresCountThreshold)
disabledReason = summary.Reason
disabledMessage = summary.Message
}
}
if degradingFailure {
reason = summary.Reason
errors = append(errors, summary.Message)
}
if last.Before(summary.LastTransitionTime) {
last = summary.LastTransitionTime
}
}
var errorMessage string
switch len(errors) {
case 0:
case 1:
if len(reason) == 0 {
reason = "UnknownError"
}
errorMessage = errors[0]
default:
reason = "MultipleFailures"
sort.Strings(errors)
errorMessage = fmt.Sprintf("There are multiple errors blocking progress:\n* %s", strings.Join(errors, "\n* "))
}
if !c.configurator.Config().Report {
disabledReason = "Disabled"
disabledMessage = "Health reporting is disabled"
}
existing = existing.DeepCopy()
now := time.Now()
if len(c.namespace) > 0 {
existing.Status.RelatedObjects = []configv1.ObjectReference{
{Resource: "namespaces", Name: c.namespace},
{Group: "apps", Resource: "deployments", Namespace: c.namespace, Name: "insights-operator"},
{Resource: "secrets", Namespace: "openshift-config", Name: "pull-secret"},
{Resource: "secrets", Namespace: "openshift-config", Name: "support"},
{Resource: "serviceaccounts", Namespace: c.namespace, Name: "gather"},
{Resource: "serviceaccounts", Namespace: c.namespace, Name: "operator"},
{Resource: "services", Namespace: c.namespace, Name: "metrics"},
{Resource: "configmaps", Namespace: c.namespace, Name: "service-ca-bundle"},
}
}
reported := Reported{LastReportTime: metav1.Time{Time: c.LastReportedTime()}}
isInitializing := !allReady && now.Sub(c.controllerStartTime()) < 3*time.Minute
// update the disabled and failing conditions
switch {
case isInitializing:
// the disabled condition is optional, but set it now if we already know we're disabled
if len(disabledReason) > 0 {
setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
Type: OperatorDisabled,
Status: configv1.ConditionTrue,
Reason: disabledReason,
Message: disabledMessage,
})
}
if findOperatorStatusCondition(existing.Status.Conditions, configv1.OperatorDegraded) == nil {
setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
Type: configv1.OperatorDegraded,
Status: configv1.ConditionFalse,
Reason: "AsExpected",
})
}
default:
// once we've initialized set Failing and Disabled as best we know
if len(disabledMessage) > 0 {
setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
Type: OperatorDisabled,
Status: configv1.ConditionTrue,
Reason: disabledReason,
Message: disabledMessage,
})
} else {
setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
Type: OperatorDisabled,
Status: configv1.ConditionFalse,
Reason: "AsExpected",
})
}
if len(errorMessage) > 0 {
klog.V(4).Infof("The operator has some internal errors: %s", errorMessage)
setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
Type: configv1.OperatorDegraded,
Status: configv1.ConditionTrue,
LastTransitionTime: metav1.Time{Time: last},
Reason: reason,
Message: errorMessage,
})
} else {
setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
Type: configv1.OperatorDegraded,
Status: configv1.ConditionFalse,
Reason: "AsExpected",
})
}
if len(uploadErrorReason) > 0 {
setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
Type: UploadDegraded,
Status: configv1.ConditionTrue,
LastTransitionTime: metav1.Time{Time: last},
Reason: uploadErrorReason,
Message: uploadErrorMessage,
})
} else {
removeOperatorStatusCondition(&existing.Status.Conditions, UploadDegraded)
}
}
// once the operator is running it is always considered available
setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
Type: configv1.OperatorAvailable,
Status: configv1.ConditionTrue,
Reason: "AsExpected",
})
// update the Progressing condition with a summary of the current state
switch {
case isInitializing:
klog.V(4).Infof("The operator is still being initialized")
// if we're still starting up and some sources are not ready, initialize the conditions
// but don't update
if findOperatorStatusCondition(existing.Status.Conditions, configv1.OperatorProgressing) == nil {
setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
Type: configv1.OperatorProgressing,
Status: configv1.ConditionTrue,
Reason: "Initializing",
Message: "Initializing the operator",
})
}
case len(errorMessage) > 0:
klog.V(4).Infof("The operator has some internal errors: %s", errorMessage)
setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
Type: configv1.OperatorProgressing,
Status: configv1.ConditionFalse,
Reason: "Degraded",
Message: "An error has occurred",
})
case len(disabledMessage) > 0:
klog.V(4).Infof("The operator is marked as disabled")
setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
Type: configv1.OperatorProgressing,
Status: configv1.ConditionFalse,
LastTransitionTime: metav1.Time{Time: last},
Reason: reason,
Message: disabledMessage,
})
default:
klog.V(4).Infof("The operator is healthy")
setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
Type: configv1.OperatorProgressing,
Status: configv1.ConditionFalse,
Reason: "AsExpected",
Message: "Monitoring the cluster",
})
}
if release := os.Getenv("RELEASE_VERSION"); len(release) > 0 {
existing.Status.Versions = []configv1.OperandVersion{
{Name: "operator", Version: release},
}
}
if data, err := json.Marshal(reported); err != nil {
klog.Errorf("Unable to marshal status extension: %v", err)
} else {
existing.Status.Extension.Raw = data
}
return existing
}
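// Start writes the initial status and then keeps it refreshed, both on a timer and
// whenever a status update is triggered, until the context is cancelled.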
func (c *Controller) Start(ctx context.Context) error {
if err := c.updateStatus(ctx, true); err != nil {
return err
}
limiter := rate.NewLimiter(rate.Every(30*time.Second), 2)
go wait.Until(func() {
timer := time.NewTicker(2 * time.Minute)
defer timer.Stop()
for {
select {
case <-ctx.Done():
return
case <-timer.C:
err := limiter.Wait(ctx)
if err != nil {
klog.Errorf("Limiter error by timer: %v", err)
}
case <-c.statusCh:
err := limiter.Wait(ctx)
if err != nil {
klog.Errorf("Limiter error by status: %v", err)
}
}
if err := c.updateStatus(ctx, false); err != nil {
klog.Errorf("Unable to write cluster operator status: %v", err)
}
}
}, time.Second, ctx.Done())
return nil
}
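// updateStatus fetches the existing ClusterOperator (creating it if missing),
// merges in the current status, and writes the result back.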
func (c *Controller) updateStatus(ctx context.Context, initial bool) error {
existing, err := c.client.ClusterOperators().Get(ctx, c.name, metav1.GetOptions{})
if err != nil {
if !errors.IsNotFound(err) {
return err
}
existing = nil
}
if initial {
if existing != nil {
var reported Reported
if len(existing.Status.Extension.Raw) > 0 {
if err := json.Unmarshal(existing.Status.Extension.Raw, &reported); err != nil {
klog.Errorf("The initial operator extension status is invalid: %v", err)
}
}
c.SetLastReportedTime(reported.LastReportTime.Time.UTC())
if con := findOperatorStatusCondition(existing.Status.Conditions, configv1.OperatorDegraded); con == nil ||
con != nil && con.Status == configv1.ConditionFalse {
klog.Info("The initial operator extension status is healthy")
}
}
}
updated := c.merge(existing)
if existing == nil {
created, err := c.client.ClusterOperators().Create(ctx, updated, metav1.CreateOptions{})
if err != nil {
return err
}
updated.ObjectMeta = created.ObjectMeta
updated.Spec = created.Spec
} else {
if reflect.DeepEqual(updated.Status, existing.Status) {
klog.V(4).Infof("No status update necessary, objects are identical")
return nil
}
}
_, err = c.client.ClusterOperators().UpdateStatus(ctx, updated, metav1.UpdateOptions{})
return err
}
// OperatorDisabled reports when the primary function of the operator has been disabled.
const OperatorDisabled configv1.ClusterStatusConditionType = "Disabled"
// UploadDegraded reports when the operator is failing to upload Insights data.
const UploadDegraded configv1.ClusterStatusConditionType = "UploadDegraded"
func isNotAuthorizedReason(reason string) bool {
return reason == "NotAuthorized"
}
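// setOperatorStatusCondition adds the condition or updates the matching existing
// one, refreshing the transition time whenever the status changes.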
func setOperatorStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, newCondition configv1.ClusterOperatorStatusCondition) {
if conditions == nil {
conditions = &[]configv1.ClusterOperatorStatusCondition{}
}
existingCondition := findOperatorStatusCondition(*conditions, newCondition.Type)
if existingCondition == nil {
newCondition.LastTransitionTime = metav1.NewTime(time.Now())
*conditions = append(*conditions, newCondition)
return
}
if existingCondition.Status != newCondition.Status {
existingCondition.Status = newCondition.Status
existingCondition.LastTransitionTime = newCondition.LastTransitionTime
}
existingCondition.Reason = newCondition.Reason
existingCondition.Message = newCondition.Message
if existingCondition.LastTransitionTime.IsZero() {
existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
}
}
func removeOperatorStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) {
if conditions == nil {
return
}
newConditions := []configv1.ClusterOperatorStatusCondition{}
for _, condition := range *conditions {
if condition.Type != conditionType {
newConditions = append(newConditions, condition)
}
}
*conditions = newConditions
}
func findOperatorStatusCondition(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition {
for i := range conditions {
if conditions[i].Type == conditionType {
return &conditions[i]
}
}
return nil
}
|
[
"\"RELEASE_VERSION\""
] |
[] |
[
"RELEASE_VERSION"
] |
[]
|
["RELEASE_VERSION"]
|
go
| 1 | 0 | |
app.py
|
import os
from flask import (
Flask, flash, render_template,
redirect, request, session, url_for, json, abort)
from flask_pymongo import PyMongo
from pymongo.collection import ReturnDocument
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
from helpers import login_required, update_recipe_rating, is_valid, check_form
from flask_paginate import Pagination, get_page_args
if os.path.exists("env.py"):
import env
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
mongo = PyMongo(app)
# Pagination for recipes was inspired by these two websites
# and modified and adapted to fit this project
# Credit code
# https://gist.github.com/mozillazg/69fb40067ae6d80386e10e105e6803c9
# Credit code https://harishvc.com/2015/04/15/pagination-flask-mongodb/
PER_PAGE = 6
def get_page_items():
# get the page number
try:
page = int(request.args.get('page', 1))
# check if page is positive integer
if page <= 0:
abort(404)
except ValueError:
abort(404)
# get the per_page items
per_page = request.args.get('per_page')
if not per_page:
per_page = PER_PAGE
else:
try:
per_page = int(per_page)
# check if per_page is positive integer
if per_page <= 0:
abort(404)
except ValueError:
abort(404)
# calculate the offset
offset = (page - 1) * per_page
return page, per_page, offset
def get_css_framework():
return 'bootstrap4'
def get_link_size():
return 'sm'
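# return only the slice of recipes that belongs on the current page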
def paginated(recipes):
page, per_page, offset = get_page_items()
return recipes[offset: offset + per_page]
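# build the flask-paginate Pagination object for the current request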
def get_pagination(recipes):
page, per_page, offset = get_page_items()
total = len(recipes)
return Pagination(css_framework=get_css_framework(),
link_size=get_link_size(),
page=page,
per_page=per_page,
offset=offset,
total=total
)
# End Credit code
@app.route("/", methods=["GET", "POST"])
@app.route("/home", methods=["GET", "POST"])
def home():
"""
Display the three most recent recipes and
three kitchen tools on the home page
"""
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# get three sample recipe to display on home page
recipes = mongo.db.recipes.find().sort("_id", -1).limit(3)
# get three sample kitchen tools to display on home page
products = mongo.db.shop.find().sort("_id", -1).limit(3)
page_set = {
"type": "form"
}
return render_template("pages/index.html",
recipes=recipes,
products=products,
nav_categories=nav_categories,
page_set=page_set)
@app.route("/all_recipes", methods=["GET", "POST"])
def all_recipes():
"""
Display all recipes
"""
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# show one random recipe as a suggestion for users when they access
# the recipes page
sugestion_recipe = mongo.db.recipes.aggregate([{'$sample': {'size': 1}}])
# the query for existing recipes on the database
recipes = list(mongo.db.recipes.find().sort("_id", -1))
# call the paginated function to display only the
# specific number of recipes per page
paginated_recipes = paginated(recipes)
# get the page pagination
pagination = get_pagination(recipes)
# total number of recipes found
total = len(recipes)
# set up the page_set object
page_set = {
"title": "Recipes",
"type": "form"
}
return render_template("pages/all_recipes.html",
nav_categories=nav_categories,
sugestion_recipe=sugestion_recipe,
recipes=paginated_recipes,
pagination=pagination,
total=total,
page_set=page_set)
@app.route("/category/<category>", methods=["GET", "POST"])
def category(category):
"""
Display recipes from requested category
"""
# see if there are any recipes for specific
# category on the database
check_category = mongo.db.recipes.count_documents(
{"category_name": category})
# return page not found if no recipes found
# for the specific category
if not check_category:
return redirect(url_for('error', code=404))
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# the query for existing recipes on the database for specific category
recipes = list(mongo.db.recipes.find(
{"category_name": category}).sort("_id", -1))
# call the paginated function to display only the
# specific number of recipes per page
paginated_recipes = paginated(recipes)
# get the page pagination
pagination = get_pagination(recipes)
total = len(recipes)
# set up the page_set object
page_set = {
"title": category.title(),
"type": "form"
}
return render_template("pages/category.html",
recipes=paginated_recipes,
pagination=pagination,
total=total,
nav_categories=nav_categories,
page_set=page_set,
category=category)
@app.route("/category/<category>/search", methods=["GET", "POST"])
def category_search(category):
"""
Display the search recipes from requested category only
"""
# get the search query
query = request.args.get('query')
# if no query return page not found
if not query:
return redirect(url_for('error', code=404))
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# the query for existing recipes on the database for specific search query
recipes = list(
mongo.db.recipes.find({
"category_name": category,
"$text": {"$search": query}
}).sort("_id", -1)
)
# call the paginated function to display only the
# specific number of recipes per page
paginated_recipes = paginated(recipes)
# get the page pagination
pagination = get_pagination(recipes)
# total number of recipes found
total = len(recipes)
# set up the page_set object
page_set = {
"title": "Search",
"type": "form"
}
return render_template("pages/category.html",
recipes=paginated_recipes,
pagination=pagination,
total=total,
nav_categories=nav_categories,
page_set=page_set,
category=category)
@app.route("/search", methods=["GET", "POST"])
def search():
"""
Display the search query that was requested
"""
# get the search query
query = request.args.get('query')
# if no query return page not found
if not query:
return redirect(url_for('error', code=404))
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# the query for existing recipes on the database for specific search query
recipes = list(
mongo.db.recipes.find({
"$text": {"$search": query}
}).sort("_id", -1)
)
# call the paginated function to display only the
# specific number of recipes per page
paginated_recipes = paginated(recipes)
# get the page pagination
pagination = get_pagination(recipes)
# total number of recipes found
total = len(recipes)
# set up the page_set object
page_set = {
"title": "Search",
"type": "form"
}
return render_template("pages/search.html",
recipes=paginated_recipes,
pagination=pagination,
total=total,
page_set=page_set,
nav_categories=nav_categories)
@app.route("/users/<username>", methods=["GET", "POST"])
def users(username):
"""
Display the user page that was requested
"""
# check if user exists on the database
user_found = mongo.db.users.find_one({"username": username.lower()})
# return page not found if invalid user
if not user_found:
return redirect(url_for('error', code=404))
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# the query to show the recipes from specific user on the database
recipes = list(
mongo.db.recipes.find(
{"created_by": username.lower()}
).sort("_id", -1)
)
# call the paginated function to display only the
# specific number of recipes per page
paginated_recipes = paginated(recipes)
# get the page pagination
pagination = get_pagination(recipes)
# total number of recipes found
total = len(recipes)
# set up the page_set object
page_set = {
"title": username.title()
}
return render_template("pages/search.html",
recipes=paginated_recipes,
pagination=pagination,
total=total,
page_set=page_set,
nav_categories=nav_categories)
@app.route("/recipe/<recipe_id>", methods=["GET", "POST"])
def recipe(recipe_id):
"""
Display the recipe on-page for each recipe id that was requested
"""
# Update the rating if it's an AJAX call
if request.method == "POST":
# check if the user is logged in before proceeding with the rating
if not session:
return json.dumps({'status': 'not logged in'})
# check that the recipe id hasn't been changed
if not is_valid(recipe_id):
return json.dumps({'status': 'error'})
# the query for the specific recipe that has to be rated
recipe = mongo.db.recipes.find_one({"_id": ObjectId(recipe_id)})
# if the user wants to rate their own recipe, return denied
if recipe["created_by"] == session["user"]:
return json.dumps({'status': 'denied'})
# check that the user didn't alter the form value
new_rating = request.form.get("stars")
if int(new_rating) > 0 and int(new_rating) <= 5:
# update the recipe rating
rating = update_recipe_rating(mongo, new_rating, recipe)
return json.dumps({'status': 'success', 'rating': rating})
return json.dumps({'status': 'error'})
# check that the recipe id hasn't been changed
if not is_valid(recipe_id):
return redirect(url_for('error', code=404))
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# the query for the specific recipe that the user wants to access
recipe = mongo.db.recipes.find_one({"_id": ObjectId(recipe_id)})
# added in case the owner decides to delete the recipe while other users
# are still on the recipe page; refreshing would otherwise raise an error
# because page_set accesses recipe["recipe_name"], which would become
# None["recipe_name"]
if not recipe:
return redirect(url_for('error', code=404))
# set up the page_set object
page_set = {
"title": recipe["recipe_name"].title(),
"type": "recipe"
}
return render_template("pages/recipe.html",
recipe=recipe,
page_set=page_set,
nav_categories=nav_categories)
@app.route("/register", methods=["GET", "POST"])
def register():
"""
Display the register page and create a new user account
"""
if request.method == "POST":
# make a check on the server-side before sending data to the database
# remove any leading or trailing whitespace
# from each string
username = request.form.get("username").strip()
email = request.form.get("email").strip()
password = request.form.get("password").strip()
password_confirmation = request.form.get(
"passwordConfirmation").strip()
# if no username provided alert the user
if not username:
flash("You must provide a username", category="alert-danger")
return redirect("/register")
# if no email provided alert the user
if not email:
flash("You must provide a email", category="alert-danger")
return redirect("/register")
# if the password does not match the confirmation password alert the user
if password != password_confirmation:
flash("Password does not match", category="alert-danger")
return redirect("/register")
# check if the username/email is not already registered
# this is because the username and email have to be unique to each user
exist_user = mongo.db.users.find_one({"username": username.lower()})
exist_email = mongo.db.users.find_one({"email": email.lower()})
# alert the user if the username and email are already in use
if exist_user and exist_email:
flash("Username and email already in use", category="alert-danger")
return redirect("/register")
# if we found that the username already exists alert the user
if exist_user:
flash("Username already in use", category="alert-danger")
return redirect("/register")
# if we found that the email already exists alert the user
if exist_email:
flash("Email already in use", category="alert-danger")
return redirect("/register")
# create the object for the new user
new_user = {
"username": username.lower(),
"email": email.lower(),
"password": generate_password_hash(password)
}
# insert the new user into database
mongo.db.users.insert_one(new_user)
# login the user and redirect to his profile page
session["user"] = username.lower()
flash("Registration Successful!", category="alert-success")
return redirect(url_for("profile", username=session["user"]))
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# set up the page_set object
page_set = {
"title": "Register",
"type": "form"
}
return render_template("pages/register.html",
page_set=page_set,
nav_categories=nav_categories)
@app.route("/login", methods=["GET", "POST"])
def login():
"""
Display the login page and check for authentication
"""
if request.method == "POST":
# check if user provided an email
if not request.form.get("email"):
flash("You must provide a email", category="alert-danger")
return redirect("/login")
# check if user provided an password
if not request.form.get("password"):
flash("You must provide a password", category="alert-danger")
return redirect("/login")
# query the users database for the specific email
existing_user = mongo.db.users.find_one(
{"email": request.form.get("email")})
# check if there is a user with that email address and
# that the provided password matches the stored password
if not existing_user or not check_password_hash(
existing_user["password"], request.form.get("password")):
flash("Incorrect Email and/or Password", category="alert-danger")
return redirect("/login")
# login the user and redirect to his profile page
session["user"] = existing_user["username"]
return redirect(url_for(
"profile", username=session["user"]))
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# set up the page_set object
page_set = {
"title": "Login",
"type": "form"
}
return render_template("pages/login.html",
page_set=page_set,
nav_categories=nav_categories)
@app.route("/profile/<username>", methods=["GET", "POST"])
@login_required
def profile(username):
"""
Display the profile page
"""
# Deny the user access to other users' profile pages
if username != session["user"]:
return redirect(url_for('error', code=403))
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# the query to show the recipes from specific user on the database
recipes = list(
mongo.db.recipes.find({
"created_by": username.lower()
}).sort("_id", -1)
)
# call the paginated function to display only the
# specific number of recipes per page
paginated_recipes = paginated(recipes)
# get the page pagination
pagination = get_pagination(recipes)
# total number of recipes found
total = len(recipes)
# set up the page_set object
page_set = {
"title": "Profile"
}
return render_template("pages/profile.html",
username=username,
page_set=page_set,
recipes=paginated_recipes,
pagination=pagination,
total=total,
nav_categories=nav_categories)
@app.route("/add_recipe", methods=["GET", "POST"])
@login_required
def add_recipe():
"""
Display the form for add recipe
"""
if request.method == "POST":
# check if all fields have been completed
# for the ingredients and methods we can only check the first item,
# as request.form.items() does not return the full array for a repeated
# key but only its first value
form_data = check_form(list(request.form.items()))
if not form_data:
flash("All Recipe Fields Must be Completed",
category="alert-warning")
return redirect('/add_recipe')
# create new recipe obj
new_recipe = {
"category_name": request.form.get("recipe-category").strip(),
"recipe_name": request.form.get("recipe-title").strip(),
"description": request.form.get("recipe-description").strip(),
"image_url": request.form.get("recipe-image-url").strip(),
"ingredients": request.form.getlist("recipe-ingredient"),
"methods": request.form.getlist("recipe-methods"),
"tips": request.form.get("recipe-tips").strip(),
"time": request.form.get("recipe-cook-time"),
"serve": request.form.get("recipe-serve"),
"ratings": {
"status": False,
"number_of_ratings": 0,
"weighted_average": 0.0,
"rated_stars": {
"1": 0,
"2": 0,
"3": 0,
"4": 0,
"5": 0}},
"created_by": session["user"]
}
# insert the new recipe into database and return
# the new ObjectId for that recipe
recipe_id = mongo.db.recipes.insert_one(new_recipe).inserted_id
# redirect the user to the new recipe page and show the success message
flash("Recipe Added Successfully", category="alert-success")
return redirect(url_for('recipe', recipe_id=recipe_id))
# get all the categories that the admin has added to the database
# the user can select from these categories and add their
# recipe under the chosen category
categories_recipes = mongo.db.categories.find().sort("category_name", 1)
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# set up the page_set object
page_set = {
"title": "Add Recipe",
"type": "form",
"route": "add_recipe"
}
return render_template("pages/add_recipe.html",
page_set=page_set,
categories_recipes=categories_recipes,
nav_categories=nav_categories)
@app.route("/edit_recipe/<recipe_id>", methods=["GET", "POST"])
@login_required
def edit_recipe(recipe_id):
"""
Display the form for edit recipe
"""
if request.method == "POST":
# check that the recipe id hasn't been changed
if not is_valid(recipe_id):
return redirect(url_for('error', code=404))
# check if all fields have been completed
# for the ingredients and methods we can only check the first item,
# as request.form.items() does not return the full array for a repeated
# key but only its first value
form_data = check_form(list(request.form.items()))
if not form_data:
flash("All Recipe Fields Must be Completed",
category="alert-warning")
return redirect(url_for('edit_recipe', recipe_id=recipe_id))
# update the recipe with the new changes
mongo.db.recipes.update_one(
{"_id": ObjectId(recipe_id)},
{
"$set": {
"category_name": request.form.get(
"recipe-category").strip(),
"recipe_name": request.form.get("recipe-title").strip(),
"description": request.form.get(
"recipe-description").strip(),
"image_url": request.form.get("recipe-image-url").strip(),
"ingredients": request.form.getlist("recipe-ingredient"),
"methods": request.form.getlist("recipe-methods"),
"tips": request.form.get("recipe-tips").strip(),
"time": request.form.get("recipe-cook-time"),
"serve": request.form.get("recipe-serve")
}
}
)
flash("Recipe Edited Successfully", category="alert-success")
return redirect(url_for('recipe', recipe_id=recipe_id))
# check that the recipe id hasn't been changed
if not is_valid(recipe_id):
return redirect(url_for('error', code=404))
# get all the categories that the admin has added to the database
# the user can select from these categories and add their
# recipe under the chosen category
categories_recipes = mongo.db.categories.find().sort("category_name", 1)
# the query for the specific recipe that the user wants to access
recipe = mongo.db.recipes.find_one({"_id": ObjectId(recipe_id)})
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# set up the page_set object
page_set = {
"title": "Edit Recipe",
"type": "form",
"route": "edit_recipe"
}
return render_template("pages/add_recipe.html",
page_set=page_set,
recipe=recipe,
categories_recipes=categories_recipes,
nav_categories=nav_categories)
@app.route("/delete", methods=["POST"])
@login_required
def delete():
"""
Delete the recipe/category
"""
# get the id for the item that has to be deleted
delete_item_id = request.form.get("delete-item-id")
# take the type of item (recipe or category) that
# the user/admin wants to delete
delete_item = request.form.get("delete-item")
# check that the id hasn't been changed
if not is_valid(delete_item_id):
return redirect(url_for('error', code=404))
# check if we have any type of item (recipe or category)
if not delete_item:
return redirect(url_for('error', code=404))
# if the item is a recipe, proceed with deleting
# the specific recipe based on the item id
if delete_item == "recipe":
# delete the recipe from recipes collection
mongo.db.recipes.delete_one({"_id": ObjectId(delete_item_id)})
# delete all the recipe ratings from userRatings collection
mongo.db.userRatings.delete_many(
{"recipe_id": ObjectId(delete_item_id)})
# alert the user that recipe was successfully deleted
flash("Recipe Deleted Successfully", category="alert-success")
return redirect(url_for(
"profile", username=session["user"]))
# if the item is a category, proceed with deleting
# the specific category based on the item id
elif delete_item == "category":
# Deny non-admin users permission to delete categories
if session["user"] != "admin":
return redirect(url_for('error', code=403))
# refuse to delete the category if it already has one or more recipes
category = mongo.db.categories.find_one(
{"_id": ObjectId(delete_item_id)})
check_category = mongo.db.recipes.count_documents(
{"category_name": category["category_name"]})
if check_category:
flash("Cannot Delete Category", category="alert-danger")
flash("Category Already in Use", category="alert-danger")
return redirect("/manage_categories")
# delete the category from categories collection
mongo.db.categories.delete_one({"_id": ObjectId(delete_item_id)})
flash("Category Deleted Successfully", category="alert-success")
return redirect("/manage_categories")
return redirect(url_for('error', code=404))
@app.route("/manage_categories", methods=["GET", "POST"])
@login_required
def manage_categories():
"""
Display all categories to manage categories page (admin only)
"""
# Deny non-admin users access to the manage_categories page
if session["user"] != "admin":
return redirect(url_for('error', code=403))
# query for all categories from categories collection
manage_categories = list(mongo.db.categories.find().sort(
"category_name", 1))
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# call the paginated function to display only the
# specific number of categories per page
paginated_categories = paginated(manage_categories)
# get the page pagination
pagination = get_pagination(manage_categories)
# total number of categories found
total = len(manage_categories)
# set up the page_set object
page_set = {
"title": "Manage Categories",
"type": "form"
}
return render_template("pages/manage_categories.html",
page_set=page_set,
nav_categories=nav_categories,
manage_categories=paginated_categories,
pagination=pagination,
total=total)
@app.route("/add_categories", methods=["POST"])
@login_required
def add_categories():
"""
Add categories or edit categories (admin only)
"""
# Deny non-admin users access to the manage_categories page
if session["user"] != "admin":
return redirect(url_for('error', code=403))
# get all the information in order to know
# what type of request the admin wants
category_id = request.form.get("category-id")
category_type = request.form.get("category-type")
category_name = request.form.get("category-name").strip()
# if no category name provided return error
if not category_name:
return redirect(url_for('error', code=404))
# check if admin wants to add or edit category
if category_type == "Add":
# check if the category exists in our database
category_exists = mongo.db.categories.count_documents(
{"category_name": category_name.lower()})
if category_exists:
flash("Category Already Exists", category="alert-warning")
return redirect(url_for('manage_categories'))
# add new category in database
new_category = {
"category_name": category_name.lower()
}
mongo.db.categories.insert_one(new_category)
# alert the admin that the category was successfully added
flash("Category Added Successfully", category="alert-success")
return redirect(url_for('manage_categories'))
elif category_type == "Edit":
# check that the category id hasn't been changed
if not is_valid(category_id):
return redirect(url_for('error', code=404))
# check that the edited category is not submitted with a name that already exists
check_category = mongo.db.categories.count_documents(
{"category_name": category_name.lower()})
if check_category:
flash("Category Already Exists",
category="alert-warning")
return redirect(url_for('manage_categories'))
# update the category name in the categories collection
# and return the old document so that we can update the
# recipes that still reference the old category name
old_category = mongo.db.categories.find_one_and_update(
{"_id": ObjectId(category_id)},
{
"$set": {
"category_name": category_name.lower()
}
},
return_document=ReturnDocument.BEFORE
)
# update category name in recipe collection
mongo.db.recipes.update_many(
{"category_name": old_category["category_name"]},
{
"$set": {
"category_name": category_name.lower()
}
}
)
# alert the admin that the category was successfully edited
flash("Category Edited Successfully", category="alert-success")
return redirect(url_for('manage_categories'))
return redirect(url_for('error', code=404))
@app.route("/logout")
@login_required
def logout():
"""
Log out the user
"""
# Remove user from session cookies
session.pop("user")
flash("You have been logged out", category="alert-info")
return redirect("/login")
@app.route("/shop", methods=["GET", "POST"])
def shop():
"""
Display the shop page
"""
# query all products from shop collection
products = list(mongo.db.shop.find().sort("_id", -1))
# call the paginated function to display only the
# specific number of product per page
paginated_products = paginated(products)
# get the page pagination
pagination = get_pagination(products)
# total number of products found
total = len(products)
# set up the page_set object
page_set = {
"title": "Kitchen Tools"
}
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# get the recommended products
recommended_products = mongo.db.shop.aggregate([{'$sample': {'size': 3}}])
return render_template("pages/shop.html",
page_set=page_set,
nav_categories=nav_categories,
products=paginated_products,
pagination=pagination,
total=total,
recommended_products=recommended_products)
@app.route("/shop/search", methods=["GET", "POST"])
def shop_search():
"""
Display the search items from shop page
"""
# get the query search
query = request.args.get('query')
# if no query return page not found
if not query:
return redirect(url_for('error', code=404))
# get the requested product(s)
products = list(
mongo.db.shop.find({
"$text": {"$search": query}
}).sort("_id", -1)
)
# call the paginated function to display only the
# specific number of product per page
paginated_products = paginated(products)
# get the page pagination
pagination = get_pagination(products)
# total number of products found
total = len(products)
# set up the page_set object
page_set = {
"title": "Kitchen Tools"
}
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# get the recommended products
recommended_products = mongo.db.shop.aggregate([{'$sample': {'size': 3}}])
return render_template("pages/shop.html",
page_set=page_set,
nav_categories=nav_categories,
products=paginated_products,
pagination=pagination,
total=total,
recommended_products=recommended_products)
@app.route("/contact", methods=["GET"])
def contact():
"""
Display the contact page
"""
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# set up the page_set object
page_set = {
"title": "Contact",
"type": "form"
}
return render_template("pages/contact.html",
page_set=page_set,
nav_categories=nav_categories)
@app.route("/subscribe_ajax", methods=["POST"])
def subscribe_ajax():
"""
Return the status message after the user subscribes to our newsletter
"""
# check if user sent the 'email address' and not an empty form
if not request.form.get("email"):
return json.dumps({'status': 'error'})
# check if the email is already in our subscriptions
email_exists = mongo.db.subscriptions.count_documents(
{"email": request.form.get("email").strip().lower()})
# if email already exists return status to user
if email_exists:
return json.dumps({'status': 'already subscribed'})
# insert new email in our database in subscriptions collection
subscribed_email = mongo.db.subscriptions.insert_one(
{"email": request.form.get("email").strip().lower()}).inserted_id
if subscribed_email:
return json.dumps({'status': 'success'})
return json.dumps({'status': 'error'})
@app.route("/terms_and_conditions", methods=["GET"])
def terms_and_conditions():
"""
Display the terms and conditions page
"""
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# set up the page_set object
page_set = {
"title": "Terms and Conditions"
}
return render_template("pages/terms_conditions.html",
page_set=page_set,
nav_categories=nav_categories)
@app.errorhandler(401)
def http_unauthorized(e):
return redirect(url_for('error', code=401))
@app.errorhandler(403)
def http_forbidden(e):
return redirect(url_for('error', code=403))
@app.errorhandler(404)
def http_not_found(e):
return redirect(url_for('error', code=404))
@app.route("/error/<code>")
def error(code):
"""
Show the user the error page for the requested error code
"""
# check which type of error needs to be displayed
if code == "401":
title = "Authorization Required"
elif code == "403":
title = "Access Forbidden"
elif code == "404":
title = "Page Not Found"
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# set up the page_set object
page_set = {
"title": title,
"code": code
}
return render_template("pages/error.html",
page_set=page_set,
nav_categories=nav_categories)
if __name__ == "__main__":
app.run(host=os.environ.get("IP"),
port=int(os.environ.get("PORT")),
debug=False)
|
[] |
[] |
[
"MONGO_DBNAME",
"PORT",
"MONGO_URI",
"IP",
"SECRET_KEY"
] |
[]
|
["MONGO_DBNAME", "PORT", "MONGO_URI", "IP", "SECRET_KEY"]
|
python
| 5 | 0 | |
core/src/main/java/com/linecorp/armeria/common/Flags.java
|
/*
* Copyright 2017 LINE Corporation
*
* LINE Corporation licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.linecorp.armeria.common;
import static com.google.common.collect.ImmutableList.toImmutableList;
import java.io.IOException;
import java.net.InetAddress;
import java.nio.channels.ClosedChannelException;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.function.IntPredicate;
import java.util.function.LongPredicate;
import java.util.function.Predicate;
import javax.annotation.Nullable;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.CaffeineSpec;
import com.google.common.base.Ascii;
import com.google.common.base.CharMatcher;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.linecorp.armeria.client.ClientFactoryBuilder;
import com.linecorp.armeria.client.retry.Backoff;
import com.linecorp.armeria.client.retry.RetryingClient;
import com.linecorp.armeria.client.retry.RetryingRpcClient;
import com.linecorp.armeria.common.util.Exceptions;
import com.linecorp.armeria.common.util.InetAddressPredicates;
import com.linecorp.armeria.common.util.Sampler;
import com.linecorp.armeria.common.util.SystemInfo;
import com.linecorp.armeria.internal.common.util.SslContextUtil;
import com.linecorp.armeria.server.RoutingContext;
import com.linecorp.armeria.server.ServerBuilder;
import com.linecorp.armeria.server.Service;
import com.linecorp.armeria.server.ServiceConfig;
import com.linecorp.armeria.server.ServiceRequestContext;
import com.linecorp.armeria.server.annotation.ExceptionHandler;
import com.linecorp.armeria.server.annotation.ExceptionVerbosity;
import io.micrometer.core.instrument.Meter;
import io.micrometer.core.instrument.Tag;
import io.micrometer.core.instrument.config.NamingConvention;
import io.netty.buffer.ByteBufAllocator;
import io.netty.channel.epoll.Epoll;
import io.netty.handler.codec.http2.Http2CodecUtil;
import io.netty.handler.codec.http2.Http2Exception;
import io.netty.handler.ssl.OpenSsl;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.resolver.DefaultAddressResolverGroup;
import io.netty.resolver.dns.DnsNameResolverTimeoutException;
import io.netty.util.ReferenceCountUtil;
/**
* The system properties that affect Armeria's runtime behavior.
*/
public final class Flags {
private static final Logger logger = LoggerFactory.getLogger(Flags.class);
private static final Splitter CSV_SPLITTER = Splitter.on(',').trimResults().omitEmptyStrings();
private static final String PREFIX = "com.linecorp.armeria.";
private static final int NUM_CPU_CORES = Runtime.getRuntime().availableProcessors();
private static final String DEFAULT_VERBOSE_EXCEPTION_SAMPLER_SPEC = "rate-limit=10";
private static final String VERBOSE_EXCEPTION_SAMPLER_SPEC;
private static final Sampler<Class<? extends Throwable>> VERBOSE_EXCEPTION_SAMPLER;
@Nullable
private static final Predicate<InetAddress> PREFERRED_IP_V4_ADDRESSES;
static {
final String spec = getNormalized("verboseExceptions", DEFAULT_VERBOSE_EXCEPTION_SAMPLER_SPEC, val -> {
if ("true".equals(val) || "false".equals(val)) {
return true;
}
try {
Sampler.of(val);
return true;
} catch (Exception e) {
// Invalid sampler specification
return false;
}
});
switch (spec) {
case "true":
case "always":
VERBOSE_EXCEPTION_SAMPLER_SPEC = "always";
VERBOSE_EXCEPTION_SAMPLER = Sampler.always();
break;
case "false":
case "never":
VERBOSE_EXCEPTION_SAMPLER_SPEC = "never";
VERBOSE_EXCEPTION_SAMPLER = Sampler.never();
break;
default:
VERBOSE_EXCEPTION_SAMPLER_SPEC = spec;
VERBOSE_EXCEPTION_SAMPLER = new ExceptionSampler(VERBOSE_EXCEPTION_SAMPLER_SPEC);
}
final List<Predicate<InetAddress>> preferredIpV4Addresses =
CSV_SPLITTER.splitToList(getNormalized("preferredIpV4Addresses", "", unused -> true))
.stream()
.map(cidr -> {
try {
return InetAddressPredicates.ofCidr(cidr);
} catch (Exception e) {
logger.warn("Failed to parse a preferred IPv4: {}", cidr);
}
return null;
})
.filter(Objects::nonNull)
.collect(toImmutableList());
switch (preferredIpV4Addresses.size()) {
case 0:
PREFERRED_IP_V4_ADDRESSES = null;
break;
case 1:
PREFERRED_IP_V4_ADDRESSES = preferredIpV4Addresses.get(0);
break;
default:
PREFERRED_IP_V4_ADDRESSES = inetAddress -> {
for (Predicate<InetAddress> preferredIpV4Addr : preferredIpV4Addresses) {
if (preferredIpV4Addr.test(inetAddress)) {
return true;
}
}
return false;
};
}
}
private static final boolean VERBOSE_SOCKET_EXCEPTIONS = getBoolean("verboseSocketExceptions", false);
private static final boolean VERBOSE_RESPONSES = getBoolean("verboseResponses", false);
@Nullable
private static final String REQUEST_CONTEXT_STORAGE_PROVIDER =
System.getProperty(PREFIX + "requestContextStorageProvider");
private static final boolean HAS_WSLENV = System.getenv("WSLENV") != null;
private static final boolean USE_EPOLL = getBoolean("useEpoll", isEpollAvailable(),
value -> isEpollAvailable() || !value);
@Nullable
private static Boolean useOpenSsl;
@Nullable
private static Boolean dumpOpenSslInfo;
private static final int DEFAULT_MAX_NUM_CONNECTIONS = Integer.MAX_VALUE;
private static final int MAX_NUM_CONNECTIONS =
getInt("maxNumConnections", DEFAULT_MAX_NUM_CONNECTIONS, value -> value > 0);
private static final int DEFAULT_NUM_COMMON_WORKERS = NUM_CPU_CORES * 2;
private static final int NUM_COMMON_WORKERS =
getInt("numCommonWorkers", DEFAULT_NUM_COMMON_WORKERS, value -> value > 0);
private static final int DEFAULT_NUM_COMMON_BLOCKING_TASK_THREADS = 200; // from Tomcat default maxThreads
private static final int NUM_COMMON_BLOCKING_TASK_THREADS =
getInt("numCommonBlockingTaskThreads",
DEFAULT_NUM_COMMON_BLOCKING_TASK_THREADS,
value -> value > 0);
private static final long DEFAULT_DEFAULT_MAX_REQUEST_LENGTH = 10 * 1024 * 1024; // 10 MiB
private static final long DEFAULT_MAX_REQUEST_LENGTH =
getLong("defaultMaxRequestLength",
DEFAULT_DEFAULT_MAX_REQUEST_LENGTH,
value -> value >= 0);
private static final long DEFAULT_DEFAULT_MAX_RESPONSE_LENGTH = 10 * 1024 * 1024; // 10 MiB
private static final long DEFAULT_MAX_RESPONSE_LENGTH =
getLong("defaultMaxResponseLength",
DEFAULT_DEFAULT_MAX_RESPONSE_LENGTH,
value -> value >= 0);
private static final long DEFAULT_DEFAULT_REQUEST_TIMEOUT_MILLIS = 10 * 1000; // 10 seconds
private static final long DEFAULT_REQUEST_TIMEOUT_MILLIS =
getLong("defaultRequestTimeoutMillis",
DEFAULT_DEFAULT_REQUEST_TIMEOUT_MILLIS,
value -> value >= 0);
// Use slightly greater value than the default request timeout so that clients have a higher chance of
// getting proper 503 Service Unavailable response when server-side timeout occurs.
private static final long DEFAULT_DEFAULT_RESPONSE_TIMEOUT_MILLIS = 15 * 1000; // 15 seconds
private static final long DEFAULT_RESPONSE_TIMEOUT_MILLIS =
getLong("defaultResponseTimeoutMillis",
DEFAULT_DEFAULT_RESPONSE_TIMEOUT_MILLIS,
value -> value >= 0);
private static final long DEFAULT_DEFAULT_CONNECT_TIMEOUT_MILLIS = 3200; // 3.2 seconds
private static final long DEFAULT_CONNECT_TIMEOUT_MILLIS =
getLong("defaultConnectTimeoutMillis",
DEFAULT_DEFAULT_CONNECT_TIMEOUT_MILLIS,
value -> value > 0);
private static final long DEFAULT_DEFAULT_WRITE_TIMEOUT_MILLIS = 1000; // 1 second
private static final long DEFAULT_WRITE_TIMEOUT_MILLIS =
getLong("defaultWriteTimeoutMillis",
DEFAULT_DEFAULT_WRITE_TIMEOUT_MILLIS,
value -> value >= 0);
// Use slightly greater value than the client-side default so that clients close the connection more often.
private static final long DEFAULT_DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS = 15000; // 15 seconds
private static final long DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS =
getLong("defaultServerIdleTimeoutMillis",
DEFAULT_DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS,
value -> value >= 0);
private static final long DEFAULT_DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS = 10000; // 10 seconds
private static final long DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS =
getLong("defaultClientIdleTimeoutMillis",
DEFAULT_DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS,
value -> value >= 0);
private static final long DEFAULT_DEFAULT_PING_INTERVAL_MILLIS = 0; // Disabled
private static final long DEFAULT_PING_INTERVAL_MILLIS =
getLong("defaultPingIntervalMillis",
DEFAULT_DEFAULT_PING_INTERVAL_MILLIS,
value -> value >= 0);
private static final int DEFAULT_DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE = 1024 * 1024; // 1MiB
private static final int DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE =
getInt("defaultHttp2InitialConnectionWindowSize",
DEFAULT_DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE,
value -> value > 0);
private static final int DEFAULT_DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE = 1024 * 1024; // 1MiB
private static final int DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE =
getInt("defaultHttp2InitialStreamWindowSize",
DEFAULT_DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE,
value -> value > 0);
private static final int DEFAULT_DEFAULT_HTTP2_MAX_FRAME_SIZE = 16384; // From HTTP/2 specification
private static final int DEFAULT_HTTP2_MAX_FRAME_SIZE =
getInt("defaultHttp2MaxFrameSize",
DEFAULT_DEFAULT_HTTP2_MAX_FRAME_SIZE,
value -> value >= Http2CodecUtil.MAX_FRAME_SIZE_LOWER_BOUND &&
value <= Http2CodecUtil.MAX_FRAME_SIZE_UPPER_BOUND);
// Can't use 0xFFFFFFFFL because some implementations use a signed 32-bit integer to store HTTP/2 SETTINGS
// parameter values, thus anything greater than 0x7FFFFFFF will break them or make them unhappy.
private static final long DEFAULT_DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION = Integer.MAX_VALUE;
private static final long DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION =
getLong("defaultHttp2MaxStreamsPerConnection",
DEFAULT_DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION,
value -> value > 0 && value <= 0xFFFFFFFFL);
// from Netty default maxHeaderSize
private static final long DEFAULT_DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE = 8192;
private static final long DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE =
getLong("defaultHttp2MaxHeaderListSize",
DEFAULT_DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE,
value -> value > 0 && value <= 0xFFFFFFFFL);
private static final int DEFAULT_DEFAULT_HTTP1_MAX_INITIAL_LINE_LENGTH = 4096; // from Netty
private static final int DEFAULT_MAX_HTTP1_INITIAL_LINE_LENGTH =
getInt("defaultHttp1MaxInitialLineLength",
DEFAULT_DEFAULT_HTTP1_MAX_INITIAL_LINE_LENGTH,
value -> value >= 0);
private static final int DEFAULT_DEFAULT_HTTP1_MAX_HEADER_SIZE = 8192; // from Netty
private static final int DEFAULT_MAX_HTTP1_HEADER_SIZE =
getInt("defaultHttp1MaxHeaderSize",
DEFAULT_DEFAULT_HTTP1_MAX_HEADER_SIZE,
value -> value >= 0);
private static final int DEFAULT_DEFAULT_HTTP1_MAX_CHUNK_SIZE = 8192; // from Netty
private static final int DEFAULT_HTTP1_MAX_CHUNK_SIZE =
getInt("defaultHttp1MaxChunkSize",
DEFAULT_DEFAULT_HTTP1_MAX_CHUNK_SIZE,
value -> value >= 0);
private static final boolean DEFAULT_USE_HTTP2_PREFACE = getBoolean("defaultUseHttp2Preface", true);
private static final boolean DEFAULT_USE_HTTP1_PIPELINING = getBoolean("defaultUseHttp1Pipelining", false);
private static final String DEFAULT_DEFAULT_BACKOFF_SPEC =
"exponential=200:10000,jitter=0.2";
private static final String DEFAULT_BACKOFF_SPEC =
getNormalized("defaultBackoffSpec", DEFAULT_DEFAULT_BACKOFF_SPEC, value -> {
try {
Backoff.of(value);
return true;
} catch (Exception e) {
// Invalid backoff specification
return false;
}
});
private static final int DEFAULT_DEFAULT_MAX_TOTAL_ATTEMPTS = 10;
private static final int DEFAULT_MAX_TOTAL_ATTEMPTS =
getInt("defaultMaxTotalAttempts",
DEFAULT_DEFAULT_MAX_TOTAL_ATTEMPTS,
value -> value > 0);
private static final String DEFAULT_ROUTE_CACHE_SPEC = "maximumSize=4096";
@Nullable
private static final String ROUTE_CACHE_SPEC =
caffeineSpec("routeCache", DEFAULT_ROUTE_CACHE_SPEC);
private static final String DEFAULT_ROUTE_DECORATOR_CACHE_SPEC = "maximumSize=4096";
@Nullable
private static final String ROUTE_DECORATOR_CACHE_SPEC =
caffeineSpec("routeDecoratorCache", DEFAULT_ROUTE_DECORATOR_CACHE_SPEC);
private static final String DEFAULT_COMPOSITE_SERVICE_CACHE_SPEC = "maximumSize=256";
@Nullable
private static final String COMPOSITE_SERVICE_CACHE_SPEC =
caffeineSpec("compositeServiceCache", DEFAULT_COMPOSITE_SERVICE_CACHE_SPEC);
private static final String DEFAULT_PARSED_PATH_CACHE_SPEC = "maximumSize=4096";
@Nullable
private static final String PARSED_PATH_CACHE_SPEC =
caffeineSpec("parsedPathCache", DEFAULT_PARSED_PATH_CACHE_SPEC);
private static final String DEFAULT_HEADER_VALUE_CACHE_SPEC = "maximumSize=4096";
@Nullable
private static final String HEADER_VALUE_CACHE_SPEC =
caffeineSpec("headerValueCache", DEFAULT_HEADER_VALUE_CACHE_SPEC);
private static final String DEFAULT_FILE_SERVICE_CACHE_SPEC = "maximumSize=1024";
@Nullable
private static final String FILE_SERVICE_CACHE_SPEC =
caffeineSpec("fileServiceCache", DEFAULT_FILE_SERVICE_CACHE_SPEC);
private static final String DEFAULT_CACHED_HEADERS =
":authority,:scheme,:method,accept-encoding,content-type";
private static final List<String> CACHED_HEADERS =
CSV_SPLITTER.splitToList(getNormalized(
"cachedHeaders", DEFAULT_CACHED_HEADERS, CharMatcher.ascii()::matchesAllOf));
private static final String DEFAULT_ANNOTATED_SERVICE_EXCEPTION_VERBOSITY = "unhandled";
private static final ExceptionVerbosity ANNOTATED_SERVICE_EXCEPTION_VERBOSITY =
exceptionLoggingMode("annotatedServiceExceptionVerbosity",
DEFAULT_ANNOTATED_SERVICE_EXCEPTION_VERBOSITY);
private static final boolean USE_JDK_DNS_RESOLVER = getBoolean("useJdkDnsResolver", false);
private static final boolean REPORT_BLOCKED_EVENT_LOOP =
getBoolean("reportBlockedEventLoop", true);
private static final boolean VALIDATE_HEADERS = getBoolean("validateHeaders", true);
private static final boolean USE_LEGACY_METER_NAMES = getBoolean("useLegacyMeterNames", false);
static {
if (!isEpollAvailable()) {
final Throwable cause = Epoll.unavailabilityCause();
if (cause != null) {
logger.info("/dev/epoll not available: {}", Exceptions.peel(cause).toString());
} else {
if (HAS_WSLENV) {
logger.info("/dev/epoll not available: WSL not supported");
} else {
logger.info("/dev/epoll not available: ?");
}
}
} else if (USE_EPOLL) {
logger.info("Using /dev/epoll");
}
}
private static boolean isEpollAvailable() {
if (SystemInfo.isLinux()) {
// Netty epoll transport does not work with WSL (Windows Subsystem for Linux) yet.
// TODO(trustin): Re-enable on WSL if https://github.com/Microsoft/WSL/issues/1982 is resolved.
return Epoll.isAvailable() && !HAS_WSLENV;
}
return false;
}
/**
* Returns the {@link Sampler} that determines whether to retain the stack trace of the exceptions
* that are thrown frequently by Armeria.
*
* @see #verboseExceptionSamplerSpec()
*/
public static Sampler<Class<? extends Throwable>> verboseExceptionSampler() {
return VERBOSE_EXCEPTION_SAMPLER;
}
/**
* Returns the specification string of the {@link Sampler} that determines whether to retain the stack
* trace of the exceptions that are thrown frequently by Armeria. A sampled exception will have the stack
* trace while the others will have an empty stack trace to eliminate the cost of capturing the stack
* trace.
*
* <p>The default value of this flag is {@value #DEFAULT_VERBOSE_EXCEPTION_SAMPLER_SPEC}, which retains
* the stack trace of the exceptions at the maximum rate of 10 exceptions/sec.
* Specify the {@code -Dcom.linecorp.armeria.verboseExceptions=<specification>} JVM option to override
* the default. See {@link Sampler#of(String)} for the specification string format.</p>
*/
public static String verboseExceptionSamplerSpec() {
// XXX(trustin): Is it worth allowing to specify different specs for different exception types?
return VERBOSE_EXCEPTION_SAMPLER_SPEC;
}
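    // Illustrative sketch, not part of the original source: the spec above can be passed to
    // Sampler.of(String) directly, and the resulting Sampler decides per exception type whether the
    // stack trace is retained. A hypothetical lookup (the override value is an example only; see
    // Sampler.of(String) for the exact spec syntax) might look like:
    //
    //     // java -Dcom.linecorp.armeria.verboseExceptions=<spec> ...
    //     Sampler<Class<? extends Throwable>> sampler = Flags.verboseExceptionSampler();
    //     boolean keepStackTrace = sampler.isSampled(ClosedSessionException.class);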
/**
* Returns whether to log the socket exceptions which are mostly harmless. If enabled, the following
* exceptions will be logged:
* <ul>
* <li>{@link ClosedChannelException}</li>
* <li>{@link ClosedSessionException}</li>
* <li>{@link IOException} - 'Connection reset/closed/aborted by peer'</li>
* <li>'Broken pipe'</li>
* <li>{@link Http2Exception} - 'Stream closed'</li>
* <li>{@link SSLException} - 'SSLEngine closed already'</li>
* </ul>
*
* <p>It is recommended to keep this flag disabled, because it increases the amount of log messages for
* the errors you usually do not have control over, e.g. unexpected socket disconnection due to network
* or remote peer issues.</p>
*
* <p>This flag is disabled by default.
* Specify the {@code -Dcom.linecorp.armeria.verboseSocketExceptions=true} JVM option to enable it.</p>
*
* @see Exceptions#isExpected(Throwable)
*/
public static boolean verboseSocketExceptions() {
return VERBOSE_SOCKET_EXCEPTIONS;
}
/**
* Returns whether the verbose response mode is enabled. When enabled, the server responses will contain
* the exception type and its full stack trace, which may be useful for debugging while potentially
* insecure. When disabled, the server responses will not expose such server-side details to the client.
*
* <p>This flag is disabled by default. Specify the {@code -Dcom.linecorp.armeria.verboseResponses=true}
* JVM option or use {@link ServerBuilder#verboseResponses(boolean)} to enable it.
*/
public static boolean verboseResponses() {
return VERBOSE_RESPONSES;
}
/**
* Returns the fully qualified class name of {@link RequestContextStorageProvider} that is used to choose
* when multiple {@link RequestContextStorageProvider}s exist.
*
* <p>The default value of this flag is {@code null}, which means only one
* {@link RequestContextStorageProvider} must be found via Java SPI. If there are more than one,
* you must specify the {@code -Dcom.linecorp.armeria.requestContextStorageProvider=<FQCN>} JVM option to
* choose the {@link RequestContextStorageProvider}.
*/
@Nullable
public static String requestContextStorageProvider() {
return REQUEST_CONTEXT_STORAGE_PROVIDER;
}
/**
* Returns whether the JNI-based {@code /dev/epoll} socket I/O is enabled. When enabled on Linux, Armeria
* uses {@code /dev/epoll} directly for socket I/O. When disabled, {@code java.nio} socket API is used
* instead.
*
* <p>This flag is enabled by default for supported platforms. Specify the
* {@code -Dcom.linecorp.armeria.useEpoll=false} JVM option to disable it.
*/
public static boolean useEpoll() {
return USE_EPOLL;
}
/**
* Returns whether the JNI-based TLS support with OpenSSL is enabled. When enabled, Armeria uses OpenSSL
* for processing TLS connections. When disabled, the current JVM's default {@link SSLEngine} is used
* instead.
*
* <p>This flag is enabled by default for supported platforms. Specify the
* {@code -Dcom.linecorp.armeria.useOpenSsl=false} JVM option to disable it.
*/
public static boolean useOpenSsl() {
if (useOpenSsl != null) {
return useOpenSsl;
}
setUseOpenSslAndDumpOpenSslInfo();
return useOpenSsl;
}
private static void setUseOpenSslAndDumpOpenSslInfo() {
final boolean useOpenSsl = getBoolean("useOpenSsl", true);
if (!useOpenSsl) {
// OpenSSL explicitly disabled
Flags.useOpenSsl = false;
dumpOpenSslInfo = false;
return;
}
if (!OpenSsl.isAvailable()) {
final Throwable cause = Exceptions.peel(OpenSsl.unavailabilityCause());
logger.info("OpenSSL not available: {}", cause.toString());
Flags.useOpenSsl = false;
dumpOpenSslInfo = false;
return;
}
Flags.useOpenSsl = true;
logger.info("Using OpenSSL: {}, 0x{}", OpenSsl.versionString(),
Long.toHexString(OpenSsl.version() & 0xFFFFFFFFL));
dumpOpenSslInfo = getBoolean("dumpOpenSslInfo", false);
if (dumpOpenSslInfo) {
final SSLEngine engine = SslContextUtil.createSslContext(
SslContextBuilder::forClient,
false,
ImmutableList.of()).newEngine(ByteBufAllocator.DEFAULT);
logger.info("All available SSL protocols: {}",
ImmutableList.copyOf(engine.getSupportedProtocols()));
logger.info("Default enabled SSL protocols: {}", SslContextUtil.DEFAULT_PROTOCOLS);
ReferenceCountUtil.release(engine);
logger.info("All available SSL ciphers: {}", OpenSsl.availableJavaCipherSuites());
logger.info("Default enabled SSL ciphers: {}", SslContextUtil.DEFAULT_CIPHERS);
}
}
/**
* Returns whether information about the OpenSSL environment should be dumped when first starting the
* application, including supported ciphers.
*
* <p>This flag is disabled by default. Specify the {@code -Dcom.linecorp.armeria.dumpOpenSslInfo=true} JVM
* option to enable it.
*
 * <p>If {@link #useOpenSsl()} returns {@code false}, this also returns {@code false} regardless of
 * whether you specified the JVM option.
*/
public static boolean dumpOpenSslInfo() {
if (dumpOpenSslInfo != null) {
return dumpOpenSslInfo;
}
setUseOpenSslAndDumpOpenSslInfo();
return dumpOpenSslInfo;
}
/**
* Returns the default server-side maximum number of connections.
*
* <p>The default value of this flag is {@value #DEFAULT_MAX_NUM_CONNECTIONS}. Specify the
* {@code -Dcom.linecorp.armeria.maxNumConnections=<integer>} JVM option to override
* the default value.
*/
public static int maxNumConnections() {
return MAX_NUM_CONNECTIONS;
}
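    // Illustrative usage sketch, not part of the original source: every flag in this class follows
    // the same pattern - the value is taken from the corresponding "-Dcom.linecorp.armeria.<name>"
    // system property when present and valid, and from the hard-coded default otherwise. For
    // example, launching the JVM with a hypothetical override:
    //
    //     java -Dcom.linecorp.armeria.maxNumConnections=8192 -jar my-server.jar
    //
    // makes Flags.maxNumConnections() return 8192 instead of DEFAULT_MAX_NUM_CONNECTIONS.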
/**
* Returns the default number of {@linkplain CommonPools#workerGroup() common worker group} threads.
* Note that this value has effect only if a user did not specify a worker group.
*
* <p>The default value of this flag is {@code 2 * <numCpuCores>}. Specify the
* {@code -Dcom.linecorp.armeria.numCommonWorkers=<integer>} JVM option to override the default value.
*/
public static int numCommonWorkers() {
return NUM_COMMON_WORKERS;
}
/**
* Returns the default number of {@linkplain CommonPools#blockingTaskExecutor() blocking task executor}
* threads. Note that this value has effect only if a user did not specify a blocking task executor.
*
* <p>The default value of this flag is {@value #DEFAULT_NUM_COMMON_BLOCKING_TASK_THREADS}. Specify the
* {@code -Dcom.linecorp.armeria.numCommonBlockingTaskThreads=<integer>} JVM option to override
* the default value.
*/
public static int numCommonBlockingTaskThreads() {
return NUM_COMMON_BLOCKING_TASK_THREADS;
}
/**
* Returns the default server-side maximum length of a request. Note that this value has effect
* only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_MAX_REQUEST_LENGTH}. Specify the
* {@code -Dcom.linecorp.armeria.defaultMaxRequestLength=<long>} to override the default value.
* {@code 0} disables the length limit.
*/
public static long defaultMaxRequestLength() {
return DEFAULT_MAX_REQUEST_LENGTH;
}
/**
* Returns the default client-side maximum length of a response. Note that this value has effect
* only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_MAX_RESPONSE_LENGTH}. Specify the
* {@code -Dcom.linecorp.armeria.defaultMaxResponseLength=<long>} to override the default value.
* {@code 0} disables the length limit.
*/
public static long defaultMaxResponseLength() {
return DEFAULT_MAX_RESPONSE_LENGTH;
}
/**
* Returns the default server-side timeout of a request in milliseconds. Note that this value has effect
* only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_REQUEST_TIMEOUT_MILLIS}.
* Specify the {@code -Dcom.linecorp.armeria.defaultRequestTimeoutMillis=<long>} to override
* the default value. {@code 0} disables the timeout.
*/
public static long defaultRequestTimeoutMillis() {
return DEFAULT_REQUEST_TIMEOUT_MILLIS;
}
/**
* Returns the default client-side timeout of a response in milliseconds. Note that this value has effect
* only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_RESPONSE_TIMEOUT_MILLIS}.
* Specify the {@code -Dcom.linecorp.armeria.defaultResponseTimeoutMillis=<long>} to override
* the default value. {@code 0} disables the timeout.
*/
public static long defaultResponseTimeoutMillis() {
return DEFAULT_RESPONSE_TIMEOUT_MILLIS;
}
/**
* Returns the default client-side timeout of a socket connection attempt in milliseconds.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_CONNECT_TIMEOUT_MILLIS}. Specify the
* {@code -Dcom.linecorp.armeria.defaultConnectTimeoutMillis=<integer>} JVM option to override
* the default value.
*/
public static long defaultConnectTimeoutMillis() {
return DEFAULT_CONNECT_TIMEOUT_MILLIS;
}
/**
* Returns the default client-side timeout of a socket write attempt in milliseconds.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_WRITE_TIMEOUT_MILLIS}. Specify the
* {@code -Dcom.linecorp.armeria.defaultWriteTimeoutMillis=<integer>} JVM option to override
* the default value. {@code 0} disables the timeout.
*/
public static long defaultWriteTimeoutMillis() {
return DEFAULT_WRITE_TIMEOUT_MILLIS;
}
/**
* Returns the default server-side idle timeout of a connection for keep-alive in milliseconds.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS}. Specify the
* {@code -Dcom.linecorp.armeria.defaultServerIdleTimeoutMillis=<integer>} JVM option to override
* the default value.
*/
public static long defaultServerIdleTimeoutMillis() {
return DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS;
}
/**
* Returns the default client-side idle timeout of a connection for keep-alive in milliseconds.
* Note that this value has effect only if a user did not specify it.
*
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS}. Specify the
* {@code -Dcom.linecorp.armeria.defaultClientIdleTimeoutMillis=<integer>} JVM option to override
* the default value.
*/
public static long defaultClientIdleTimeoutMillis() {
return DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS;
}
/**
* Returns the default maximum length of an HTTP/1 response initial line.
* Note that this value has effect only if a user did not specify it.
*
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP1_MAX_INITIAL_LINE_LENGTH}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp1MaxInitialLineLength=<integer>} JVM option
* to override the default value.
*/
public static int defaultHttp1MaxInitialLineLength() {
return DEFAULT_MAX_HTTP1_INITIAL_LINE_LENGTH;
}
/**
* Returns the default maximum length of all headers in an HTTP/1 response.
* Note that this value has effect only if a user did not specify it.
*
 * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP1_MAX_HEADER_SIZE}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp1MaxHeaderSize=<integer>} JVM option
* to override the default value.
*/
public static int defaultHttp1MaxHeaderSize() {
return DEFAULT_MAX_HTTP1_HEADER_SIZE;
}
/**
* Returns the default maximum length of each chunk in an HTTP/1 response content.
* The content or a chunk longer than this value will be split into smaller chunks
* so that their lengths never exceed it.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP1_MAX_CHUNK_SIZE}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp1MaxChunkSize=<integer>} JVM option
* to override the default value.
*/
public static int defaultHttp1MaxChunkSize() {
return DEFAULT_HTTP1_MAX_CHUNK_SIZE;
}
/**
* Returns the default value of the {@link ClientFactoryBuilder#useHttp2Preface(boolean)} option.
* Note that this value has effect only if a user did not specify it.
*
* <p>This flag is enabled by default. Specify the
* {@code -Dcom.linecorp.armeria.defaultUseHttp2Preface=false} JVM option to disable it.
*/
public static boolean defaultUseHttp2Preface() {
return DEFAULT_USE_HTTP2_PREFACE;
}
/**
* Returns the default value of the {@link ClientFactoryBuilder#useHttp1Pipelining(boolean)} option.
* Note that this value has effect only if a user did not specify it.
*
* <p>This flag is disabled by default. Specify the
* {@code -Dcom.linecorp.armeria.defaultUseHttp1Pipelining=true} JVM option to enable it.
*/
public static boolean defaultUseHttp1Pipelining() {
return DEFAULT_USE_HTTP1_PIPELINING;
}
/**
* Returns the default value for the PING interval.
* A <a href="https://httpwg.org/specs/rfc7540.html#PING">PING</a> frame
 * is sent for an HTTP/2 server and client, or
 * an <a href="https://tools.ietf.org/html/rfc7231#section-4.3.7">OPTIONS</a> request with an asterisk ("*")
 * is sent for an HTTP/1 client.
*
* <p>Note that this flag is only in effect when {@link #defaultServerIdleTimeoutMillis()} for server and
* {@link #defaultClientIdleTimeoutMillis()} for client are greater than the value of this flag.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_PING_INTERVAL_MILLIS} milliseconds.
* Specify the {@code -Dcom.linecorp.armeria.defaultPingIntervalMillis=<integer>} JVM option to override
 * the default value. If the specified value is smaller than 10 seconds, the PING interval is bumped
 * up to 10 seconds.
*/
public static long defaultPingIntervalMillis() {
return DEFAULT_PING_INTERVAL_MILLIS;
}
/**
* Returns the default value of the {@link ServerBuilder#http2InitialConnectionWindowSize(int)} and
* {@link ClientFactoryBuilder#http2InitialConnectionWindowSize(int)} option.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp2InitialConnectionWindowSize=<integer>} JVM option
* to override the default value.
*/
public static int defaultHttp2InitialConnectionWindowSize() {
return DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE;
}
/**
* Returns the default value of the {@link ServerBuilder#http2InitialStreamWindowSize(int)} and
* {@link ClientFactoryBuilder#http2InitialStreamWindowSize(int)} option.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp2InitialStreamWindowSize=<integer>} JVM option
* to override the default value.
*/
public static int defaultHttp2InitialStreamWindowSize() {
return DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE;
}
/**
* Returns the default value of the {@link ServerBuilder#http2MaxFrameSize(int)} and
* {@link ClientFactoryBuilder#http2MaxFrameSize(int)} option.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_MAX_FRAME_SIZE}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp2MaxFrameSize=<integer>} JVM option
* to override the default value.
*/
public static int defaultHttp2MaxFrameSize() {
return DEFAULT_HTTP2_MAX_FRAME_SIZE;
}
/**
* Returns the default value of the {@link ServerBuilder#http2MaxStreamsPerConnection(long)} option.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp2MaxStreamsPerConnection=<integer>} JVM option
* to override the default value.
*/
public static long defaultHttp2MaxStreamsPerConnection() {
return DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION;
}
/**
* Returns the default value of the {@link ServerBuilder#http2MaxHeaderListSize(long)} and
* {@link ClientFactoryBuilder#http2MaxHeaderListSize(long)} option.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp2MaxHeaderListSize=<integer>} JVM option
* to override the default value.
*/
public static long defaultHttp2MaxHeaderListSize() {
return DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE;
}
/**
* Returns the default value of the {@code backoffSpec} parameter when instantiating a {@link Backoff}
* using {@link Backoff#of(String)}. Note that this value has effect only if a user did not specify the
* {@code defaultBackoffSpec} in the constructor call.
*
* <p>The default value of this flag is {@value DEFAULT_DEFAULT_BACKOFF_SPEC}. Specify the
* {@code -Dcom.linecorp.armeria.defaultBackoffSpec=<spec>} JVM option to override the default value.
*/
public static String defaultBackoffSpec() {
return DEFAULT_BACKOFF_SPEC;
}
/**
* Returns the default maximum number of total attempts. Note that this value has effect only if a user
* did not specify it when creating a {@link RetryingClient} or a {@link RetryingRpcClient}.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_MAX_TOTAL_ATTEMPTS}. Specify the
* {@code -Dcom.linecorp.armeria.defaultMaxTotalAttempts=<integer>} JVM option to
* override the default value.
*/
public static int defaultMaxTotalAttempts() {
return DEFAULT_MAX_TOTAL_ATTEMPTS;
}
/**
* Returns the value of the {@code routeCache} parameter. It would be used to create a Caffeine
* {@link Cache} instance using {@link CaffeineSpec} for routing a request. The {@link Cache}
* would hold the mappings of {@link RoutingContext} and the designated {@link ServiceConfig}
* for a request to improve server performance.
*
* <p>The default value of this flag is {@value DEFAULT_ROUTE_CACHE_SPEC}. Specify the
* {@code -Dcom.linecorp.armeria.routeCache=<spec>} JVM option to override the default value.
* For example, {@code -Dcom.linecorp.armeria.routeCache=maximumSize=4096,expireAfterAccess=600s}.
* Also, specify {@code -Dcom.linecorp.armeria.routeCache=off} JVM option to disable it.
*/
@Nullable
public static String routeCacheSpec() {
return ROUTE_CACHE_SPEC;
}
/**
* Returns the value of the {@code routeDecoratorCache} parameter. It would be used to create a Caffeine
* {@link Cache} instance using {@link CaffeineSpec} for mapping a route to decorator.
* The {@link Cache} would hold the mappings of {@link RoutingContext} and the designated
* dispatcher {@link Service}s for a request to improve server performance.
*
* <p>The default value of this flag is {@value DEFAULT_ROUTE_DECORATOR_CACHE_SPEC}. Specify the
* {@code -Dcom.linecorp.armeria.routeDecoratorCache=<spec>} JVM option to override the default value.
* For example, {@code -Dcom.linecorp.armeria.routeDecoratorCache=maximumSize=4096,expireAfterAccess=600s}.
* Also, specify {@code -Dcom.linecorp.armeria.routeDecoratorCache=off} JVM option to disable it.
*/
@Nullable
public static String routeDecoratorCacheSpec() {
return ROUTE_DECORATOR_CACHE_SPEC;
}
/**
* Returns the value of the {@code parsedPathCache} parameter. It would be used to create a Caffeine
* {@link Cache} instance using {@link CaffeineSpec} for mapping raw HTTP paths to parsed pair of
* path and query, after validation.
*
* <p>The default value of this flag is {@value DEFAULT_PARSED_PATH_CACHE_SPEC}. Specify the
* {@code -Dcom.linecorp.armeria.parsedPathCache=<spec>} JVM option to override the default value.
* For example, {@code -Dcom.linecorp.armeria.parsedPathCache=maximumSize=4096,expireAfterAccess=600s}.
* Also, specify {@code -Dcom.linecorp.armeria.parsedPathCache=off} JVM option to disable it.
*/
@Nullable
public static String parsedPathCacheSpec() {
return PARSED_PATH_CACHE_SPEC;
}
/**
* Returns the value of the {@code headerValueCache} parameter. It would be used to create a Caffeine
* {@link Cache} instance using {@link CaffeineSpec} for mapping raw HTTP ASCII header values to
* {@link String}.
*
* <p>The default value of this flag is {@value DEFAULT_HEADER_VALUE_CACHE_SPEC}. Specify the
* {@code -Dcom.linecorp.armeria.headerValueCache=<spec>} JVM option to override the default value.
* For example, {@code -Dcom.linecorp.armeria.headerValueCache=maximumSize=4096,expireAfterAccess=600s}.
* Also, specify {@code -Dcom.linecorp.armeria.headerValueCache=off} JVM option to disable it.
*/
@Nullable
public static String headerValueCacheSpec() {
return HEADER_VALUE_CACHE_SPEC;
}
/**
* Returns the value of the {@code fileServiceCache} parameter. It would be used to create a Caffeine
* {@link Cache} instance using {@link CaffeineSpec} for caching file entries.
*
* <p>The default value of this flag is {@value DEFAULT_FILE_SERVICE_CACHE_SPEC}. Specify the
* {@code -Dcom.linecorp.armeria.fileServiceCache=<spec>} JVM option to override the default value.
* For example, {@code -Dcom.linecorp.armeria.fileServiceCache=maximumSize=1024,expireAfterAccess=600s}.
* Also, specify {@code -Dcom.linecorp.armeria.fileServiceCache=off} JVM option to disable it.
*/
@Nullable
public static String fileServiceCacheSpec() {
return FILE_SERVICE_CACHE_SPEC;
}
/**
* Returns the value of the {@code cachedHeaders} parameter which contains a comma-separated list of
* headers whose values are cached using {@code headerValueCache}.
*
* <p>The default value of this flag is {@value DEFAULT_CACHED_HEADERS}. Specify the
* {@code -Dcom.linecorp.armeria.cachedHeaders=<csv>} JVM option to override the default value.
*/
public static List<String> cachedHeaders() {
return CACHED_HEADERS;
}
/**
* Returns the value of the {@code compositeServiceCache} parameter. It would be used to create a
* Caffeine {@link Cache} instance using {@link CaffeineSpec} for routing a request.
* The {@link Cache} would hold the mappings of {@link RoutingContext} and the designated
* {@link ServiceConfig} for a request to improve server performance.
*
* <p>The default value of this flag is {@value DEFAULT_COMPOSITE_SERVICE_CACHE_SPEC}. Specify the
* {@code -Dcom.linecorp.armeria.compositeServiceCache=<spec>} JVM option to override the default value.
* For example, {@code -Dcom.linecorp.armeria.compositeServiceCache=maximumSize=256,expireAfterAccess=600s}.
* Also, specify {@code -Dcom.linecorp.armeria.compositeServiceCache=off} JVM option to disable it.
*/
@Nullable
public static String compositeServiceCacheSpec() {
return COMPOSITE_SERVICE_CACHE_SPEC;
}
/**
* Returns the verbosity of exceptions logged by annotated HTTP services. The value of this property
* is one of the following:
* <ul>
* <li>{@link ExceptionVerbosity#ALL} - logging all exceptions raised from annotated HTTP services</li>
 *   <li>{@link ExceptionVerbosity#UNHANDLED} - logging exceptions which are not handled by
 *       {@link ExceptionHandler}s provided by a user and are not well-known exceptions</li>
 *   <li>{@link ExceptionVerbosity#NONE} - not logging any exceptions</li>
* </ul>
* A log message would be written at {@code WARN} level.
*
* <p>The default value of this flag is {@value DEFAULT_ANNOTATED_SERVICE_EXCEPTION_VERBOSITY}.
* Specify the
* {@code -Dcom.linecorp.armeria.annotatedServiceExceptionVerbosity=<all|unhandled|none>} JVM option
* to override the default value.
*
* @see ExceptionVerbosity
*/
public static ExceptionVerbosity annotatedServiceExceptionVerbosity() {
return ANNOTATED_SERVICE_EXCEPTION_VERBOSITY;
}
/**
* Returns the {@link Predicate} that is used to choose the non-loopback IP v4 address in
* {@link SystemInfo#defaultNonLoopbackIpV4Address()}.
*
* <p>The default value of this flag is {@code null}, which means all valid IPv4 addresses are
* preferred. Specify the {@code -Dcom.linecorp.armeria.preferredIpV4Addresses=<csv>} JVM option
* to override the default value. The {@code csv} should be
* <a href="https://tools.ietf.org/html/rfc4632">Classless Inter-domain Routing(CIDR)</a>s or
* exact IP addresses separated by commas. For example,
* {@code -Dcom.linecorp.armeria.preferredIpV4Addresses=211.111.111.111,10.0.0.0/8,192.168.1.0/24}.
*/
@Nullable
public static Predicate<InetAddress> preferredIpV4Addresses() {
return PREFERRED_IP_V4_ADDRESSES;
}
/**
 * Enables {@link DefaultAddressResolverGroup} that resolves domain names using the JDK's built-in
 * domain name lookup mechanism.
* Note that JDK's built-in resolver performs a blocking name lookup from the caller thread, and thus
* this flag should be enabled only when the default asynchronous resolver does not work as expected,
* for example by always throwing a {@link DnsNameResolverTimeoutException}.
*
* <p>This flag is disabled by default.
* Specify the {@code -Dcom.linecorp.armeria.useJdkDnsResolver=true} JVM option
* to enable it.
*/
public static boolean useJdkDnsResolver() {
return USE_JDK_DNS_RESOLVER;
}
/**
* Returns whether {@link CompletableFuture}s returned by Armeria methods log a warning if
* {@link CompletableFuture#join()} or {@link CompletableFuture#get()} are called from an event loop thread.
 * Blocking an event loop thread in this manner reduces performance significantly, possibly causing
 * deadlocks, so it should be avoided at all costs (e.g. by using {@code thenApply()}-style methods to
 * execute asynchronously or by running the logic via {@link ServiceRequestContext#blockingTaskExecutor()}).
*
* <p>This flag is enabled by default.
* Specify the {@code -Dcom.linecorp.armeria.reportBlockedEventLoop=false} JVM option
* to disable it.
*/
public static boolean reportBlockedEventLoop() {
return REPORT_BLOCKED_EVENT_LOOP;
}
/**
* Enables validation of HTTP headers for dangerous characters like newlines - such characters can be used
* for injecting arbitrary content into HTTP responses.
*
* <p><strong>DISCLAIMER:</strong> Do not disable this unless you know what you are doing. It is recommended
* to keep this validation enabled to ensure the sanity of responses. However, you may wish to disable the
* validation to improve performance when you are sure responses are always safe, for example when only
* HTTP/2 is used, or when you populate headers with known values, and have no chance of using untrusted
* ones.
*
* <p>See <a href="https://github.com/line/armeria/security/advisories/GHSA-35fr-h7jr-hh86">CWE-113</a> for
* more details on the security implications of this flag.
*
* <p>This flag is enabled by default.
* Specify the {@code -Dcom.linecorp.armeria.validateHeaders=false} JVM option to disable it.</p>
*/
public static boolean validateHeaders() {
return VALIDATE_HEADERS;
}
/**
* Returns whether to switch back to Armeria's legacy {@link Meter} and {@link Tag} naming convention
* that is not compliant with Micrometer's default {@link NamingConvention}.
*
* <p>This flag is disabled by default. Specify the {@code -Dcom.linecorp.armeria.useLegacyMeterNames=true}
* JVM option to enable it.</p>
*/
public static boolean useLegacyMeterNames() {
return USE_LEGACY_METER_NAMES;
}
@Nullable
private static String caffeineSpec(String name, String defaultValue) {
final String spec = get(name, defaultValue, value -> {
try {
if (!"off".equals(value)) {
CaffeineSpec.parse(value);
}
return true;
} catch (Exception e) {
return false;
}
});
return "off".equals(spec) ? null : spec;
}
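    // Illustrative sketch, not part of the original source: the cache specs returned by the
    // accessors above are plain Caffeine specification strings (or null when disabled with "off"),
    // so a caller could hypothetically build a cache from one of them as follows, assuming the
    // Caffeine library is on the classpath:
    //
    //     String spec = Flags.routeCacheSpec();   // e.g. "maximumSize=4096"
    //     if (spec != null) {
    //         Cache<Object, Object> cache = Caffeine.from(CaffeineSpec.parse(spec)).build();
    //     }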
private static ExceptionVerbosity exceptionLoggingMode(String name, String defaultValue) {
final String mode = getNormalized(name, defaultValue,
value -> Arrays.stream(ExceptionVerbosity.values())
.anyMatch(v -> v.name().equalsIgnoreCase(value)));
return ExceptionVerbosity.valueOf(mode.toUpperCase());
}
private static boolean getBoolean(String name, boolean defaultValue) {
return getBoolean(name, defaultValue, value -> true);
}
private static boolean getBoolean(String name, boolean defaultValue, Predicate<Boolean> validator) {
return "true".equals(getNormalized(name, String.valueOf(defaultValue), value -> {
if ("true".equals(value)) {
return validator.test(true);
}
if ("false".equals(value)) {
return validator.test(false);
}
return false;
}));
}
private static int getInt(String name, int defaultValue, IntPredicate validator) {
return Integer.parseInt(getNormalized(name, String.valueOf(defaultValue), value -> {
try {
return validator.test(Integer.parseInt(value));
} catch (Exception e) {
// null or non-integer
return false;
}
}));
}
private static long getLong(String name, long defaultValue, LongPredicate validator) {
return Long.parseLong(getNormalized(name, String.valueOf(defaultValue), value -> {
try {
return validator.test(Long.parseLong(value));
} catch (Exception e) {
// null or not a valid long
return false;
}
}));
}
private static String get(String name, String defaultValue, Predicate<String> validator) {
final String fullName = PREFIX + name;
final String value = System.getProperty(fullName);
if (value == null) {
logger.info("{}: {} (default)", fullName, defaultValue);
return defaultValue;
}
if (validator.test(value)) {
logger.info("{}: {}", fullName, value);
return value;
}
logger.info("{}: {} (default instead of: {})", fullName, defaultValue, value);
return defaultValue;
}
private static String getNormalized(String name, String defaultValue, Predicate<String> validator) {
final String fullName = PREFIX + name;
final String value = getLowerCased(fullName);
if (value == null) {
logger.info("{}: {} (default)", fullName, defaultValue);
return defaultValue;
}
if (validator.test(value)) {
logger.info("{}: {}", fullName, value);
return value;
}
logger.info("{}: {} (default instead of: {})", fullName, defaultValue, value);
return defaultValue;
}
@Nullable
private static String getLowerCased(String fullName) {
String value = System.getProperty(fullName);
if (value != null) {
value = Ascii.toLowerCase(value);
}
return value;
}
private Flags() {}
}
|
[
"\"WSLENV\""
] |
[] |
[
"WSLENV"
] |
[]
|
["WSLENV"]
|
java
| 1 | 0 | |
noxfile.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# type: ignore
import copy
import os
import platform
from dataclasses import dataclass
from pathlib import Path
from typing import List
import nox
from nox.logger import logger
BASE = os.path.abspath(os.path.dirname(__file__))
DEFAULT_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"]
DEFAULT_OS_NAMES = ["Linux", "MacOS", "Windows"]
PYTHON_VERSIONS = os.environ.get(
"NOX_PYTHON_VERSIONS", ",".join(DEFAULT_PYTHON_VERSIONS)
).split(",")
INSTALL_EDITABLE_MODE = os.environ.get("INSTALL_EDITABLE_MODE", 0)
INSTALL_COMMAND = (
["pip", "install", "-e"] if INSTALL_EDITABLE_MODE else ["pip", "install"]
)
# Allow limiting testing to specific plugins
# The list ['ALL'] indicates all plugins
PLUGINS = os.environ.get("PLUGINS", "ALL").split(",")
SKIP_CORE_TESTS = "0"
SKIP_CORE_TESTS = os.environ.get("SKIP_CORE_TESTS", SKIP_CORE_TESTS) != "0"
FIX = os.environ.get("FIX", "0") == "1"
VERBOSE = os.environ.get("VERBOSE", "0")
SILENT = VERBOSE == "0"
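# Illustrative note, not part of the original file: the knobs above are read once at module import
# time, so they are usually set on the command line. A hypothetical local run that auto-formats the
# code and shows tool output could look like:
#
#   FIX=1 VERBOSE=1 nox -s lint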
@dataclass
class Plugin:
name: str
path: str
module: str
def get_current_os() -> str:
current_os = platform.system()
if current_os == "Darwin":
current_os = "MacOS"
return current_os
print(f"Operating system\t:\t{get_current_os()}")
print(f"NOX_PYTHON_VERSIONS\t:\t{PYTHON_VERSIONS}")
print(f"PLUGINS\t\t\t:\t{PLUGINS}")
print(f"SKIP_CORE_TESTS\t\t:\t{SKIP_CORE_TESTS}")
print(f"FIX\t\t\t:\t{FIX}")
print(f"VERBOSE\t\t\t:\t{VERBOSE}")
print(f"INSTALL_EDITABLE_MODE\t:\t{INSTALL_EDITABLE_MODE}")
def _upgrade_basic(session):
session.run(
"python",
"-m",
"pip",
"install",
"--upgrade",
"setuptools",
"pip",
silent=SILENT,
)
def find_dirs(path: str):
for file in os.listdir(path):
fullname = os.path.join(path, file)
if os.path.isdir(fullname):
yield fullname
def install_hydra(session, cmd):
# clean install hydra
session.chdir(BASE)
session.run(*cmd, ".", silent=SILENT)
if not SILENT:
session.install("pipdeptree", silent=SILENT)
session.run("pipdeptree", "-p", "hydra-core")
def pytest_args(*args):
ret = ["pytest", "-Werror"]
ret.extend(args)
return ret
def run_pytest(session, directory=".", *args):
pytest_cmd = pytest_args(directory, *args)
# silent=False to enable some output on CI
# (otherwise we risk no-output timeout)
session.run(*pytest_cmd, silent=False)
def get_setup_python_versions(classifiers):
pythons = filter(lambda line: "Programming Language :: Python" in line, classifiers)
return [p[len("Programming Language :: Python :: ") :] for p in pythons]
def get_plugin_os_names(classifiers: List[str]) -> List[str]:
oses = list(filter(lambda line: "Operating System" in line, classifiers))
if len(oses) == 0:
        # No OS is specified, so all OSes are supported
return DEFAULT_OS_NAMES
if len(oses) == 1 and oses[0] == "Operating System :: OS Independent":
        # All OSes are supported
return DEFAULT_OS_NAMES
else:
return [p.split("::")[-1].strip() for p in oses]
def select_plugins(session, directory: str) -> List[Plugin]:
"""
Select all plugins that should be tested in this session.
Considers the current Python version and operating systems against the supported ones,
as well as the user plugins selection (via the PLUGINS environment variable).
"""
assert session.python is not None, "Session python version is not specified"
blacklist = [".isort.cfg", "examples"]
plugins = [
{"dir_name": x, "path": x}
for x in sorted(os.listdir(os.path.join(BASE, directory)))
if x not in blacklist
]
ret = []
skipped = []
for plugin in plugins:
if not (plugin["dir_name"] in PLUGINS or PLUGINS == ["ALL"]):
skipped.append(f"Deselecting {plugin['dir_name']}: User request")
continue
setup_py = os.path.join(BASE, directory, plugin["path"], "setup.py")
classifiers = session.run(
"python", setup_py, "--name", "--classifiers", silent=True
).splitlines()
plugin_name = classifiers.pop(0)
plugin_python_versions = get_setup_python_versions(classifiers)
python_supported = session.python in plugin_python_versions
plugin_os_names = get_plugin_os_names(classifiers)
os_supported = get_current_os() in plugin_os_names
if not python_supported:
py_str = ", ".join(plugin_python_versions)
skipped.append(
f"Deselecting {plugin['dir_name']} : Incompatible Python {session.python}. Supports [{py_str}]"
)
continue
# Verify this plugin supports the OS we are testing on, skip otherwise
if not os_supported:
os_str = ", ".join(plugin_os_names)
skipped.append(
f"Deselecting {plugin['dir_name']}: Incompatible OS {get_current_os()}. Supports [{os_str}]"
)
continue
ret.append(
Plugin(
name=plugin_name,
path=plugin["path"],
module="hydra_plugins." + plugin["dir_name"],
)
)
for msg in skipped:
logger.warn(msg)
if len(ret) == 0:
logger.warn("No plugins selected")
return ret
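# Illustrative note, not part of the original file: select_plugins() is driven entirely by the
# environment, so testing can be narrowed to specific plugin directories. A hypothetical invocation
# (the plugin directory names are examples only) could look like:
#
#   PLUGINS=hydra_colorlog,hydra_joblib_launcher NOX_PYTHON_VERSIONS=3.8 nox -s test_plugins
#
# Directory names not listed in PLUGINS are skipped with a "Deselecting ...: User request" message.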
def install_dev_deps(session):
_upgrade_basic(session)
session.run("pip", "install", "-r", "requirements/dev.txt", silent=SILENT)
def _black_cmd():
black = ["black", "."]
if not FIX:
black += ["--check"]
return black
def _isort_cmd():
isort = ["isort", "."]
if not FIX:
isort += ["--check", "--diff"]
return isort
@nox.session(python=PYTHON_VERSIONS)
def lint(session):
install_dev_deps(session)
install_hydra(session, ["pip", "install", "-e"])
apps = _get_standalone_apps_dirs()
session.log("Installing standalone apps")
for subdir in apps:
session.chdir(str(subdir))
session.run(*_black_cmd(), silent=SILENT)
session.run(*_isort_cmd(), silent=SILENT)
session.chdir(BASE)
session.run(*_black_cmd(), silent=SILENT)
skiplist = apps + [
".git",
"website",
"plugins",
"tools",
".nox",
"hydra/grammar/gen",
"tools/configen/example/gen",
"tools/configen/tests/test_modules/expected",
]
isort = _isort_cmd() + [f"--skip={skip}" for skip in skiplist]
session.run(*isort, silent=SILENT)
session.run("mypy", ".", "--strict", silent=SILENT)
session.run("flake8", "--config", ".flake8")
session.run("yamllint", ".")
example_dirs = [
"examples/advanced/",
"examples/configure_hydra",
"examples/patterns",
"examples/instantiate",
"examples/tutorials/basic/your_first_hydra_app",
"examples/tutorials/basic/running_your_hydra_app",
"examples/tutorials/structured_configs/",
]
for edir in example_dirs:
dirs = find_dirs(path=edir)
for d in dirs:
session.run("mypy", d, "--strict", silent=SILENT)
# lint example plugins
lint_plugins_in_dir(session=session, directory="examples/plugins")
# bandit static security analysis
session.run("bandit", "--exclude", "./.nox/**", "-ll", "-r", ".", silent=SILENT)
@nox.session(python=PYTHON_VERSIONS)
def lint_plugins(session):
lint_plugins_in_dir(session, "plugins")
def lint_plugins_in_dir(session, directory: str) -> None:
install_cmd = ["pip", "install", "-e"]
install_hydra(session, install_cmd)
plugins = select_plugins(session=session, directory=directory)
# plugin linting requires the plugins and their dependencies to be installed
for plugin in plugins:
cmd = install_cmd + [os.path.join(directory, plugin.path)]
session.run(*cmd, silent=SILENT)
install_dev_deps(session)
session.run("flake8", "--config", ".flake8", directory)
# Mypy for plugins
for plugin in plugins:
path = os.path.join(directory, plugin.path)
session.chdir(path)
session.run(*_black_cmd(), silent=SILENT)
session.run(*_isort_cmd(), silent=SILENT)
session.chdir(BASE)
files = []
for file in ["tests", "example"]:
abs = os.path.join(path, file)
if os.path.exists(abs):
files.append(abs)
session.run(
"mypy",
"--strict",
f"{path}/hydra_plugins",
"--config-file",
f"{BASE}/.mypy.ini",
silent=SILENT,
)
session.run(
"mypy",
"--strict",
"--namespace-packages",
"--config-file",
f"{BASE}/.mypy.ini",
*files,
silent=SILENT,
)
@nox.session(python=PYTHON_VERSIONS)
def test_tools(session):
install_cmd = ["pip", "install"]
_upgrade_basic(session)
session.install("pytest")
install_hydra(session, install_cmd)
tools = [
x
for x in sorted(os.listdir(os.path.join(BASE, "tools")))
if not os.path.isfile(x)
]
for tool in tools:
tool_path = os.path.join("tools", tool)
session.chdir(BASE)
if (Path(tool_path) / "setup.py").exists():
cmd = list(install_cmd) + ["-e", tool_path]
session.run(*cmd, silent=SILENT)
session.run("pytest", tool_path)
session.chdir(BASE)
def _get_standalone_apps_dirs():
standalone_apps_dir = Path(f"{BASE}/tests/standalone_apps")
apps = [standalone_apps_dir / subdir for subdir in os.listdir(standalone_apps_dir)]
apps.append(f"{BASE}/examples/advanced/hydra_app_example")
return apps
@nox.session(python=PYTHON_VERSIONS)
def test_core(session):
_upgrade_basic(session)
install_hydra(session, INSTALL_COMMAND)
session.install("pytest")
if not SKIP_CORE_TESTS:
run_pytest(session, "build_helpers", "tests", *session.posargs)
else:
session.log("Skipping Hydra core tests")
apps = _get_standalone_apps_dirs()
session.log("Testing standalone apps")
for subdir in apps:
session.chdir(subdir)
session.run(*INSTALL_COMMAND, ".", silent=SILENT)
run_pytest(session, ".")
session.chdir(BASE)
test_plugins_in_directory(
session,
install_cmd=INSTALL_COMMAND,
directory="examples/plugins",
test_hydra_core=False,
)
@nox.session(python=PYTHON_VERSIONS)
def test_plugins(session):
test_plugins_in_directory(
session=session,
install_cmd=INSTALL_COMMAND,
directory="plugins",
test_hydra_core=True,
)
def test_plugins_in_directory(
session, install_cmd, directory: str, test_hydra_core: bool
):
_upgrade_basic(session)
session.install("pytest")
install_hydra(session, install_cmd)
selected_plugin = select_plugins(session=session, directory=directory)
for plugin in selected_plugin:
cmd = list(install_cmd) + [os.path.join(directory, plugin.path)]
session.run(*cmd, silent=SILENT)
if not SILENT:
session.run("pipdeptree", "-p", plugin.name)
# Test that we can import Hydra
session.run("python", "-c", "from hydra import main", silent=SILENT)
# Test that we can import all installed plugins
for plugin in selected_plugin:
session.run("python", "-c", f"import {plugin.module}")
# Run Hydra tests to verify installed plugins did not break anything
if test_hydra_core:
if not SKIP_CORE_TESTS:
run_pytest(session, "tests")
else:
session.log("Skipping Hydra core tests")
# Run tests for all installed plugins
for plugin in selected_plugin:
# install all other plugins that are compatible with the current Python version
session.chdir(os.path.join(BASE, directory, plugin.path))
run_pytest(session)
@nox.session(python="3.8")
def coverage(session):
coverage_env = {
"COVERAGE_HOME": BASE,
"COVERAGE_FILE": f"{BASE}/.coverage",
"COVERAGE_RCFILE": f"{BASE}/.coveragerc",
}
_upgrade_basic(session)
session.install("coverage", "pytest")
install_hydra(session, ["pip", "install", "-e"])
session.run("coverage", "erase", env=coverage_env)
for directory in ["plugins", "examples/plugins"]:
selected_plugins = select_plugins(session=session, directory=directory)
for plugin in selected_plugins:
session.run(
"pip",
"install",
"-e",
os.path.join(directory, plugin.path),
silent=SILENT,
)
# run plugin coverage
for plugin in selected_plugins:
session.chdir(os.path.join(directory, plugin.path))
cov_args = ["coverage", "run", "--append", "-m"]
cov_args.extend(pytest_args())
session.run(*cov_args, silent=SILENT, env=coverage_env)
session.chdir(BASE)
# run hydra-core coverage
session.run(
"coverage",
"run",
"--append",
"-m",
silent=SILENT,
env=coverage_env,
*pytest_args(),
)
# Increase the fail_under as coverage improves
session.run("coverage", "report", "--fail-under=80", env=coverage_env)
session.run("coverage", "erase", env=coverage_env)
@nox.session(python=PYTHON_VERSIONS)
def test_jupyter_notebooks(session):
versions = copy.copy(DEFAULT_PYTHON_VERSIONS)
if session.python not in versions:
session.skip(
f"Not testing Jupyter notebook on Python {session.python}, supports [{','.join(versions)}]"
)
session.install("jupyter", "nbval")
install_hydra(session, ["pip", "install", "-e"])
args = pytest_args(
"--nbval", "examples/jupyter_notebooks/compose_configs_in_notebook.ipynb"
)
    # Jupyter notebook tests on Windows yield warnings
args = [x for x in args if x != "-Werror"]
session.run(*args, silent=SILENT)
notebooks_dir = Path("tests/jupyter")
for notebook in [
file for file in notebooks_dir.iterdir() if str(file).endswith(".ipynb")
]:
args = pytest_args("--nbval", str(notebook))
args = [x for x in args if x != "-Werror"]
session.run(*args, silent=SILENT)
|
[] |
[] |
[
"FIX",
"SKIP_CORE_TESTS",
"NOX_PYTHON_VERSIONS",
"INSTALL_EDITABLE_MODE",
"PLUGINS",
"VERBOSE"
] |
[]
|
["FIX", "SKIP_CORE_TESTS", "NOX_PYTHON_VERSIONS", "INSTALL_EDITABLE_MODE", "PLUGINS", "VERBOSE"]
|
python
| 6 | 0 | |
examples/cyclops/main.go
|
// Port of http://members.shaw.ca/el.supremo/MagickWand/cyclops.htm to Go
package main
import (
"os"
"gopkg.in/gographics/imagick.v2/imagick"
)
func main() {
var err error
imagick.Initialize()
defer imagick.Terminate()
mw := imagick.NewMagickWand()
bg := imagick.NewPixelWand()
fg := imagick.NewPixelWand()
err = mw.ReadImage("cyclops_sm.gif")
if err != nil {
panic(err)
}
bg.SetColor("white")
mw.BorderImage(bg, 1, 1)
mw.SetImageAlphaChannel(imagick.ALPHA_CHANNEL_SET)
fg.SetColor("none")
channel := imagick.CHANNELS_RGB | imagick.CHANNEL_ALPHA
// Floodfill the "background" colour with the "foreground" colour
// starting at coordinate 0,0 using a fuzz of 20
mw.FloodfillPaintImage(channel, fg, 20, bg, 0, 0, false)
mw.ShaveImage(1, 1)
	err = mw.DisplayImage(os.Getenv("DISPLAY"))
if err != nil {
panic(err)
}
}
|
[
"\"DISPLAY\""
] |
[] |
[
"DISPLAY"
] |
[]
|
["DISPLAY"]
|
go
| 1 | 0 | |
vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
|
package hcs
import (
"context"
"encoding/json"
"errors"
"os"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/Microsoft/hcsshim/internal/cow"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/schema1"
"github.com/Microsoft/hcsshim/internal/timeout"
"github.com/Microsoft/hcsshim/internal/vmcompute"
"go.opencensus.io/trace"
)
// currentContainerStarts is used to limit the number of concurrent container
// starts.
var currentContainerStarts containerStarts
type containerStarts struct {
maxParallel int
inProgress int
sync.Mutex
}
func init() {
mpsS := os.Getenv("HCSSHIM_MAX_PARALLEL_START")
if len(mpsS) > 0 {
mpsI, err := strconv.Atoi(mpsS)
if err != nil || mpsI < 0 {
return
}
currentContainerStarts.maxParallel = mpsI
}
}
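// Illustrative note, not part of the original file: the init function above only honors
// HCSSHIM_MAX_PARALLEL_START when it parses as a non-negative integer, so a hypothetical
// configuration such as
//
//	HCSSHIM_MAX_PARALLEL_START=4
//
// caps concurrent Start calls at four, while an unset, zero or malformed value leaves the limit
// disabled.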
type System struct {
handleLock sync.RWMutex
handle vmcompute.HcsSystem
id string
callbackNumber uintptr
closedWaitOnce sync.Once
waitBlock chan struct{}
waitError error
exitError error
os, typ string
}
func newSystem(id string) *System {
return &System{
id: id,
waitBlock: make(chan struct{}),
}
}
// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) {
operation := "hcsshim::CreateComputeSystem"
// hcsCreateComputeSystemContext is an async operation. Start the outer span
// here to measure the full create time.
ctx, span := trace.StartSpan(ctx, operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", id))
computeSystem := newSystem(id)
hcsDocumentB, err := json.Marshal(hcsDocumentInterface)
if err != nil {
return nil, err
}
hcsDocument := string(hcsDocumentB)
var (
identity syscall.Handle
resultJSON string
createError error
)
computeSystem.handle, resultJSON, createError = vmcompute.HcsCreateComputeSystem(ctx, id, hcsDocument, identity)
if createError == nil || IsPending(createError) {
defer func() {
if err != nil {
computeSystem.Close()
}
}()
if err = computeSystem.registerCallback(ctx); err != nil {
// Terminate the compute system if it still exists. We're okay to
// ignore a failure here.
computeSystem.Terminate(ctx)
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
}
events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate)
if err != nil {
if err == ErrTimeout {
// Terminate the compute system if it still exists. We're okay to
// ignore a failure here.
computeSystem.Terminate(ctx)
}
return nil, makeSystemError(computeSystem, operation, hcsDocument, err, events)
}
go computeSystem.waitBackground()
if err = computeSystem.getCachedProperties(ctx); err != nil {
return nil, err
}
return computeSystem, nil
}
// OpenComputeSystem opens an existing compute system by ID.
func OpenComputeSystem(ctx context.Context, id string) (*System, error) {
operation := "hcsshim::OpenComputeSystem"
computeSystem := newSystem(id)
handle, resultJSON, err := vmcompute.HcsOpenComputeSystem(ctx, id)
events := processHcsResult(ctx, resultJSON)
if err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, events)
}
computeSystem.handle = handle
defer func() {
if err != nil {
computeSystem.Close()
}
}()
if err = computeSystem.registerCallback(ctx); err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
go computeSystem.waitBackground()
if err = computeSystem.getCachedProperties(ctx); err != nil {
return nil, err
}
return computeSystem, nil
}
func (computeSystem *System) getCachedProperties(ctx context.Context) error {
props, err := computeSystem.Properties(ctx)
if err != nil {
return err
}
computeSystem.typ = strings.ToLower(props.SystemType)
computeSystem.os = strings.ToLower(props.RuntimeOSType)
if computeSystem.os == "" && computeSystem.typ == "container" {
// Pre-RS5 HCS did not return the OS, but it only supported containers
// that ran Windows.
computeSystem.os = "windows"
}
return nil
}
// OS returns the operating system of the compute system, "linux" or "windows".
func (computeSystem *System) OS() string {
return computeSystem.os
}
// IsOCI returns whether processes in the compute system should be created via
// OCI.
func (computeSystem *System) IsOCI() bool {
return computeSystem.os == "linux" && computeSystem.typ == "container"
}
// GetComputeSystems gets a list of the compute systems on the system that match the query.
func GetComputeSystems(ctx context.Context, q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) {
operation := "hcsshim::GetComputeSystems"
queryb, err := json.Marshal(q)
if err != nil {
return nil, err
}
computeSystemsJSON, resultJSON, err := vmcompute.HcsEnumerateComputeSystems(ctx, string(queryb))
events := processHcsResult(ctx, resultJSON)
if err != nil {
return nil, &HcsError{Op: operation, Err: err, Events: events}
}
if computeSystemsJSON == "" {
return nil, ErrUnexpectedValue
}
computeSystems := []schema1.ContainerProperties{}
if err = json.Unmarshal([]byte(computeSystemsJSON), &computeSystems); err != nil {
return nil, err
}
return computeSystems, nil
}
// Start synchronously starts the computeSystem.
func (computeSystem *System) Start(ctx context.Context) (err error) {
operation := "hcsshim::System::Start"
// hcsStartComputeSystemContext is an async operation. Start the outer span
// here to measure the full start time.
ctx, span := trace.StartSpan(ctx, operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
}
// This is a very simple backoff-retry loop to limit the number
// of parallel container starts if environment variable
// HCSSHIM_MAX_PARALLEL_START is set to a positive integer.
	// It should generally only be used as a workaround for various
	// platform issues that exist between RS1 and RS4 as of Aug 2018.
if currentContainerStarts.maxParallel > 0 {
for {
currentContainerStarts.Lock()
if currentContainerStarts.inProgress < currentContainerStarts.maxParallel {
currentContainerStarts.inProgress++
currentContainerStarts.Unlock()
break
}
if currentContainerStarts.inProgress == currentContainerStarts.maxParallel {
currentContainerStarts.Unlock()
time.Sleep(100 * time.Millisecond)
}
}
// Make sure we decrement the count when we are done.
defer func() {
currentContainerStarts.Lock()
currentContainerStarts.inProgress--
currentContainerStarts.Unlock()
}()
}
resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "")
events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart)
if err != nil {
return makeSystemError(computeSystem, operation, "", err, events)
}
return nil
}
// ID returns the compute system's identifier.
func (computeSystem *System) ID() string {
return computeSystem.id
}
// Shutdown requests a compute system shutdown.
func (computeSystem *System) Shutdown(ctx context.Context) error {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
operation := "hcsshim::System::Shutdown"
if computeSystem.handle == 0 {
return nil
}
resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "")
events := processHcsResult(ctx, resultJSON)
switch err {
case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
default:
return makeSystemError(computeSystem, operation, "", err, events)
}
return nil
}
// Terminate requests a compute system terminate.
func (computeSystem *System) Terminate(ctx context.Context) error {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
operation := "hcsshim::System::Terminate"
if computeSystem.handle == 0 {
return nil
}
resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "")
events := processHcsResult(ctx, resultJSON)
switch err {
case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
default:
return makeSystemError(computeSystem, operation, "", err, events)
}
return nil
}
// waitBackground waits for the compute system exit notification. Once received, it
// sets `computeSystem.waitError` (if any) and unblocks all `Wait` calls.
//
// This MUST be called exactly once per `computeSystem.handle` but `Wait` is
// safe to call multiple times.
func (computeSystem *System) waitBackground() {
operation := "hcsshim::System::waitBackground"
ctx, span := trace.StartSpan(context.Background(), operation)
defer span.End()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
switch err {
case nil:
log.G(ctx).Debug("system exited")
case ErrVmcomputeUnexpectedExit:
log.G(ctx).Debug("unexpected system exit")
computeSystem.exitError = makeSystemError(computeSystem, operation, "", err, nil)
err = nil
default:
err = makeSystemError(computeSystem, operation, "", err, nil)
}
computeSystem.closedWaitOnce.Do(func() {
computeSystem.waitError = err
close(computeSystem.waitBlock)
})
oc.SetSpanStatus(span, err)
}
// Wait synchronously waits for the compute system to shut down or terminate. If
// the compute system has already exited, Wait returns the previous error (if any).
func (computeSystem *System) Wait() error {
<-computeSystem.waitBlock
return computeSystem.waitError
}
// ExitError returns an error describing the reason the compute system terminated.
func (computeSystem *System) ExitError() error {
select {
case <-computeSystem.waitBlock:
if computeSystem.waitError != nil {
return computeSystem.waitError
}
return computeSystem.exitError
default:
return errors.New("container not exited")
}
}
func (computeSystem *System) Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
operation := "hcsshim::System::Properties"
queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types})
if err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes))
events := processHcsResult(ctx, resultJSON)
if err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, events)
}
if propertiesJSON == "" {
return nil, ErrUnexpectedValue
}
properties := &schema1.ContainerProperties{}
if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
return properties, nil
}
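// A short sketch (illustrative; the property-type constant is assumed to be
// provided by the schema1 package):
//
//	props, err := system.Properties(ctx, schema1.PropertyTypeStatistics)
//	if err != nil {
//		return err
//	}
//	_ = props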
// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
func (computeSystem *System) Pause(ctx context.Context) (err error) {
operation := "hcsshim::System::Pause"
// hcsPauseComputeSystemContext is an async operation. Start the outer span
// here to measure the full pause time.
ctx, span := trace.StartSpan(ctx, operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
}
resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "")
events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause)
if err != nil {
return makeSystemError(computeSystem, operation, "", err, events)
}
return nil
}
// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5.
func (computeSystem *System) Resume(ctx context.Context) (err error) {
operation := "hcsshim::System::Resume"
// hcsResumeComputeSystemContext is an async operation. Start the outer span
// here to measure the full restore time.
ctx, span := trace.StartSpan(ctx, operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
}
resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "")
events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume)
if err != nil {
return makeSystemError(computeSystem, operation, "", err, events)
}
return nil
}
func (computeSystem *System) createProcess(ctx context.Context, operation string, c interface{}) (*Process, *vmcompute.HcsProcessInformation, error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
if computeSystem.handle == 0 {
return nil, nil, makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
}
configurationb, err := json.Marshal(c)
if err != nil {
return nil, nil, makeSystemError(computeSystem, operation, "", err, nil)
}
configuration := string(configurationb)
processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration)
events := processHcsResult(ctx, resultJSON)
if err != nil {
return nil, nil, makeSystemError(computeSystem, operation, configuration, err, events)
}
log.G(ctx).WithField("pid", processInfo.ProcessId).Debug("created process pid")
return newProcess(processHandle, int(processInfo.ProcessId), computeSystem), &processInfo, nil
}
// CreateProcessNoStdio launches a new process within the computeSystem. The
// Stdio handles are not cached on the process struct.
func (computeSystem *System) CreateProcessNoStdio(c interface{}) (_ cow.Process, err error) {
operation := "hcsshim::System::CreateProcessNoStdio"
ctx, span := trace.StartSpan(context.Background(), operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
process, processInfo, err := computeSystem.createProcess(ctx, operation, c)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
process.Close()
}
}()
// We don't do anything with these handles. Close them so they don't leak.
syscall.Close(processInfo.StdInput)
syscall.Close(processInfo.StdOutput)
syscall.Close(processInfo.StdError)
if err = process.registerCallback(ctx); err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
go process.waitBackground()
return process, nil
}
// CreateProcess launches a new process within the computeSystem.
func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) (cow.Process, error) {
operation := "hcsshim::System::CreateProcess"
process, processInfo, err := computeSystem.createProcess(ctx, operation, c)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
process.Close()
}
}()
pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError})
if err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
process.stdin = pipes[0]
process.stdout = pipes[1]
process.stderr = pipes[2]
if err = process.registerCallback(ctx); err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
go process.waitBackground()
return process, nil
}
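// A rough usage sketch (illustrative assumption; the concrete process config
// depends on the schema in use and is passed as interface{}):
//
//	p, err := system.CreateProcess(ctx, processConfig)
//	if err != nil {
//		return err
//	}
//	defer p.Close()
//	_ = p.Wait()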
// OpenProcess gets an interface to an existing process within the computeSystem.
func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process, error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
operation := "hcsshim::System::OpenProcess"
if computeSystem.handle == 0 {
return nil, makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
}
processHandle, resultJSON, err := vmcompute.HcsOpenProcess(ctx, computeSystem.handle, uint32(pid))
events := processHcsResult(ctx, resultJSON)
if err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, events)
}
process := newProcess(processHandle, pid, computeSystem)
if err = process.registerCallback(ctx); err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
go process.waitBackground()
return process, nil
}
// Close cleans up any state associated with the compute system but does not terminate or wait for it.
func (computeSystem *System) Close() (err error) {
operation := "hcsshim::System::Close"
ctx, span := trace.StartSpan(context.Background(), operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
computeSystem.handleLock.Lock()
defer computeSystem.handleLock.Unlock()
// Don't double-free the handle
if computeSystem.handle == 0 {
return nil
}
if err = computeSystem.unregisterCallback(ctx); err != nil {
return makeSystemError(computeSystem, operation, "", err, nil)
}
err = vmcompute.HcsCloseComputeSystem(ctx, computeSystem.handle)
if err != nil {
return makeSystemError(computeSystem, operation, "", err, nil)
}
computeSystem.handle = 0
computeSystem.closedWaitOnce.Do(func() {
computeSystem.waitError = ErrAlreadyClosed
close(computeSystem.waitBlock)
})
return nil
}
func (computeSystem *System) registerCallback(ctx context.Context) error {
callbackContext := &notifcationWatcherContext{
channels: newSystemChannels(),
systemID: computeSystem.id,
}
callbackMapLock.Lock()
callbackNumber := nextCallback
nextCallback++
callbackMap[callbackNumber] = callbackContext
callbackMapLock.Unlock()
callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, notificationWatcherCallback, callbackNumber)
if err != nil {
return err
}
callbackContext.handle = callbackHandle
computeSystem.callbackNumber = callbackNumber
return nil
}
func (computeSystem *System) unregisterCallback(ctx context.Context) error {
callbackNumber := computeSystem.callbackNumber
callbackMapLock.RLock()
callbackContext := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if callbackContext == nil {
return nil
}
handle := callbackContext.handle
if handle == 0 {
return nil
}
// hcsUnregisterComputeSystemCallback has its own synchronization
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle)
if err != nil {
return err
}
closeChannels(callbackContext.channels)
callbackMapLock.Lock()
delete(callbackMap, callbackNumber)
callbackMapLock.Unlock()
handle = 0
return nil
}
// Modify modifies the compute system by sending a request to HCS.
func (computeSystem *System) Modify(ctx context.Context, config interface{}) error {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
operation := "hcsshim::System::Modify"
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
}
requestBytes, err := json.Marshal(config)
if err != nil {
return err
}
requestJSON := string(requestBytes)
resultJSON, err := vmcompute.HcsModifyComputeSystem(ctx, computeSystem.handle, requestJSON)
events := processHcsResult(ctx, resultJSON)
if err != nil {
return makeSystemError(computeSystem, operation, requestJSON, err, events)
}
return nil
}
|
[
"\"HCSSHIM_MAX_PARALLEL_START\""
] |
[] |
[
"HCSSHIM_MAX_PARALLEL_START"
] |
[]
|
["HCSSHIM_MAX_PARALLEL_START"]
|
go
| 1 | 0 | |
cluster/cluster_test.go
|
package cluster_test
import (
"encoding/json"
"fmt"
"math"
"os"
"reflect"
"strings"
"testing"
"time"
"github.com/garyburd/redigo/redis"
"github.com/soundcloud/roshi/cluster"
"github.com/soundcloud/roshi/common"
"github.com/soundcloud/roshi/pool"
)
func TestInsertSelectOffsetKeys(t *testing.T) {
addresses := os.Getenv("TEST_REDIS_ADDRESSES")
if addresses == "" {
t.Logf("To run this test, set the TEST_REDIS_ADDRESSES environment variable")
return
}
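// Example value (illustrative only): TEST_REDIS_ADDRESSES="localhost:6379,localhost:6380"
// points the integration tests at one or more running Redis instances.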
// Build a new cluster with a high max size.
c := integrationCluster(t, addresses, 1000)
// Make a bunch of inserts.
if err := c.Insert([]common.KeyScoreMember{
{"foo", 50, "alpha"},
{"foo", 99, "beta"},
{"foo", 11, "delta"},
{"bar", 45, "gamma"},
{"bar", 21, "kappa"},
{"bar", 76, "iota"},
{"baz", 33, "sigma"},
{"baz", 34, "omicron"},
{"baz", 35, "nu"},
}); err != nil {
t.Fatal(err)
}
// Select everything.
m := map[string][]common.KeyScoreMember{}
for e := range c.SelectOffset([]string{"foo", "bar", "baz"}, 0, 10) {
if e.Error != nil {
t.Errorf("during Select: key %q: %s", e.Key, e.Error)
}
m[e.Key] = e.KeyScoreMembers
}
for key, expected := range map[string][]common.KeyScoreMember{
"foo": []common.KeyScoreMember{
{"foo", 99, "beta"},
{"foo", 50, "alpha"},
{"foo", 11, "delta"},
},
"bar": []common.KeyScoreMember{
{"bar", 76, "iota"},
{"bar", 45, "gamma"},
{"bar", 21, "kappa"},
},
"baz": []common.KeyScoreMember{
{"baz", 35, "nu"},
{"baz", 34, "omicron"},
{"baz", 33, "sigma"},
},
} {
if got := m[key]; !reflect.DeepEqual(expected, got) {
t.Errorf("%s: expected\n %v, got\n %v", key, expected, got)
}
t.Logf("%s: %v OK", key, expected)
}
// Just select the first element from each key.
m = map[string][]common.KeyScoreMember{}
for e := range c.SelectOffset([]string{"foo", "bar", "baz"}, 0, 1) {
if e.Error != nil {
t.Errorf("during Select: key %q: %s", e.Key, e.Error)
}
m[e.Key] = e.KeyScoreMembers
}
for key, expected := range map[string][]common.KeyScoreMember{
"foo": []common.KeyScoreMember{
{"foo", 99, "beta"},
},
"bar": []common.KeyScoreMember{
{"bar", 76, "iota"},
},
"baz": []common.KeyScoreMember{
{"baz", 35, "nu"},
},
} {
if got := m[key]; !reflect.DeepEqual(expected, got) {
t.Errorf("%s: expected\n %v, got\n %v", key, expected, got)
continue
}
t.Logf("%s: %v OK", key, expected)
}
// Just select the second element from each key.
m = map[string][]common.KeyScoreMember{}
for e := range c.SelectOffset([]string{"foo", "bar", "baz"}, 1, 1) {
if e.Error != nil {
t.Errorf("during Select: key %q: %s", e.Key, e.Error)
}
m[e.Key] = e.KeyScoreMembers
}
for key, expected := range map[string][]common.KeyScoreMember{
"foo": []common.KeyScoreMember{
{"foo", 50, "alpha"},
},
"bar": []common.KeyScoreMember{
{"bar", 45, "gamma"},
},
"baz": []common.KeyScoreMember{
{"baz", 34, "omicron"},
},
} {
if got := m[key]; !reflect.DeepEqual(expected, got) {
t.Errorf("%s: expected\n %v, got\n %v", key, expected, got)
continue
}
t.Logf("%s: %v OK", key, expected)
}
keysChannel := c.Keys(1)
keys := map[string]bool{}
for batch := range keysChannel {
for _, key := range batch {
keys[key] = true
}
}
if got, expected := keys, map[string]bool{"foo": true, "bar": true, "baz": true}; !reflect.DeepEqual(got, expected) {
t.Errorf("Expected key set %+v, got %+v", expected, got)
}
}
func TestInsertIdempotency(t *testing.T) {
addresses := os.Getenv("TEST_REDIS_ADDRESSES")
if addresses == "" {
t.Logf("To run this test, set the TEST_REDIS_ADDRESSES environment variable")
return
}
// Build a new cluster with a low max size.
c := integrationCluster(t, addresses, 3)
// Make an insert.
if err := c.Insert([]common.KeyScoreMember{
{"foo", 50, "alpha"},
{"foo", 99, "beta"},
{"foo", 11, "delta"},
}); err != nil {
t.Fatal(err)
}
// An older insert on foo-alpha should be rejected.
c.Insert([]common.KeyScoreMember{{"foo", 48, "alpha"}})
m := map[string][]common.KeyScoreMember{}
for e := range c.SelectOffset([]string{"foo"}, 0, 10) {
if e.Error != nil {
t.Errorf("during Select: key %q: %s", e.Key, e.Error)
}
m[e.Key] = e.KeyScoreMembers
}
if expected, got := []common.KeyScoreMember{
{"foo", 99, "beta"},
{"foo", 50, "alpha"},
{"foo", 11, "delta"},
}, m["foo"]; !reflect.DeepEqual(expected, got) {
t.Fatalf("after older insert, expected\n %v, got\n %v", expected, got)
}
// An older delete on foo-alpha should be rejected
c.Delete([]common.KeyScoreMember{{"foo", 49, "alpha"}})
m = map[string][]common.KeyScoreMember{}
for e := range c.SelectOffset([]string{"foo"}, 0, 10) {
if e.Error != nil {
t.Errorf("during Select: key %q: %s", e.Key, e.Error)
}
m[e.Key] = e.KeyScoreMembers
}
if expected, got := []common.KeyScoreMember{
{"foo", 99, "beta"},
{"foo", 50, "alpha"},
{"foo", 11, "delta"},
}, m["foo"]; !reflect.DeepEqual(expected, got) {
t.Fatalf("after older delete, expected\n %v, got\n %v", expected, got)
}
// A newer insert on foo-alpha should be accepted.
c.Insert([]common.KeyScoreMember{{"foo", 50.2, "alpha"}})
m = map[string][]common.KeyScoreMember{}
for e := range c.SelectOffset([]string{"foo"}, 0, 10) {
if e.Error != nil {
t.Errorf("during Select: key %q: %s", e.Key, e.Error)
}
m[e.Key] = e.KeyScoreMembers
}
if expected, got := []common.KeyScoreMember{
{"foo", 99, "beta"},
{"foo", 50.2, "alpha"},
{"foo", 11, "delta"},
}, m["foo"]; !reflect.DeepEqual(expected, got) {
t.Fatalf("after newer insert, expected\n %v, got\n %v", expected, got)
}
// A newer delete on foo-alpha should be accepted.
c.Delete([]common.KeyScoreMember{{"foo", 50.3, "alpha"}})
m = map[string][]common.KeyScoreMember{}
for e := range c.SelectOffset([]string{"foo"}, 0, 10) {
if e.Error != nil {
t.Errorf("during Select: key %q: %s", e.Key, e.Error)
}
m[e.Key] = e.KeyScoreMembers
}
if expected, got := []common.KeyScoreMember{
{"foo", 99, "beta"},
{"foo", 11, "delta"},
}, m["foo"]; !reflect.DeepEqual(expected, got) {
t.Fatalf("after newer delete, expected\n %v, got\n %v", expected, got)
}
}
func TestInsertMaxSize(t *testing.T) {
addresses := os.Getenv("TEST_REDIS_ADDRESSES")
if addresses == "" {
t.Logf("To run this test, set the TEST_REDIS_ADDRESSES environment variable")
return
}
// Build a new cluster with a low max size.
c := integrationCluster(t, addresses, 3)
// Make a bunch of inserts on a single key.
if err := c.Insert([]common.KeyScoreMember{
{"foo", 50, "alpha"},
{"foo", 99, "beta"},
{"foo", 11, "delta"},
{"foo", 45, "gamma"},
{"foo", 21, "kappa"},
{"foo", 76, "iota"},
{"foo", 33, "sigma"},
{"foo", 34, "omicron"},
{"foo", 35, "nu"},
}); err != nil {
t.Fatal(err)
}
// Select everything.
m := map[string][]common.KeyScoreMember{}
for e := range c.SelectOffset([]string{"foo"}, 0, 10) {
if e.Error != nil {
t.Errorf("during Select: key %q: %s", e.Key, e.Error)
}
m[e.Key] = e.KeyScoreMembers
}
for key, expected := range map[string][]common.KeyScoreMember{
"foo": []common.KeyScoreMember{
{"foo", 99, "beta"},
{"foo", 76, "iota"},
{"foo", 50, "alpha"},
},
} {
if got := m[key]; !reflect.DeepEqual(expected, got) {
t.Errorf("%s: expected\n %v, got\n %v", key, expected, got)
continue
}
t.Logf("%s: %v OK", key, expected)
}
// Make another single insert with a new score, overwriting an existing member.
c.Insert([]common.KeyScoreMember{{"foo", 51, "alpha"}})
// Should have the same output with an updated score.
m = map[string][]common.KeyScoreMember{}
for e := range c.SelectOffset([]string{"foo"}, 0, 10) {
if e.Error != nil {
t.Errorf("during Select: key %q: %s", e.Key, e.Error)
}
m[e.Key] = e.KeyScoreMembers
}
for key, expected := range map[string][]common.KeyScoreMember{
"foo": []common.KeyScoreMember{
{"foo", 99, "beta"},
{"foo", 76, "iota"},
{"foo", 51, "alpha"},
},
} {
if got := m[key]; !reflect.DeepEqual(expected, got) {
t.Errorf("%s: expected\n %v, got\n %v", key, expected, got)
continue
}
t.Logf("%s: %v OK", key, expected)
}
// Make another single insert of a brand-new member.
c.Insert([]common.KeyScoreMember{{"foo", 60, "woop"}})
// Should have new output.
m = map[string][]common.KeyScoreMember{}
for e := range c.SelectOffset([]string{"foo"}, 0, 10) {
if e.Error != nil {
t.Errorf("during Select: key %q: %s", e.Key, e.Error)
}
m[e.Key] = e.KeyScoreMembers
}
for key, expected := range map[string][]common.KeyScoreMember{
"foo": []common.KeyScoreMember{
{"foo", 99, "beta"},
{"foo", 76, "iota"},
{"foo", 60, "woop"},
},
} {
if got := m[key]; !reflect.DeepEqual(expected, got) {
t.Errorf("%s: expected\n %v, got\n %v", key, expected, got)
continue
}
t.Logf("%s: %v OK", key, expected)
}
}
func TestJSONMarshalling(t *testing.T) {
ksm := common.KeyScoreMember{
Key: "This is incorrect UTF-8: " + string([]byte{0, 192, 0, 193}),
Score: 99,
Member: "This is still incorrect UTF-8: " + string([]byte{0, 192, 0, 193}),
}
jsonData, err := json.Marshal(ksm)
if err != nil {
t.Fatal(err)
}
var unmarshalledKSM common.KeyScoreMember
if err := json.Unmarshal(jsonData, &unmarshalledKSM); err != nil {
t.Fatal(err)
}
if ksm != unmarshalledKSM {
t.Errorf(
"JSON marschalling/unmarshalling roundtrip failed. Original: %v, JSON: %v, result: %v",
ksm,
jsonData,
unmarshalledKSM,
)
}
}
func TestSelectRange(t *testing.T) {
addresses := os.Getenv("TEST_REDIS_ADDRESSES")
if addresses == "" {
t.Logf("To run this test, set the TEST_REDIS_ADDRESSES environment variable")
return
}
// Build a new cluster.
c := integrationCluster(t, addresses, 1000)
// Make a bunch of inserts.
if err := c.Insert([]common.KeyScoreMember{
{"foo", 50.1, "alpha"},
{"foo", 99.2, "beta"},
{"foo", 11.3, "delta"},
{"foo", 45.4, "gamma"},
{"foo", 21.5, "kappa"},
{"foo", 76.6, "iota"},
{"foo", 33.7, "sigma"},
{"foo", 34.8, "omicron"},
{"foo", 35.9, "nu"},
{"bar", 66.6, "rho"},
{"bar", 33.3, "psi"},
{"bar", 99.9, "tau"},
}); err != nil {
t.Fatal(err)
}
// Middle of the list, a real element cursor.
ch := c.SelectRange([]string{"foo"}, common.Cursor{Score: 45.4, Member: "gamma"}, common.Cursor{}, 100)
expected := []common.KeyScoreMember{
{"foo", 35.9, "nu"},
{"foo", 34.8, "omicron"},
{"foo", 33.7, "sigma"},
{"foo", 21.5, "kappa"},
{"foo", 11.3, "delta"},
}
e := <-ch
if e.Error != nil {
t.Fatalf("key %q: %s", e.Key, e.Error)
}
if got := e.KeyScoreMembers; !reflect.DeepEqual(expected, got) {
t.Fatalf("key %q: expected \n\t%+v, got \n\t%+v", e.Key, expected, got)
}
if _, ok := <-ch; ok {
t.Fatalf("key %q: expected 1 element on the channel, got multiple", e.Key)
}
// Top of the list.
ch = c.SelectRange([]string{"foo"}, common.Cursor{Score: math.MaxFloat64}, common.Cursor{}, 100)
expected = []common.KeyScoreMember{
{"foo", 99.2, "beta"},
{"foo", 76.6, "iota"},
{"foo", 50.1, "alpha"},
{"foo", 45.4, "gamma"},
{"foo", 35.9, "nu"},
{"foo", 34.8, "omicron"},
{"foo", 33.7, "sigma"},
{"foo", 21.5, "kappa"},
{"foo", 11.3, "delta"},
}
e = <-ch
if e.Error != nil {
t.Fatalf("key %q: %s", e.Key, e.Error)
}
if got := e.KeyScoreMembers; !reflect.DeepEqual(expected, got) {
t.Fatalf("key %q: expected \n\t%+v, got \n\t%+v", e.Key, expected, got)
}
if _, ok := <-ch; ok {
t.Fatalf("key %q: expected 1 element on the channel, got multiple", e.Key)
}
// Restricted limit.
ch = c.SelectRange([]string{"foo"}, common.Cursor{Score: 50.1, Member: "alpha"}, common.Cursor{}, 3)
expected = []common.KeyScoreMember{
{"foo", 45.4, "gamma"},
{"foo", 35.9, "nu"},
{"foo", 34.8, "omicron"},
}
e = <-ch
if e.Error != nil {
t.Fatalf("key %q: %s", e.Key, e.Error)
}
if got := e.KeyScoreMembers; !reflect.DeepEqual(expected, got) {
t.Fatalf("key %q: expected \n\t%+v, got \n\t%+v", e.Key, expected, got)
}
if _, ok := <-ch; ok {
t.Fatalf("key %q: expected 1 element on the channel, got multiple", e.Key)
}
// Multiple keys, top of the list, all elements.
ch = c.SelectRange([]string{"bar", "foo"}, common.Cursor{Score: math.MaxFloat64, Member: ""}, common.Cursor{}, 100)
m := map[string][]common.KeyScoreMember{}
for e := range ch {
if e.Error != nil {
t.Errorf("during Select: key %q: %s", e.Key, e.Error)
}
m[e.Key] = e.KeyScoreMembers
}
for key, expected := range map[string][]common.KeyScoreMember{
"foo": []common.KeyScoreMember{
{"foo", 99.2, "beta"},
{"foo", 76.6, "iota"},
{"foo", 50.1, "alpha"},
{"foo", 45.4, "gamma"},
{"foo", 35.9, "nu"},
{"foo", 34.8, "omicron"},
{"foo", 33.7, "sigma"},
{"foo", 21.5, "kappa"},
{"foo", 11.3, "delta"},
},
"bar": []common.KeyScoreMember{
{"bar", 99.9, "tau"},
{"bar", 66.6, "rho"},
{"bar", 33.3, "psi"},
},
} {
if got := m[key]; !reflect.DeepEqual(expected, got) {
t.Errorf("key %q: expected \n\t%v, got \n\t%v", key, expected, got)
continue
}
}
// Multiple keys, middle of the list, all elements.
ch = c.SelectRange([]string{"bar", "foo"}, common.Cursor{Score: 66.6, Member: "rho"}, common.Cursor{}, 100)
m = map[string][]common.KeyScoreMember{}
for e := range ch {
if e.Error != nil {
t.Errorf("during Select: key %q: %s", e.Key, e.Error)
}
m[e.Key] = e.KeyScoreMembers
}
for key, expected := range map[string][]common.KeyScoreMember{
"foo": []common.KeyScoreMember{
{"foo", 50.1, "alpha"},
{"foo", 45.4, "gamma"},
{"foo", 35.9, "nu"},
{"foo", 34.8, "omicron"},
{"foo", 33.7, "sigma"},
{"foo", 21.5, "kappa"},
{"foo", 11.3, "delta"},
},
"bar": []common.KeyScoreMember{
{"bar", 33.3, "psi"},
},
} {
if got := m[key]; !reflect.DeepEqual(expected, got) {
t.Errorf("key %q: expected \n\t%v, got \n\t%v", key, expected, got)
continue
}
}
// Multiple keys, middle of the list, limited elements.
ch = c.SelectRange([]string{"bar", "foo"}, common.Cursor{Score: 66.6, Member: "rho"}, common.Cursor{}, 1)
m = map[string][]common.KeyScoreMember{}
for e := range ch {
if e.Error != nil {
t.Errorf("during Select: key %q: %s", e.Key, e.Error)
}
m[e.Key] = e.KeyScoreMembers
}
for key, expected := range map[string][]common.KeyScoreMember{
"foo": []common.KeyScoreMember{
{"foo", 50.1, "alpha"},
},
"bar": []common.KeyScoreMember{
{"bar", 33.3, "psi"},
},
} {
if got := m[key]; !reflect.DeepEqual(expected, got) {
t.Errorf("key %q: expected \n\t%v, got \n\t%v", key, expected, got)
continue
}
}
// Top of the list, using the stopcursor.
ch = c.SelectRange([]string{"foo"}, common.Cursor{Score: math.MaxFloat64}, common.Cursor{Score: 45.4, Member: "gamma"}, 100)
expected = []common.KeyScoreMember{
{"foo", 99.2, "beta"},
{"foo", 76.6, "iota"},
{"foo", 50.1, "alpha"},
}
e = <-ch
if e.Error != nil {
t.Fatalf("key %q: %s", e.Key, e.Error)
}
if got := e.KeyScoreMembers; !reflect.DeepEqual(expected, got) {
t.Fatalf("key %q: expected \n\t%+v, got \n\t%+v", e.Key, expected, got)
}
if _, ok := <-ch; ok {
t.Fatalf("key %q: expected 1 element on the channel, got multiple", e.Key)
}
// Middle of the list, using the stopcursor.
ch = c.SelectRange([]string{"foo"}, common.Cursor{Score: 35.9, Member: "nu"}, common.Cursor{Score: 21.5, Member: "kappa"}, 100)
expected = []common.KeyScoreMember{
{"foo", 34.8, "omicron"},
{"foo", 33.7, "sigma"},
}
e = <-ch
if e.Error != nil {
t.Fatalf("key %q: %s", e.Key, e.Error)
}
if got := e.KeyScoreMembers; !reflect.DeepEqual(expected, got) {
t.Fatalf("key %q: expected \n\t%+v, got \n\t%+v", e.Key, expected, got)
}
if _, ok := <-ch; ok {
t.Fatalf("key %q: expected 1 element on the channel, got multiple", e.Key)
}
}
func TestCursorRetries(t *testing.T) {
addresses := os.Getenv("TEST_REDIS_ADDRESSES")
if addresses == "" {
t.Logf("To run this test, set the TEST_REDIS_ADDRESSES environment variable")
return
}
// Build a new cluster.
c := integrationCluster(t, addresses, 1000)
elements := []common.KeyScoreMember{}
for i := 0; i < 50; i++ {
elements = append(elements, common.KeyScoreMember{
Key: "foo",
Score: 1.23,
Member: fmt.Sprintf("%03d", i)},
)
}
// Insert many elements with the same score.
if err := c.Insert(elements); err != nil {
t.Fatal(err)
}
// A Select with a low limit should still work, due to retries.
element := <-c.SelectRange([]string{"foo"}, common.Cursor{Score: 1.23, Member: "003"}, common.Cursor{}, 5)
if element.Error != nil {
t.Errorf("got unexpected error: %s", element.Error)
} else {
t.Logf("OK: %v", element.KeyScoreMembers)
}
}
func integrationCluster(t *testing.T, addresses string, maxSize int) cluster.Cluster {
p := pool.New(
strings.Split(addresses, ","),
1*time.Second, // connect timeout
1*time.Second, // read timeout
1*time.Second, // write timeout
10, // max connections per instance
pool.Murmur3, // hash
)
for i := 0; i < p.Size(); i++ {
p.WithIndex(i, func(conn redis.Conn) error {
_, err := conn.Do("FLUSHDB")
if err != nil {
t.Fatal(err)
}
return nil
})
}
return cluster.New(p, maxSize, 0, nil)
}
|
[
"\"TEST_REDIS_ADDRESSES\"",
"\"TEST_REDIS_ADDRESSES\"",
"\"TEST_REDIS_ADDRESSES\"",
"\"TEST_REDIS_ADDRESSES\"",
"\"TEST_REDIS_ADDRESSES\""
] |
[] |
[
"TEST_REDIS_ADDRESSES"
] |
[]
|
["TEST_REDIS_ADDRESSES"]
|
go
| 1 | 0 | |
cmd/yaegi/yaegi_test.go
|
package main
import (
"bytes"
"context"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
)
const (
// CITimeoutMultiplier is the multiplier for all timeouts in the CI.
CITimeoutMultiplier = 3
)
// Sleep pauses the current goroutine for at least the duration d.
func Sleep(d time.Duration) {
d = applyCIMultiplier(d)
time.Sleep(d)
}
func applyCIMultiplier(timeout time.Duration) time.Duration {
ci := os.Getenv("CI")
if ci == "" {
return timeout
}
b, err := strconv.ParseBool(ci)
if err != nil || !b {
return timeout
}
return time.Duration(float64(timeout) * CITimeoutMultiplier)
}
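// For instance (illustrative): with CI=true in the environment, the 200ms pause
// used in TestYaegiCmdCancel below becomes 600ms (CITimeoutMultiplier = 3).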
func TestYaegiCmdCancel(t *testing.T) {
tmp, err := ioutil.TempDir("", "yaegi-")
if err != nil {
t.Fatalf("failed to create tmp directory: %v", err)
}
defer func() {
err = os.RemoveAll(tmp)
if err != nil {
t.Errorf("failed to clean up %v: %v", tmp, err)
}
}()
yaegi := filepath.Join(tmp, "yaegi")
build := exec.Command("go", "build", "-race", "-o", yaegi, ".")
out, err := build.CombinedOutput()
if err != nil {
t.Fatalf("failed to build yaegi command: %v: %s", err, out)
}
// Test src must be terminated by a single newline.
tests := []string{
"for {}\n",
"select {}\n",
}
for _, src := range tests {
cmd := exec.Command(yaegi)
in, err := cmd.StdinPipe()
if err != nil {
t.Errorf("failed to get stdin pipe to yaegi command: %v", err)
}
var outBuf, errBuf bytes.Buffer
cmd.Stdout = &outBuf
cmd.Stderr = &errBuf
// https://golang.org/doc/articles/race_detector.html#Options
cmd.Env = []string{`GORACE="halt_on_error=1"`}
err = cmd.Start()
if err != nil {
t.Fatalf("failed to start yaegi command: %v", err)
}
_, err = in.Write([]byte(src))
if err != nil {
t.Errorf("failed pipe test source to yaegi command: %v", err)
}
Sleep(200 * time.Millisecond)
err = cmd.Process.Signal(os.Interrupt)
if err != nil {
t.Errorf("failed to send os.Interrupt to yaegi command: %v", err)
}
_, err = in.Write([]byte("1+1\n"))
if err != nil {
t.Errorf("failed to probe race: %v", err)
}
err = in.Close()
if err != nil {
t.Errorf("failed to close stdin pipe: %v", err)
}
err = cmd.Wait()
if err != nil {
if cmd.ProcessState.ExitCode() == 66 { // See race_detector.html article.
t.Errorf("race detected running yaegi command canceling %q: %v", src, err)
if testing.Verbose() {
t.Log(&errBuf)
}
} else {
t.Errorf("error running yaegi command for %q: %v", src, err)
}
continue
}
if strings.TrimSuffix(errBuf.String(), "\n") != context.Canceled.Error() {
t.Errorf("unexpected error: %q", &errBuf)
}
}
}
|
[
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
artisan/doorman/main.go
|
/*
Onix Config Manager - Artisan's Doorman
Copyright (c) 2018-Present by www.gatblau.org
Licensed under the Apache License, Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0
Contributors to this project, hereby assign copyright in this code to the project,
to be licensed under the same terms as the rest of the code.
*/
package main
import (
"fmt"
"github.com/gatblau/onix/artisan/doorman/core"
"github.com/gatblau/onix/oxlib/httpserver"
"github.com/gatblau/onix/oxlib/oxc"
"github.com/gorilla/mux"
"net/http"
"os"
"time"
)
var defaultAuth func(r http.Request) *oxc.UserPrincipal
func main() {
// creates a generic http server
s := httpserver.New("doorman")
// add handlers
s.Http = func(router *mux.Router) {
// enable encoded path vars
router.UseEncodedPath()
// conditionally enable the request-logging middleware (active when DOORMAN_LOGGING is set to any non-empty value)
if len(os.Getenv("DOORMAN_LOGGING")) > 0 {
router.Use(s.LoggingMiddleware)
}
// apply authentication
router.Use(s.AuthenticationMiddleware)
// admin facing endpoints
router.HandleFunc("/key", upsertKeyHandler).Methods("PUT")
router.HandleFunc("/command", upsertCommandHandler).Methods("PUT")
router.HandleFunc("/route/in", upsertInboundRouteHandler).Methods("PUT")
router.HandleFunc("/route/out", upsertOutboundRouteHandler).Methods("PUT")
router.HandleFunc("/notification", upsertNotificationHandler).Methods("PUT")
router.HandleFunc("/notification", getAllNotificationsHandler).Methods("GET")
router.HandleFunc("/notification-template", upsertNotificationTemplateHandler).Methods("PUT")
router.HandleFunc("/notification-template", getAllNotificationTemplatesHandler).Methods("GET")
router.HandleFunc("/pipe", upsertPipelineHandler).Methods("PUT")
router.HandleFunc("/pipe/{name}", getPipelineHandler).Methods("GET")
router.HandleFunc("/pipe", getAllPipelinesHandler).Methods("GET")
router.HandleFunc("/job", getTopJobsHandler).Methods("GET")
// doorman proxy facing endpoints
router.HandleFunc("/event/{service-id}/{bucket-name}/{folder-name}", eventHandler).Methods("POST")
router.HandleFunc("/token/{token-value}", getWebhookAuthInfoHandler).Methods("GET")
router.HandleFunc("/token", getWebhookAllAuthInfoHandler).Methods("GET")
}
// grab a reference to default auth to use it in the proxy override below
defaultAuth = s.DefaultAuth
// set up specific authentication for doorman proxy
s.Auth = map[string]func(http.Request) *oxc.UserPrincipal{
"^/token.*": dProxyAuth,
"^/event/.*": dProxyAuth,
}
// https://textkool.com/en/ascii-art-generator?hl=default&vl=default&font=Broadway%20KB&text=dproxy%0A
fmt.Print(`
++++++++++++++| ONIX CONFIG MANAGER |+++++++++++++++
| ___ ___ ___ ___ _ __ _ |
| | | \ / / \ / / \ | |_) | |\/| / /\ | |\ | |
| |_|_/ \_\_/ \_\_/ |_| \ |_| | /_/--\ |_| \| |
| |
+++++++++++| the artisan's doorman |++++++++++++++
`)
s.Serve()
}
// dProxyAuth authenticates doorman's proxy requests using either proxy specific or admin credentials
func dProxyAuth(r http.Request) *oxc.UserPrincipal {
user, userErr := core.GetProxyUser()
if userErr != nil {
fmt.Printf("cannot authenticate proxy: %s", userErr)
return nil
}
pwd, pwdErr := core.GetProxyPwd()
if pwdErr != nil {
fmt.Printf("cannot authenticate proxy: %s", pwdErr)
return nil
}
// try proxy specific credentials first
if r.Header.Get("Authorization") == httpserver.BasicToken(user, pwd) {
return &oxc.UserPrincipal{
Username: user,
Created: time.Now(),
}
} else if defaultAuth != nil {
// try admin credentials
if principal := defaultAuth(r); principal != nil {
return principal
}
}
// otherwise, fail authentication
return nil
}
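// A rough sketch of the proxy-side call (illustrative; doormanURL and tokenValue
// are assumed placeholders, not defined in this file):
//
//	req, _ := http.NewRequest(http.MethodGet, doormanURL+"/token/"+tokenValue, nil)
//	req.Header.Set("Authorization", httpserver.BasicToken(user, pwd))
//	resp, err := http.DefaultClient.Do(req)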
|
[
"\"DOORMAN_LOGGING\""
] |
[] |
[
"DOORMAN_LOGGING"
] |
[]
|
["DOORMAN_LOGGING"]
|
go
| 1 | 0 | |
examples/connectorModify/main.go
|
package main
import (
"context"
"fmt"
"log"
"os"
"github.com/fivetran/go-fivetran"
)
func main() {
apiKey := os.Getenv("FIVETRAN_APIKEY")
apiSecret := os.Getenv("FIVETRAN_APISECRET")
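// Both credentials come from the environment; for example (illustrative):
// export FIVETRAN_APIKEY=... and export FIVETRAN_APISECRET=... before running.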
fivetran.Debug(true)
client := fivetran.New(apiKey, apiSecret)
svc := client.NewConnectorModify()
connConfig := fivetran.NewConnectorConfig().
NamedRange("range1")
cTables1 := fivetran.NewConnectorConfigCustomTables().Aggregation("aggregation1").TableName("theName")
cTables2 := fivetran.NewConnectorConfigCustomTables().TableName("theName").Fields([]string{"FIELD ONE", "FIELD TWO"})
connConfig.CustomTables([]*fivetran.ConnectorConfigCustomTables{cTables1, cTables2})
svc.ConnectorID("grateful_vertices")
svc.Paused(true)
svc.SyncFrequency(5)
svc.Config(connConfig)
value, err := svc.Do(context.Background())
if err != nil {
fmt.Printf("%+v\n", value)
log.Fatal(err)
}
fmt.Printf("%+v\n", value)
}
|
[
"\"FIVETRAN_APIKEY\"",
"\"FIVETRAN_APISECRET\""
] |
[] |
[
"FIVETRAN_APIKEY",
"FIVETRAN_APISECRET"
] |
[]
|
["FIVETRAN_APIKEY", "FIVETRAN_APISECRET"]
|
go
| 2 | 0 | |
registry.go
|
// Copyright 2019 dfuse Platform Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logging
import (
"fmt"
"os"
"regexp"
"strings"
"sync"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
type registerConfig struct {
shortName string
isTraceEnabled *bool
onUpdate func(newLogger *zap.Logger)
}
// RegisterOption are option parameters that you can set when registering a new logger
// in the system using `Register` function.
type RegisterOption interface {
apply(config *registerConfig)
}
type registerOptionFunc func(config *registerConfig)
func (f registerOptionFunc) apply(config *registerConfig) {
f(config)
}
// RegisterOnUpdate enables you to register a hook function that will receive the new logger
// that is going to be assigned to your logger instance. This is useful in situations
// where you need to update other instances or slightly re-configure the logger when
// a new one is attached.
//
// This is called **after** the instance has been re-assigned.
func RegisterOnUpdate(onUpdate func(newLogger *zap.Logger)) RegisterOption {
return registerOptionFunc(func(config *registerConfig) {
config.onUpdate = onUpdate
})
}
func registerShortName(shortName string) RegisterOption {
return registerOptionFunc(func(config *registerConfig) {
config.shortName = shortName
})
}
func registerWithTracer(isEnabled *bool) RegisterOption {
return registerOptionFunc(func(config *registerConfig) {
config.isTraceEnabled = isEnabled
})
}
type LoggerExtender func(*zap.Logger) *zap.Logger
type registryEntry struct {
packageID string
shortName string
atomicLevel zap.AtomicLevel
traceEnabled *bool
logPtr **zap.Logger
onUpdate func(newLogger *zap.Logger)
}
func (e *registryEntry) String() string {
loggerPtr := "<nil>"
if e.logPtr != nil {
loggerPtr = fmt.Sprintf("%p", *e.logPtr)
}
traceEnabled := false
if e.traceEnabled != nil {
traceEnabled = *e.traceEnabled
}
return fmt.Sprintf("%s @ %s (level: %s, trace?: %t, ptr: %s)", e.shortName, e.packageID, e.atomicLevel.Level(), traceEnabled, loggerPtr)
}
var globalRegistry = newRegistry("global")
var defaultLogger = zap.NewNop()
func Register(packageID string, zlogPtr **zap.Logger, options ...RegisterOption) {
register(globalRegistry, packageID, zlogPtr, options...)
}
func register2(registry *registry, shortName string, packageID string, zlogPtr **zap.Logger, options ...RegisterOption) Tracer {
tracer := boolTracer{new(bool)}
allOptions := append([]RegisterOption{
registerShortName(shortName),
registerWithTracer(tracer.value),
}, options...)
register(registry, packageID, zlogPtr, allOptions...)
return tracer
}
func register(registry *registry, packageID string, zlogPtr **zap.Logger, options ...RegisterOption) {
if zlogPtr == nil {
panic("the zlog pointer (of type **zap.Logger) must be set")
}
config := registerConfig{}
for _, opt := range options {
opt.apply(&config)
}
entry := &registryEntry{
packageID: packageID,
shortName: config.shortName,
traceEnabled: config.isTraceEnabled,
atomicLevel: zap.NewAtomicLevelAt(zapcore.ErrorLevel),
logPtr: zlogPtr,
onUpdate: config.onUpdate,
}
registry.registerEntry(entry)
logger := defaultLogger
if *zlogPtr != nil {
logger = *zlogPtr
}
// The tracing has already been set, so we can go unspecified here to not change anything
setLogger(entry, logger, unspecifiedTracing)
}
func Set(logger *zap.Logger, regexps ...string) {
for name, entry := range globalRegistry.entriesByPackageID {
if len(regexps) == 0 {
setLogger(entry, logger, unspecifiedTracing)
} else {
for _, re := range regexps {
regex, err := regexp.Compile(re)
if (err == nil && regex.MatchString(name)) || (err != nil && name == re) {
setLogger(entry, logger, unspecifiedTracing)
}
}
}
}
}
// Extend differs from `Set` in that it re-configures the existing logger set for
// all registered loggers in the registry. This is useful, for example, to add a field
// to the currently set logger:
//
// ```
// logging.Extend(func(current *zap.Logger) *zap.Logger { return current.With(zap.String("name", "value")) }, "github.com/dfuse-io/app.*")
// ```
func Extend(extender LoggerExtender, regexps ...string) {
extend(extender, unspecifiedTracing, regexps...)
}
func extend(extender LoggerExtender, tracing tracingType, regexps ...string) {
for name, entry := range globalRegistry.entriesByPackageID {
if *entry.logPtr == nil {
continue
}
if len(regexps) == 0 {
setLogger(entry, extender(*entry.logPtr), tracing)
} else {
for _, re := range regexps {
if regexp.MustCompile(re).MatchString(name) {
setLogger(entry, extender(*entry.logPtr), tracing)
}
}
}
}
}
// Override sets the given logger on all previously registered loggers and makes it
// the default for future registrations. Useful in tests.
func Override(logger *zap.Logger) {
defaultLogger = logger
Set(logger)
}
// TestingOverride calls `Override` (or `Set`, see below) with a development
// logger set up correctly with the right level based on some environment variables.
//
// By default, it does nothing. If the environment variable `DEBUG` or the
// environment variable `TRACE` is set to anything, a `zap.NewDevelopment`
// logger is installed.
//
// If `DEBUG` is set to something other than `true` and/or if `TRACE` is set
// to something other than `true`, the value is treated as a comma-separated
// list of regexes selecting which registered loggers are overridden; otherwise
// every registered logger is overridden.
func TestingOverride() {
debug := os.Getenv("DEBUG")
trace := os.Getenv("TRACE")
if debug == "" && trace == "" {
return
}
logger, _ := zap.NewDevelopment()
regex := ""
if debug != "true" {
regex = debug
}
if regex == "" && trace != "true" {
regex = trace
}
if regex == "" {
Override(logger)
} else {
for _, regexPart := range strings.Split(regex, ",") {
regexPart = strings.TrimSpace(regexPart)
if regexPart != "" {
Set(logger, regexPart)
}
}
}
}
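// Illustrative invocations (assumptions, not part of this file):
//
//	DEBUG=true go test ./...                      // override every registered logger
//	DEBUG="github.com/dfuse-io/app.*" go test     // override only loggers whose package ID matches the regex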
type tracingType uint8
const (
unspecifiedTracing tracingType = iota
enableTracing
disableTracing
)
func setLogger(entry *registryEntry, logger *zap.Logger, tracing tracingType) {
if entry == nil || logger == nil {
return
}
*entry.logPtr = logger
if entry.traceEnabled != nil && tracing != unspecifiedTracing {
switch tracing {
case enableTracing:
*entry.traceEnabled = true
case disableTracing:
*entry.traceEnabled = false
}
}
if entry.onUpdate != nil {
entry.onUpdate(logger)
}
}
type registry struct {
sync.RWMutex
name string
factory loggerFactory
entriesByPackageID map[string]*registryEntry
entriesByShortName map[string][]*registryEntry
dbgLogger *zap.Logger
}
func newRegistry(name string) *registry {
return &registry{
name: name,
entriesByPackageID: make(map[string]*registryEntry),
entriesByShortName: make(map[string][]*registryEntry),
factory: func(name string, level zap.AtomicLevel) *zap.Logger {
loggerOptions := newLoggerOptions("", WithAtomicLevel(level))
if name != "" {
loggerOptions.loggerName = name
}
return newLogger(&loggerOptions)
},
dbgLogger: dbgZlog.With(zap.String("registry", name)),
}
}
func (r *registry) registerEntry(entry *registryEntry) {
if entry == nil {
panic("refusing to add a nil registry entry")
}
id := validateEntryIdentifier("package ID", entry.packageID, false)
shortName := validateEntryIdentifier("short name", entry.shortName, true)
if actual := r.entriesByPackageID[id]; actual != nil {
panic(fmt.Sprintf("packageID %q is already registered", id))
}
entry.packageID = id
entry.shortName = shortName
r.entriesByPackageID[id] = entry
if shortName != "" {
r.entriesByShortName[shortName] = append(r.entriesByShortName[shortName], entry)
}
r.dbgLogger.Info("registered entry", zap.String("short_name", shortName), zap.String("id", id))
}
func (r *registry) forAllEntries(callback func(entry *registryEntry)) {
for _, entry := range r.entriesByPackageID {
callback(entry)
}
}
func (r *registry) forAllEntriesMatchingSpec(spec *logLevelSpec, callback func(entry *registryEntry, level zapcore.Level, trace bool)) {
for _, specForKey := range spec.sortedSpecs() {
if specForKey.key == "true" || specForKey.key == "*" {
for _, entry := range r.entriesByPackageID {
callback(entry, specForKey.level, specForKey.trace)
}
continue
}
r.forEntriesMatchingSpec(specForKey, callback)
}
}
func (r *registry) forEntriesMatchingSpec(spec *levelSpec, callback func(entry *registryEntry, level zapcore.Level, trace bool)) {
entries, found := r.entriesByShortName[spec.key]
if found {
for _, entry := range entries {
callback(entry, spec.level, spec.trace)
}
return
}
entry, found := r.entriesByPackageID[spec.key]
if found {
callback(entry, spec.level, spec.trace)
return
}
regex, err := regexp.Compile(spec.key)
for packageID, entry := range globalRegistry.entriesByPackageID {
if (err == nil && regex.MatchString(packageID)) || (err != nil && packageID == spec.key) {
callback(entry, spec.level, spec.trace)
}
}
}
func (r *registry) setLoggerForEntry(entry *registryEntry, level zapcore.Level, trace bool) {
if entry == nil {
return
}
entry.atomicLevel.SetLevel(level)
logger := r.factory(entry.shortName, entry.atomicLevel)
*entry.logPtr = logger
// It's possible for an entry to have no tracer registered, for example if the legacy
// register method is used. We must protect from this and not set anything.
if entry.traceEnabled != nil {
*entry.traceEnabled = trace
}
if entry.onUpdate != nil {
entry.onUpdate(logger)
}
r.dbgLogger.Info("set logger on entry", zap.Stringer("entry", entry), zap.Stringer("to_level", level), zap.Bool("trace_enabled", trace))
}
func (r *registry) setLevelForEntry(entry *registryEntry, level zapcore.Level, trace bool) {
if entry == nil {
return
}
entry.atomicLevel.SetLevel(level)
// It's possible for an entry to have no tracer registered, for example if the legacy
// register method is used. We must protect from this and not set anything.
if entry.traceEnabled != nil {
*entry.traceEnabled = trace
}
r.dbgLogger.Info("set level on entry", zap.Stringer("entry", entry))
}
func validateEntryIdentifier(tag string, rawInput string, allowEmpty bool) string {
input := strings.TrimSpace(rawInput)
if input == "" && !allowEmpty {
panic(fmt.Errorf("the %s %q is invalid, must not be empty", tag, input))
}
if input == "true" {
panic(fmt.Errorf("the %s %q is invalid, the identifier 'true' is reserved", tag, input))
}
if input == "*" {
panic(fmt.Errorf("the %s %q is invalid, the identifier '*' is reserved", tag, input))
}
if strings.HasPrefix(input, "-") {
panic(fmt.Errorf("the %s %q is invalid, must not starts with the '-' character", tag, input))
}
if strings.Contains(input, ",") {
panic(fmt.Errorf("the %s %q is invalid, must not contain the ',' character", tag, input))
}
if strings.Contains(input, "=") {
panic(fmt.Errorf("the %s %q is invalid, must not contain the '=' character", tag, input))
}
return input
}
|
[
"\"DEBUG\"",
"\"TRACE\""
] |
[] |
[
"TRACE",
"DEBUG"
] |
[]
|
["TRACE", "DEBUG"]
|
go
| 2 | 0 | |
pylib/cqlshlib/test/test_cqlsh_output.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# and $CQL_TEST_PORT to the associated port.
from __future__ import unicode_literals, with_statement
import locale
import os
import re
import subprocess
import sys
import unittest
from .basecase import (BaseTestCase, TEST_HOST, TEST_PORT,
at_a_time, cqlsh, cqlshlog, dedent)
from .cassconnect import (cassandra_cursor, create_db, get_keyspace,
quote_name, remove_db, split_cql_commands,
testcall_cqlsh, testrun_cqlsh)
from .ansi_colors import (ColoredText, ansi_seq, lookup_colorcode,
lookup_colorname, lookup_colorletter)
CONTROL_C = '\x03'
CONTROL_D = '\x04'
has_python27 = not subprocess.call(['python2.7', '--version'])
has_python3 = not subprocess.call(['python3', '--version'])
class TestCqlshOutput(BaseTestCase):
@classmethod
def setUpClass(cls):
create_db()
@classmethod
def tearDownClass(cls):
remove_db()
def setUp(self):
env = os.environ.copy()
env['COLUMNS'] = '100000'
# carry forward or override locale LC_CTYPE for UTF-8 encoding
if (locale.getpreferredencoding() != 'UTF-8'):
env['LC_CTYPE'] = 'en_US.utf8'
else:
env['LC_CTYPE'] = os.environ.get('LC_CTYPE', 'en_US.utf8')
if ('PATH' in os.environ.keys()):
env['PATH'] = os.environ['PATH']
self.default_env = env
def tearDown(self):
pass
def assertNoHasColors(self, text, msg=None):
self.assertNotRegex(text, ansi_seq, msg='ANSI CSI sequence found in %r' % text)
def assertHasColors(self, text, msg=None):
self.assertRegex(text, ansi_seq, msg=msg)
def assertColored(self, coloredtext, colorname):
wanted_colorcode = lookup_colorcode(colorname)
for num, c in enumerate(coloredtext):
if not c.isspace():
ccolor = c.colorcode()
self.assertEqual(ccolor, wanted_colorcode,
msg='Output text %r (char #%d) is colored %s, not %s'
% (coloredtext, num, lookup_colorname(ccolor), colorname))
def assertColorFromTags(self, coloredtext, tags):
for (char, tag) in zip(coloredtext, tags):
if char.isspace():
continue
if tag.isspace():
tag = 'n' # neutral
self.assertEqual(char.colorcode(), lookup_colorletter(tag),
msg='Coloring mismatch.\nExpected coloring: %s\n'
'Actually got: %s\ncolor code: %s'
% (tags, coloredtext.colored_version(), coloredtext.colortags()))
def assertQueriesGiveColoredOutput(self, queries_and_expected_outputs, env=None, **kwargs):
"""
Allow queries and expected output to be specified in structured tuples,
along with expected color information.
"""
if env is None:
env = self.default_env
with testrun_cqlsh(tty=True, env=env, **kwargs) as c:
for query, expected in queries_and_expected_outputs:
cqlshlog.debug('Testing %r' % (query,))
output = c.cmd_and_response(query).lstrip("\r\n")
c_output = ColoredText(output)
pairs = at_a_time(dedent(expected).split('\n'), 2)
outlines = c_output.splitlines()
for (plain, colorcodes), outputline in zip(pairs, outlines):
self.assertEqual(outputline.plain().rstrip(), plain)
self.assertColorFromTags(outputline, colorcodes)
def strip_read_repair_chance(self, describe_statement):
"""
Remove read_repair_chance and dclocal_read_repair_chance options
from output of DESCRIBE statements. The resulting string may be
reused as a CREATE statement.
Useful after CASSANDRA-13910, which removed read_repair_chance
options from CREATE statements but did not remove them completely
from the system.
"""
describe_statement = re.sub(r"( AND)? (dclocal_)?read_repair_chance = [\d\.]+", "", describe_statement)
describe_statement = re.sub(r"WITH[\s]*;", "", describe_statement)
return describe_statement
def test_no_color_output(self):
env = self.default_env
for termname in ('', 'dumb', 'vt100'):
cqlshlog.debug('TERM=%r' % termname)
env['TERM'] = termname
with testrun_cqlsh(tty=True, env=env,
win_force_colors=False) as c:
c.send('select * from has_all_types;\n')
self.assertNoHasColors(c.read_to_next_prompt())
c.send('select count(*) from has_all_types;\n')
self.assertNoHasColors(c.read_to_next_prompt())
c.send('totally invalid cql;\n')
self.assertNoHasColors(c.read_to_next_prompt())
def test_no_prompt_or_colors_output(self):
env = self.default_env
for termname in ('', 'dumb', 'vt100', 'xterm'):
cqlshlog.debug('TERM=%r' % termname)
env['TERM'] = termname
query = 'select * from has_all_types limit 1;'
output, result = testcall_cqlsh(prompt=None, env=env,
tty=False, input=query + '\n')
output = output.splitlines()
for line in output:
self.assertNoHasColors(line)
self.assertNotRegex(line, r'^cqlsh\S*>')
self.assertEqual(len(output), 6,
msg='output: %r' % '\n'.join(output))
self.assertEqual(output[0], '')
self.assertNicelyFormattedTableHeader(output[1])
self.assertNicelyFormattedTableRule(output[2])
self.assertNicelyFormattedTableData(output[3])
self.assertEqual(output[4].strip(), '')
self.assertEqual(output[5].strip(), '(1 rows)')
def test_color_output(self):
env = self.default_env
for termname in ('xterm', 'unknown-garbage'):
cqlshlog.debug('TERM=%r' % termname)
env['TERMNAME'] = termname
env['TERM'] = termname
with testrun_cqlsh(tty=True, env=env) as c:
c.send('select * from has_all_types;\n')
self.assertHasColors(c.read_to_next_prompt())
c.send('select count(*) from has_all_types;\n')
self.assertHasColors(c.read_to_next_prompt())
c.send('totally invalid cql;\n')
self.assertHasColors(c.read_to_next_prompt())
def test_count_output(self):
self.assertQueriesGiveColoredOutput((
('select count(*) from has_all_types;', """
count
MMMMM
-------
5
G
(1 rows)
nnnnnnnn
"""),
('select COUNT(*) FROM empty_table;', """
count
MMMMM
-------
0
G
(1 rows)
nnnnnnnn
"""),
('select COUNT(*) FROM empty_composite_table;', """
count
MMMMM
-------
0
G
(1 rows)
nnnnnnnn
"""),
('select COUNT(*) FROM twenty_rows_table limit 10;', """
count
MMMMM
-------
20
GG
(1 rows)
nnnnnnnn
"""),
('select COUNT(*) FROM twenty_rows_table limit 1000000;', """
count
MMMMM
-------
20
GG
(1 rows)
nnnnnnnn
"""),
))
q = 'select COUNT(*) FROM twenty_rows_composite_table limit 1000000;'
self.assertQueriesGiveColoredOutput((
(q, """
count
MMMMM
-------
20
GG
(1 rows)
nnnnnnnn
"""),
))
def test_static_cf_output(self):
self.assertQueriesGiveColoredOutput((
("select a, b from twenty_rows_table where a in ('1', '13', '2');", """
a | b
RR MM
----+----
1 | 1
YY YY
13 | 13
YY YY
2 | 2
YY YY
(3 rows)
nnnnnnnn
"""),
))
self.assertQueriesGiveColoredOutput((
('select * from dynamic_columns;', """
somekey | column1 | value
RRRRRRR CCCCCCC MMMMM
---------+---------+-------------------------
1 | 1.2 | one point two
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
2 | 2.3 | two point three
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
3 | -0.0001 | negative ten thousandth
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
3 | 3.46 | three point four six
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
3 | 99 | ninety-nine point oh
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
(5 rows)
nnnnnnnn
"""),
))
def test_empty_cf_output(self):
# we print the header after CASSANDRA-6910
self.assertQueriesGiveColoredOutput((
('select * from empty_table;', """
lonelykey | lonelycol
RRRRRRRRR MMMMMMMMM
-----------+-----------
(0 rows)
"""),
))
q = 'select * from has_all_types where num = 999;'
# same query should show up as empty in cql 3
self.assertQueriesGiveColoredOutput((
(q, """
num | asciicol | bigintcol | blobcol | booleancol | decimalcol | doublecol | floatcol | intcol | smallintcol | textcol | timestampcol | tinyintcol | uuidcol | varcharcol | varintcol
RRR MMMMMMMM MMMMMMMMM MMMMMMM MMMMMMMMMM MMMMMMMMMM MMMMMMMMM MMMMMMMM MMMMMM MMMMMMMMMMM MMMMMMM MMMMMMMMMMMM MMMMMMMMMM MMMMMMM MMMMMMMMMM MMMMMMMMM
-----+----------+-----------+---------+------------+------------+-----------+----------+--------+-------------+---------+--------------+------------+---------+------------+-----------
(0 rows)
"""),
))
def test_columnless_key_output(self):
q = "select a from twenty_rows_table where a in ('1', '2', '-9192');"
self.assertQueriesGiveColoredOutput((
(q, """
a
R
---
1
Y
2
Y
(2 rows)
nnnnnnnn
"""),
))
def test_numeric_output(self):
self.assertQueriesGiveColoredOutput((
('''select intcol, bigintcol, varintcol \
from has_all_types \
where num in (0, 1, 2, 3, 4);''', """
intcol | bigintcol | varintcol
MMMMMM MMMMMMMMM MMMMMMMMM
-------------+----------------------+-----------------------------
-12 | 1234567890123456789 | 10000000000000000000000000
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
2147483647 | 9223372036854775807 | 9
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
0 | 0 | 0
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
-2147483648 | -9223372036854775808 | -10000000000000000000000000
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
| |
nnnnnnnnnnn nnnnnnnnnnnnnnnnnnnn nnnnnnnnnnnnnnnnnnnnnnnnnnn
(5 rows)
nnnnnnnn
"""),
('''select decimalcol, doublecol, floatcol \
from has_all_types \
where num in (0, 1, 2, 3, 4);''', """
decimalcol | doublecol | floatcol
MMMMMMMMMM MMMMMMMMM MMMMMMMM
------------------+-----------+----------
19952.11882 | 1 | -2.1
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
1E-14 | 1e+07 | 1e+05
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
0.0 | 0 | 0
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
10.0000000000000 | -1004.1 | 1e+08
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
| |
nnnnnnnnnnnnnnnn nnnnnnn nnnnn
(5 rows)
nnnnnnnn
"""),
))
def test_timestamp_output(self):
env = self.default_env.copy()
env['TZ'] = 'Etc/UTC'
self.assertQueriesGiveColoredOutput((
('''select timestampcol from has_all_types where num = 0;''', """
timestampcol
MMMMMMMMMMMM
---------------------------------
2012-05-14 12:53:20.000000+0000
GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG
(1 rows)
nnnnnnnn
"""),
), env=env)
try:
import pytz # test only if pytz is available on PYTHONPATH
env['TZ'] = 'America/Sao_Paulo'
self.assertQueriesGiveColoredOutput((
('''select timestampcol from has_all_types where num = 0;''', """
timestampcol
MMMMMMMMMMMM
---------------------------------
2012-05-14 09:53:20.000000-0300
GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG
(1 rows)
nnnnnnnn
"""),
), env=env)
except ImportError:
pass
def test_boolean_output(self):
self.assertQueriesGiveColoredOutput((
('select num, booleancol from has_all_types where num in (0, 1, 2, 3);', """
num | booleancol
RRR MMMMMMMMMM
-----+------------
0 | True
G GGGGG
1 | True
G GGGGG
2 | False
G GGGGG
3 | False
G GGGGG
(4 rows)
nnnnnnnn
"""),
))
def test_null_output(self):
# column with metainfo but no values
self.assertQueriesGiveColoredOutput((
("select k, c, notthere from undefined_values_table where k in ('k1', 'k2');", """
k | c | notthere
R M MMMMMMMM
----+----+----------
k1 | c1 | null
YY YY RRRR
k2 | c2 | null
YY YY RRRR
(2 rows)
nnnnnnnn
"""),
))
# all columns, including a metainfo column that has no values (cql3)
self.assertQueriesGiveColoredOutput((
("select * from undefined_values_table where k in ('k1', 'k2');", """
k | c | notthere
R M MMMMMMMM
----+----+----------
k1 | c1 | null
YY YY RRRR
k2 | c2 | null
YY YY RRRR
(2 rows)
nnnnnnnn
"""),
))
def test_string_output_ascii(self):
self.assertQueriesGiveColoredOutput((
("select * from ascii_with_special_chars where k in (0, 1, 2, 3);", r"""
k | val
R MMM
---+-----------------------------------------------
0 | newline:\n
G YYYYYYYYmm
1 | return\rand null\x00!
G YYYYYYmmYYYYYYYYmmmmY
2 | \x00\x01\x02\x03\x04\x05control chars\x06\x07
G mmmmmmmmmmmmmmmmmmmmmmmmYYYYYYYYYYYYYmmmmmmmm
3 | fake special chars\x00\n
G YYYYYYYYYYYYYYYYYYYYYYYY
(4 rows)
nnnnnnnn
"""),
))
def test_string_output_utf8(self):
# many of these won't line up visually here, to keep the source code
# here ascii-only. note that some of the special Unicode characters
# here will render as double-width or zero-width in unicode-aware
# terminals, but the color-checking machinery here will still treat
# each as one character, so those won't seem to line up visually either.
env = self.default_env.copy()
env['LANG'] = 'en_US.UTF-8'
self.assertQueriesGiveColoredOutput((
("select * from utf8_with_special_chars where k in (0, 1, 2, 3, 4, 5, 6);", """
k | val
R MMM
---+-------------------------------
0 | Normal string
G YYYYYYYYYYYYY
1 | Text with\\nnewlines\\n
G YYYYYYYYYmmYYYYYYYYmm
2 | Text with embedded \\x01 char
G YYYYYYYYYYYYYYYYYYYmmmmYYYYY
3 | \u24c8\u24c5\u24ba\u24b8\u24be\u24b6\u24c1\u2008\u249e\u24a3\u249c\u24ad\u24ae and normal ones
G YYYYYYYYYYYYYYYYYYYYYYYYYYYYY
4 | double wides: \u2f91\u2fa4\u2f9a
G YYYYYYYYYYYYYYYYY
5 | zero width\u200bspace
G YYYYYYYYYYYYYYYY
6 | fake special chars\\x00\\n
G YYYYYYYYYYYYYYYYYYYYYYYY
(7 rows)
nnnnnnnn
"""),
), env=env)
def test_blob_output(self):
self.assertQueriesGiveColoredOutput((
("select num, blobcol from has_all_types where num in (0, 1, 2, 3);", r"""
num | blobcol
RRR MMMMMMM
-----+----------------------
0 | 0x000102030405fffefd
G mmmmmmmmmmmmmmmmmmmm
1 | 0xffffffffffffffffff
G mmmmmmmmmmmmmmmmmmmm
2 | 0x
G mmmmmmmmmmmmmmmmmmmm
3 | 0x80
G mmmmmmmmmmmmmmmmmmmm
(4 rows)
nnnnnnnn
""", ),
))
def test_prompt(self):
with testrun_cqlsh(tty=True, keyspace=None, env=self.default_env) as c:
self.assertTrue(c.output_header.splitlines()[-1].endswith('cqlsh> '))
c.send('\n')
output = c.read_to_next_prompt().replace('\r\n', '\n')
self.assertTrue(output.endswith('cqlsh> '))
cmd = "USE \"%s\";\n" % get_keyspace().replace('"', '""')
c.send(cmd)
output = c.read_to_next_prompt().replace('\r\n', '\n')
self.assertTrue(output.endswith('cqlsh:%s> ' % (get_keyspace())))
c.send('use system;\n')
output = c.read_to_next_prompt().replace('\r\n', '\n')
self.assertTrue(output.endswith('cqlsh:system> '))
c.send('use NONEXISTENTKEYSPACE;\n')
outputlines = c.read_to_next_prompt().splitlines()
start_index = 0
if c.realtty:
self.assertEqual(outputlines[start_index], 'use NONEXISTENTKEYSPACE;')
start_index = 1
self.assertTrue(outputlines[start_index+1].endswith('cqlsh:system> '))
midline = ColoredText(outputlines[start_index])
self.assertEqual(midline.plain(),
'InvalidRequest: Error from server: code=2200 [Invalid query] message="Keyspace \'nonexistentkeyspace\' does not exist"')
self.assertColorFromTags(midline,
"RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR")
def test_describe_keyspace_output(self):
with testrun_cqlsh(tty=True, env=self.default_env) as c:
ks = get_keyspace()
qks = quote_name(ks)
for cmd in ('describe keyspace', 'desc keyspace'):
for givename in ('system', '', qks):
for semicolon in ('', ';'):
fullcmd = cmd + (' ' if givename else '') + givename + semicolon
desc = c.cmd_and_response(fullcmd)
self.check_describe_keyspace_output(desc, givename or qks)
# try to actually execute that last keyspace description, with a
# new keyspace name
new_ks_name = 'COPY_OF_' + ks
copy_desc = desc.replace(ks, new_ks_name)
statements = split_cql_commands(copy_desc)
do_drop = True
with cassandra_cursor() as curs:
try:
for stmt in statements:
stmt = self.strip_read_repair_chance(stmt)
cqlshlog.debug('TEST EXEC: %s' % stmt)
curs.execute(stmt)
finally:
curs.execute('use system')
if do_drop:
curs.execute('drop keyspace {}'.format(new_ks_name))
def check_describe_keyspace_output(self, output, qksname):
expected_bits = [r'(?im)^CREATE KEYSPACE %s WITH\b' % re.escape(qksname),
r';\s*$',
r'\breplication = {\'class\':']
for expr in expected_bits:
self.assertRegex(output, expr)
def test_describe_columnfamily_output(self):
# we can change these to regular expressions if/when it makes sense
# to do so; these will likely be subject to lots of adjustments.
# note columns are now comparator-ordered instead of original-order.
table_desc3 = dedent("""
CREATE TABLE %s.has_all_types (
num int PRIMARY KEY,
asciicol ascii,
bigintcol bigint,
blobcol blob,
booleancol boolean,
decimalcol decimal,
doublecol double,
floatcol float,
intcol int,
smallintcol smallint,
textcol text,
timestampcol timestamp,
tinyintcol tinyint,
uuidcol uuid,
varcharcol text,
varintcol varint
) WITH additional_write_policy = '99p'
AND bloom_filter_fp_chance = 0.01
AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
AND cdc = false
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND memtable = {}
AND crc_check_chance = 1.0
AND default_time_to_live = 0
AND extensions = {}
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair = 'BLOCKING'
AND speculative_retry = '99p';""" % quote_name(get_keyspace()))
with testrun_cqlsh(tty=True, env=self.default_env) as c:
for cmdword in ('describe table', 'desc columnfamily'):
for semicolon in (';', ''):
output = c.cmd_and_response('%s has_all_types%s' % (cmdword, semicolon))
self.assertNoHasColors(output)
self.assertSequenceEqual(dedent(output).split('\n'), table_desc3.split('\n'))
def test_describe_columnfamilies_output(self):
output_re = r'''
\n? Keyspace [ ] (?P<ksname> \S+ ) \n
-----------* \n
(?P<cfnames> ( ( ["']?\w+["']? [^\w\n]* )+ \n )* )
'''
ks = get_keyspace()
with testrun_cqlsh(tty=True, keyspace=None, env=self.default_env) as c:
# when not in a keyspace
for cmdword in ('DESCRIBE COLUMNFAMILIES', 'desc tables'):
for semicolon in (';', ''):
ksnames = []
output = c.cmd_and_response(cmdword + semicolon)
self.assertNoHasColors(output)
self.assertRegex(output, '(?xs) ^ ( %s )+ $' % output_re)
for section in re.finditer('(?xs)' + output_re, output):
ksname = section.group('ksname')
ksnames.append(ksname)
cfnames = section.group('cfnames')
self.assertNotIn('\n\n', cfnames)
if ksname == ks:
self.assertIn('ascii_with_special_chars', cfnames)
self.assertIn('system', ksnames)
self.assertIn(quote_name(ks), ksnames)
# when in a keyspace
c.send('USE %s;\n' % quote_name(ks))
c.read_to_next_prompt()
for cmdword in ('DESCRIBE COLUMNFAMILIES', 'desc tables'):
for semicolon in (';', ''):
output = c.cmd_and_response(cmdword + semicolon)
self.assertNoHasColors(output)
self.assertEqual(output[0], '\n')
self.assertEqual(output[-1], '\n')
self.assertNotIn('Keyspace %s' % quote_name(ks), output)
self.assertIn('undefined_values_table', output)
def test_describe_cluster_output(self):
output_re = r'''(?x)
^
\n
Cluster: [ ] (?P<clustername> .* ) \n
Partitioner: [ ] (?P<partitionername> .* ) \n
Snitch: [ ] (?P<snitchname> .* ) \n
\n
'''
ringinfo_re = r'''
Range[ ]ownership: \n
(
[ ] .*? [ ][ ] \[ .*? / ( \d+ \. ){3} \d+ : \d+ \] \n
)+
\n
'''
with testrun_cqlsh(tty=True, keyspace=None, env=self.default_env) as c:
# not in a keyspace
for semicolon in ('', ';'):
output = c.cmd_and_response('describe cluster' + semicolon)
self.assertNoHasColors(output)
self.assertRegex(output, output_re + '$')
c.send('USE %s;\n' % quote_name(get_keyspace()))
c.read_to_next_prompt()
for semicolon in ('', ';'):
output = c.cmd_and_response('describe cluster' + semicolon)
self.assertNoHasColors(output)
self.assertRegex(output, output_re + ringinfo_re + '$')
def test_describe_schema_output(self):
with testrun_cqlsh(tty=True, env=self.default_env) as c:
for semicolon in ('', ';'):
output = c.cmd_and_response('desc full schema' + semicolon)
self.assertNoHasColors(output)
# Since CASSANDRA-7622 'DESC FULL SCHEMA' also shows all VIRTUAL keyspaces
self.assertIn('VIRTUAL KEYSPACE system_virtual_schema', output)
self.assertIn("\nCREATE KEYSPACE system_auth WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true;\n",
output)
self.assertRegex(output, r'.*\s*$')
def test_show_output(self):
with testrun_cqlsh(tty=True, env=self.default_env) as c:
output = c.cmd_and_response('show version;')
self.assertRegex(output,
r'^\[cqlsh \S+ \| Cassandra \S+ \| CQL spec \S+ \| Native protocol \S+\]$')
output = c.cmd_and_response('show host;')
self.assertHasColors(output)
self.assertRegex(output, '^Connected to .* at %s:%d$'
% (re.escape(TEST_HOST), TEST_PORT))
@unittest.skipIf(not has_python27, 'Python 2.7 not available to test warning')
def test_warn_py2(self):
env = self.default_env.copy()
env['USER_SPECIFIED_PYTHON'] = 'python2.7'
# has the warning
with testrun_cqlsh(tty=True, env=env) as c:
self.assertIn('Python 2.7 support is deprecated.', c.output_header, 'cqlsh did not output expected warning.')
# can suppress
env['CQLSH_NO_WARN_PY2'] = '1'
with testrun_cqlsh(tty=True, env=env) as c:
self.assertNotIn('Python 2.7 support is deprecated.', c.output_header, 'cqlsh failed to suppress the Python 2.7 warning.')
@unittest.skipIf(not (has_python27 and has_python3), 'Python 3 and 2.7 not available to test preference')
def test_no_warn_both_py_present(self):
with testrun_cqlsh(tty=True, env=self.default_env) as c:
self.assertNotIn('Python 2.7 support is deprecated.', c.output_header, 'cqlsh unexpectedly output the Python 2.7 warning.')
@unittest.skipIf(sys.platform == "win32", 'EOF signaling not supported on Windows')
def test_eof_prints_newline(self):
with testrun_cqlsh(tty=True, env=self.default_env) as c:
c.send(CONTROL_D)
out = c.read_lines(1)[0].replace('\r', '')
self.assertEqual(out, '\n')
with self.assertRaises(BaseException) as cm:
c.read_lines(1)
self.assertIn(type(cm.exception), (EOFError, OSError))
def test_exit_prints_no_newline(self):
for semicolon in ('', ';'):
with testrun_cqlsh(tty=True, env=self.default_env) as c:
cmd = 'exit%s\n' % semicolon
c.send(cmd)
if c.realtty:
out = c.read_lines(1)[0].replace('\r', '')
self.assertEqual(out, cmd)
with self.assertRaises(BaseException) as cm:
c.read_lines(1)
self.assertIn(type(cm.exception), (EOFError, OSError))
def test_help_types(self):
with testrun_cqlsh(tty=True, env=self.default_env) as c:
c.cmd_and_response('help types')
def test_help(self):
pass
def test_printing_parse_error(self):
pass
def test_printing_lex_error(self):
pass
def test_multiline_statements(self):
pass
def test_cancel_statement(self):
pass
def test_printing_integrity_error(self):
pass
def test_printing_cql_error(self):
pass
def test_empty_line(self):
pass
def test_user_types_output(self):
self.assertQueriesGiveColoredOutput((
("select addresses from users;", r"""
addresses
MMMMMMMMM
--------------------------------------------------------------------------------------------------------------------------------------------
{{city: 'Chelyabinsk', address: '3rd street', zip: null}, {city: 'Chigirinsk', address: null, zip: '676722'}}
BBYYYYBBYYYYYYYYYYYYYBBYYYYYYYBBYYYYYYYYYYYYBBYYYBBRRRRBBBBYYYYBBYYYYYYYYYYYYBBYYYYYYYBBRRRRBBYYYBBYYYYYYYYBB
{{city: 'Austin', address: '902 East 5th St. #202', zip: '78702'}, {city: 'Sunnyvale', address: '292 Gibraltar Drive #107', zip: '94089'}}
BBYYYYBBYYYYYYYYBBYYYYYYYBBYYYYYYYYYYYYYYYYYYYYYYYBBYYYBBYYYYYYYBBBBYYYYBBYYYYYYYYYYYBBYYYYYYYBBYYYYYYYYYYYYYYYYYYYYYYYYYYBBYYYBBYYYYYYYBB
(2 rows)
nnnnnnnn
"""),
))
self.assertQueriesGiveColoredOutput((
("select phone_numbers from users;", r"""
phone_numbers
MMMMMMMMMMMMM
-------------------------------------------------------------------------------------
{{country: null, number: '03'}, {country: '+7', number: null}}
BBYYYYYYYBBRRRRBBYYYYYYBBYYYYBBBBYYYYYYYBBYYYYBBYYYYYYBBRRRRBB
{{country: '+1', number: '512-537-7809'}, {country: '+44', number: '208 622 3021'}}
BBYYYYYYYBBYYYYBBYYYYYYBBYYYYYYYYYYYYYYBBBBYYYYYYYBBYYYYYBBYYYYYYBBYYYYYYYYYYYYYYBB
(2 rows)
nnnnnnnn
"""),
))
def test_user_types_with_collections(self):
self.assertQueriesGiveColoredOutput((
("select info from songs;", r"""
info
MMMM
-------------------------------------------------------------------------------------------------------------------------------------------------------------------
{founded: 188694000, members: {'Adrian Smith', 'Bruce Dickinson', 'Dave Murray', 'Janick Gers', 'Nicko McBrain', 'Steve Harris'}, description: 'Pure evil metal'}
BYYYYYYYBBGGGGGGGGGBBYYYYYYYBBBYYYYYYYYYYYYYYBBYYYYYYYYYYYYYYYYYBBYYYYYYYYYYYYYBBYYYYYYYYYYYYYBBYYYYYYYYYYYYYYYBBYYYYYYYYYYYYYYBBBYYYYYYYYYYYBBYYYYYYYYYYYYYYYYYB
(1 rows)
nnnnnnnn
"""),
))
self.assertQueriesGiveColoredOutput((
("select tags from songs;", r"""
tags
MMMM
-------------------------------------------------
{tags: {'genre': 'metal', 'origin': 'england'}}
BYYYYBBBYYYYYYYBBYYYYYYYBBYYYYYYYYBBYYYYYYYYYBB
(1 rows)
nnnnnnnn
"""),
))
self.assertQueriesGiveColoredOutput((
("select tags as my_tags from songs;", r"""
my_tags
MMMMMMM
-------------------------------------------------
{tags: {'genre': 'metal', 'origin': 'england'}}
BYYYYBBBYYYYYYYBBYYYYYYYBBYYYYYYYYBBYYYYYYYYYBB
(1 rows)
nnnnnnnn
"""),
))
def test_expanded_output_counts_past_page(self):
query = "PAGING 5; EXPAND ON; SELECT * FROM twenty_rows_table;"
output, result = testcall_cqlsh(prompt=None, env=self.default_env,
tty=False, input=query)
self.assertEqual(0, result)
# format is "@ Row 1"
row_headers = [s for s in output.splitlines() if "@ Row" in s]
row_ids = [int(s.split(' ')[2]) for s in row_headers]
self.assertEqual([i for i in range(1, 21)], row_ids)
| [] | [] | ["PATH", "LC_CTYPE"] | [] | ["PATH", "LC_CTYPE"] | python | 2 | 0 | |
drivers/virtualbox/disk.go
|
package virtualbox
import (
"fmt"
"io"
"os"
"os/exec"
"github.com/hsartoris-bard/machine/libmachine/log"
"github.com/hsartoris-bard/machine/libmachine/mcnutils"
)
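// VirtualDisk describes a VM's attached disk image: its image UUID and the
// path to the image file on the host.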
type VirtualDisk struct {
UUID string
Path string
}
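// DiskCreator creates a boot2docker VM disk image of the given size (in MB)
// at diskPath, embedding the public SSH key read from publicSSHKeyPath.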
type DiskCreator interface {
Create(size int, publicSSHKeyPath, diskPath string) error
}
func NewDiskCreator() DiskCreator {
return &defaultDiskCreator{}
}
type defaultDiskCreator struct{}
// Make a boot2docker VM disk image.
func (c *defaultDiskCreator) Create(size int, publicSSHKeyPath, diskPath string) error {
log.Debugf("Creating %d MB hard disk image...", size)
tarBuf, err := mcnutils.MakeDiskImage(publicSSHKeyPath)
if err != nil {
return err
}
log.Debug("Calling inner createDiskImage")
return createDiskImage(diskPath, size, tarBuf)
}
// createDiskImage makes a disk image at dest with the given size in MB. If r is
// not nil, it will be read as a raw disk image to convert from.
func createDiskImage(dest string, size int, r io.Reader) error {
// Convert a raw image from stdin to the dest VMDK image.
sizeBytes := int64(size) << 20 // usually won't fit in 32-bit int (max 2GB)
// FIXME: why isn't this just using the vbm*() functions?
cmd := exec.Command(vboxManageCmd, "convertfromraw", "stdin", dest,
fmt.Sprintf("%d", sizeBytes), "--format", "VMDK")
log.Debug(cmd)
if os.Getenv("MACHINE_DEBUG") != "" {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
stdin, err := cmd.StdinPipe()
if err != nil {
return err
}
log.Debug("Starting command")
if err := cmd.Start(); err != nil {
return err
}
log.Debug("Copying to stdin")
n, err := io.Copy(stdin, r)
if err != nil {
return err
}
log.Debug("Filling zeroes")
// The total number of bytes written to stdin must match sizeBytes, or
// VBoxManage.exe on Windows will fail. Fill remaining with zeros.
if left := sizeBytes - n; left > 0 {
if err := zeroFill(stdin, left); err != nil {
return err
}
}
log.Debug("Closing STDIN")
// cmd won't exit until the stdin is closed.
if err := stdin.Close(); err != nil {
return err
}
log.Debug("Waiting on cmd")
return cmd.Wait()
}
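// A minimal usage sketch (the paths and the 20000 MB size below are
// illustrative only, not part of this package): build the boot2docker data
// image for a public key and stream it into a new VMDK at the destination.
//
//	tarBuf, err := mcnutils.MakeDiskImage("/home/user/.ssh/id_rsa.pub")
//	if err != nil {
//		return err
//	}
//	if err := createDiskImage("/path/to/disk.vmdk", 20000, tarBuf); err != nil {
//		return err
//	}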
// zeroFill writes n zero bytes into w.
func zeroFill(w io.Writer, n int64) error {
const blocksize = 32 << 10
zeros := make([]byte, blocksize)
var k int
var err error
for n > 0 {
if n > blocksize {
k, err = w.Write(zeros)
} else {
k, err = w.Write(zeros[:n])
}
if err != nil {
return err
}
n -= int64(k)
}
return nil
}
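// getVMDiskInfo reads the named VM's configuration via
// vbmOut("showvminfo", name, "--machinereadable") and returns the path and
// image UUID reported for the SATA-1-0 attachment.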
func getVMDiskInfo(name string, vbox VBoxManager) (*VirtualDisk, error) {
out, err := vbox.vbmOut("showvminfo", name, "--machinereadable")
if err != nil {
return nil, err
}
disk := &VirtualDisk{}
err = parseKeyValues(out, reEqualQuoteLine, func(key, val string) error {
switch key {
case "SATA-1-0":
disk.Path = val
case "SATA-ImageUUID-1-0":
disk.UUID = val
}
return nil
})
if err != nil {
return nil, err
}
return disk, nil
}
| ["\"MACHINE_DEBUG\""] | [] | ["MACHINE_DEBUG"] | [] | ["MACHINE_DEBUG"] | go | 1 | 0 | |
test/unit/common/test_manager.py
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test.unit import temptree
import os
import sys
import resource
import signal
import errno
from collections import defaultdict
from threading import Thread
from time import sleep, time
from swift.common import manager
DUMMY_SIG = 1
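# MockOs stands in for the os module in these tests: kill() records each
# signal sent per pid (raising EPERM for RAISE_EPERM_SIG and a "No such
# process" error for unknown pids), and any other attribute lookup falls
# through to the real os module via __getattr__.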
class MockOs():
RAISE_EPERM_SIG = 99
def __init__(self, pids):
self.running_pids = pids
self.pid_sigs = defaultdict(list)
self.closed_fds = []
self.child_pid = 9999 # fork defaults to test parent process path
self.execlp_called = False
def kill(self, pid, sig):
if sig == self.RAISE_EPERM_SIG:
raise OSError(errno.EPERM, 'Operation not permitted')
if pid not in self.running_pids:
raise OSError(3, 'No such process')
self.pid_sigs[pid].append(sig)
def __getattr__(self, name):
# I only override portions of the os module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(os, name)
def pop_stream(f):
"""read everything out of file from the top and clear it out
"""
f.flush()
f.seek(0)
output = f.read()
f.seek(0)
f.truncate()
#print >> sys.stderr, output
return output
class TestManagerModule(unittest.TestCase):
def test_servers(self):
main_plus_rest = set(manager.MAIN_SERVERS + manager.REST_SERVERS)
self.assertEquals(set(manager.ALL_SERVERS), main_plus_rest)
# make sure there's no server listed in both
self.assertEquals(len(main_plus_rest), len(manager.MAIN_SERVERS) +
len(manager.REST_SERVERS))
def test_setup_env(self):
class MockResource():
def __init__(self, error=None):
self.error = error
self.called_with_args = []
def setrlimit(self, resource, limits):
if self.error:
raise self.error
self.called_with_args.append((resource, limits))
def __getattr__(self, name):
# I only override portions of the resource module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(resource, name)
_orig_resource = manager.resource
_orig_environ = os.environ
try:
manager.resource = MockResource()
manager.os.environ = {}
manager.setup_env()
expected = [
(resource.RLIMIT_NOFILE, (manager.MAX_DESCRIPTORS,
manager.MAX_DESCRIPTORS)),
(resource.RLIMIT_DATA, (manager.MAX_MEMORY,
manager.MAX_MEMORY)),
(resource.RLIMIT_NPROC, (manager.MAX_PROCS,
manager.MAX_PROCS)),
]
self.assertEquals(manager.resource.called_with_args, expected)
self.assertTrue(
manager.os.environ['PYTHON_EGG_CACHE'].startswith('/tmp'))
# test error condition
manager.resource = MockResource(error=ValueError())
manager.os.environ = {}
manager.setup_env()
self.assertEquals(manager.resource.called_with_args, [])
self.assertTrue(
manager.os.environ['PYTHON_EGG_CACHE'].startswith('/tmp'))
manager.resource = MockResource(error=OSError())
manager.os.environ = {}
self.assertRaises(OSError, manager.setup_env)
self.assertEquals(manager.os.environ.get('PYTHON_EGG_CACHE'), None)
finally:
manager.resource = _orig_resource
os.environ = _orig_environ
def test_command_wrapper(self):
@manager.command
def myfunc(arg1):
"""test doc
"""
return arg1
self.assertEquals(myfunc.__doc__.strip(), 'test doc')
self.assertEquals(myfunc(1), 1)
self.assertEquals(myfunc(0), 0)
self.assertEquals(myfunc(True), 1)
self.assertEquals(myfunc(False), 0)
self.assert_(hasattr(myfunc, 'publicly_accessible'))
self.assert_(myfunc.publicly_accessible)
def test_watch_server_pids(self):
class MockOs():
WNOHANG = os.WNOHANG
def __init__(self, pid_map={}):
self.pid_map = {}
for pid, v in pid_map.items():
self.pid_map[pid] = (x for x in v)
def waitpid(self, pid, options):
try:
rv = self.pid_map[pid].next()
except StopIteration:
raise OSError(errno.ECHILD, os.strerror(errno.ECHILD))
except KeyError:
raise OSError(errno.ESRCH, os.strerror(errno.ESRCH))
if isinstance(rv, Exception):
raise rv
else:
return rv
class MockTime():
def __init__(self, ticks=None):
self.tock = time()
if not ticks:
ticks = []
self.ticks = (t for t in ticks)
def time(self):
try:
self.tock += self.ticks.next()
except StopIteration:
self.tock += 1
return self.tock
def sleep(*args):
return
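# MockServer reports its pids as running for `zombie` calls to
# get_running_pids() and an empty dict afterwards, simulating a process
# that lingers for a while before actually dying.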
class MockServer():
def __init__(self, pids, run_dir=manager.RUN_DIR, zombie=0):
self.heartbeat = (pids for _ in range(zombie))
def get_running_pids(self):
try:
rv = self.heartbeat.next()
return rv
except StopIteration:
return {}
_orig_os = manager.os
_orig_time = manager.time
_orig_server = manager.Server
try:
manager.time = MockTime()
manager.os = MockOs()
# this server always says it's dead when you ask for running pids
server = MockServer([1])
# list of pids keyed on servers to watch
server_pids = {
server: [1],
}
# basic test, server dies
gen = manager.watch_server_pids(server_pids)
expected = [(server, 1)]
self.assertEquals([x for x in gen], expected)
# start long running server and short interval
server = MockServer([1], zombie=15)
server_pids = {
server: [1],
}
gen = manager.watch_server_pids(server_pids)
self.assertEquals([x for x in gen], [])
# wait a little longer
gen = manager.watch_server_pids(server_pids, interval=15)
self.assertEquals([x for x in gen], [(server, 1)])
# zombie process
server = MockServer([1], zombie=200)
server_pids = {
server: [1],
}
# test weird os error
manager.os = MockOs({1: [OSError()]})
gen = manager.watch_server_pids(server_pids)
self.assertRaises(OSError, lambda: [x for x in gen])
# test multi-server
server1 = MockServer([1, 10], zombie=200)
server2 = MockServer([2, 20], zombie=8)
server_pids = {
server1: [1, 10],
server2: [2, 20],
}
pid_map = {
1: [None for _ in range(10)],
2: [None for _ in range(8)],
20: [None for _ in range(4)],
}
manager.os = MockOs(pid_map)
gen = manager.watch_server_pids(server_pids,
interval=manager.KILL_WAIT)
expected = [
(server2, 2),
(server2, 20),
]
self.assertEquals([x for x in gen], expected)
finally:
manager.os = _orig_os
manager.time = _orig_time
manager.Server = _orig_server
def test_exc(self):
self.assert_(issubclass(manager.UnknownCommandError, Exception))
class TestServer(unittest.TestCase):
def tearDown(self):
reload(manager)
def join_swift_dir(self, path):
return os.path.join(manager.SWIFT_DIR, path)
def join_run_dir(self, path):
return os.path.join(manager.RUN_DIR, path)
def test_create_server(self):
server = manager.Server('proxy')
self.assertEquals(server.server, 'proxy-server')
self.assertEquals(server.type, 'proxy')
self.assertEquals(server.cmd, 'swift-proxy-server')
server = manager.Server('object-replicator')
self.assertEquals(server.server, 'object-replicator')
self.assertEquals(server.type, 'object')
self.assertEquals(server.cmd, 'swift-object-replicator')
def test_server_to_string(self):
server = manager.Server('Proxy')
self.assertEquals(str(server), 'proxy-server')
server = manager.Server('object-replicator')
self.assertEquals(str(server), 'object-replicator')
def test_server_repr(self):
server = manager.Server('proxy')
self.assert_(server.__class__.__name__ in repr(server))
self.assert_(str(server) in repr(server))
def test_server_equality(self):
server1 = manager.Server('Proxy')
server2 = manager.Server('proxy-server')
self.assertEquals(server1, server2)
# it is NOT a string
self.assertNotEquals(server1, 'proxy-server')
def test_get_pid_file_name(self):
server = manager.Server('proxy')
conf_file = self.join_swift_dir('proxy-server.conf')
pid_file = self.join_run_dir('proxy-server.pid')
self.assertEquals(pid_file, server.get_pid_file_name(conf_file))
server = manager.Server('object-replicator')
conf_file = self.join_swift_dir('object-server/1.conf')
pid_file = self.join_run_dir('object-replicator/1.pid')
self.assertEquals(pid_file, server.get_pid_file_name(conf_file))
server = manager.Server('container-auditor')
conf_file = self.join_swift_dir(
'container-server/1/container-auditor.conf')
pid_file = self.join_run_dir(
'container-auditor/1/container-auditor.pid')
self.assertEquals(pid_file, server.get_pid_file_name(conf_file))
def test_get_custom_pid_file_name(self):
random_run_dir = "/random/dir"
get_random_run_dir = lambda x: os.path.join(random_run_dir, x)
server = manager.Server('proxy', run_dir=random_run_dir)
conf_file = self.join_swift_dir('proxy-server.conf')
pid_file = get_random_run_dir('proxy-server.pid')
self.assertEquals(pid_file, server.get_pid_file_name(conf_file))
server = manager.Server('object-replicator', run_dir=random_run_dir)
conf_file = self.join_swift_dir('object-server/1.conf')
pid_file = get_random_run_dir('object-replicator/1.pid')
self.assertEquals(pid_file, server.get_pid_file_name(conf_file))
server = manager.Server('container-auditor', run_dir=random_run_dir)
conf_file = self.join_swift_dir(
'container-server/1/container-auditor.conf')
pid_file = get_random_run_dir(
'container-auditor/1/container-auditor.pid')
self.assertEquals(pid_file, server.get_pid_file_name(conf_file))
def test_get_conf_file_name(self):
server = manager.Server('proxy')
conf_file = self.join_swift_dir('proxy-server.conf')
pid_file = self.join_run_dir('proxy-server.pid')
self.assertEquals(conf_file, server.get_conf_file_name(pid_file))
server = manager.Server('object-replicator')
conf_file = self.join_swift_dir('object-server/1.conf')
pid_file = self.join_run_dir('object-replicator/1.pid')
self.assertEquals(conf_file, server.get_conf_file_name(pid_file))
server = manager.Server('container-auditor')
conf_file = self.join_swift_dir(
'container-server/1/container-auditor.conf')
pid_file = self.join_run_dir(
'container-auditor/1/container-auditor.pid')
self.assertEquals(conf_file, server.get_conf_file_name(pid_file))
server_name = manager.STANDALONE_SERVERS[0]
server = manager.Server(server_name)
conf_file = self.join_swift_dir(server_name + '.conf')
pid_file = self.join_run_dir(server_name + '.pid')
self.assertEquals(conf_file, server.get_conf_file_name(pid_file))
def test_conf_files(self):
# test get single conf file
conf_files = (
'proxy-server.conf',
'proxy-server.ini',
'auth-server.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server('proxy')
conf_files = server.conf_files()
self.assertEquals(len(conf_files), 1)
conf_file = conf_files[0]
proxy_conf = self.join_swift_dir('proxy-server.conf')
self.assertEquals(conf_file, proxy_conf)
# test multi server conf files & grouping of server-type config
conf_files = (
'object-server1.conf',
'object-server/2.conf',
'object-server/object3.conf',
'object-server/conf/server4.conf',
'object-server.txt',
'proxy-server.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server('object-replicator')
conf_files = server.conf_files()
self.assertEquals(len(conf_files), 4)
c1 = self.join_swift_dir('object-server1.conf')
c2 = self.join_swift_dir('object-server/2.conf')
c3 = self.join_swift_dir('object-server/object3.conf')
c4 = self.join_swift_dir('object-server/conf/server4.conf')
for c in [c1, c2, c3, c4]:
self.assert_(c in conf_files)
# test configs returned sorted
sorted_confs = sorted([c1, c2, c3, c4])
self.assertEquals(conf_files, sorted_confs)
# test get single numbered conf
conf_files = (
'account-server/1.conf',
'account-server/2.conf',
'account-server/3.conf',
'account-server/4.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server('account')
conf_files = server.conf_files(number=2)
self.assertEquals(len(conf_files), 1)
conf_file = conf_files[0]
self.assertEquals(conf_file,
self.join_swift_dir('account-server/2.conf'))
# test missing config number
conf_files = server.conf_files(number=5)
self.assertFalse(conf_files)
# test verbose & quiet
conf_files = (
'auth-server.ini',
'container-server/1.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
old_stdout = sys.stdout
try:
with open(os.path.join(t, 'output'), 'w+') as f:
sys.stdout = f
server = manager.Server('auth')
# check warn "unable to locate"
conf_files = server.conf_files()
self.assertFalse(conf_files)
self.assert_('unable to locate' in pop_stream(f).lower())
# check quiet will silence warning
conf_files = server.conf_files(verbose=True, quiet=True)
self.assertEquals(pop_stream(f), '')
# check found config no warning
server = manager.Server('container-auditor')
conf_files = server.conf_files()
self.assertEquals(pop_stream(f), '')
# check missing config number warn "unable to locate"
conf_files = server.conf_files(number=2)
self.assert_('unable to locate' in pop_stream(f).lower())
# check verbose lists configs
conf_files = server.conf_files(number=2, verbose=True)
c1 = self.join_swift_dir('container-server/1.conf')
self.assert_(c1 in pop_stream(f))
finally:
sys.stdout = old_stdout
# test standalone conf file
server_name = manager.STANDALONE_SERVERS[0]
conf_files = (server_name + '.conf',)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server(server_name)
conf_files = server.conf_files()
self.assertEquals(len(conf_files), 1)
conf_file = conf_files[0]
conf = self.join_swift_dir(server_name + '.conf')
self.assertEquals(conf_file, conf)
def test_proxy_conf_dir(self):
conf_files = (
'proxy-server.conf.d/00.conf',
'proxy-server.conf.d/01.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server('proxy')
conf_dirs = server.conf_files()
self.assertEquals(len(conf_dirs), 1)
conf_dir = conf_dirs[0]
proxy_conf_dir = self.join_swift_dir('proxy-server.conf.d')
self.assertEquals(proxy_conf_dir, conf_dir)
def test_conf_dir(self):
conf_files = (
'object-server/object-server.conf-base',
'object-server/1.conf.d/base.conf',
'object-server/1.conf.d/1.conf',
'object-server/2.conf.d/base.conf',
'object-server/2.conf.d/2.conf',
'object-server/3.conf.d/base.conf',
'object-server/3.conf.d/3.conf',
'object-server/4.conf.d/base.conf',
'object-server/4.conf.d/4.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server('object-replicator')
conf_dirs = server.conf_files()
self.assertEquals(len(conf_dirs), 4)
c1 = self.join_swift_dir('object-server/1.conf.d')
c2 = self.join_swift_dir('object-server/2.conf.d')
c3 = self.join_swift_dir('object-server/3.conf.d')
c4 = self.join_swift_dir('object-server/4.conf.d')
for c in [c1, c2, c3, c4]:
self.assert_(c in conf_dirs)
# test configs returned sorted
sorted_confs = sorted([c1, c2, c3, c4])
self.assertEquals(conf_dirs, sorted_confs)
def test_iter_pid_files(self):
"""
Server.iter_pid_files is kinda boring, test the
Server.pid_files stuff here as well
"""
pid_files = (
('proxy-server.pid', 1),
('auth-server.pid', 'blah'),
('object-replicator/1.pid', 11),
('object-replicator/2.pid', 12),
)
files, contents = zip(*pid_files)
with temptree(files, contents) as t:
manager.RUN_DIR = t
server = manager.Server('proxy', run_dir=t)
# test get one file
iter = server.iter_pid_files()
pid_file, pid = iter.next()
self.assertEquals(pid_file, self.join_run_dir('proxy-server.pid'))
self.assertEquals(pid, 1)
# ... and only one file
self.assertRaises(StopIteration, iter.next)
# test invalid value in pid file
server = manager.Server('auth', run_dir=t)
self.assertRaises(ValueError, server.iter_pid_files().next)
# test object-server doesn't steal pids from object-replicator
server = manager.Server('object', run_dir=t)
self.assertRaises(StopIteration, server.iter_pid_files().next)
# test multi-pid iter
server = manager.Server('object-replicator', run_dir=t)
real_map = {
11: self.join_run_dir('object-replicator/1.pid'),
12: self.join_run_dir('object-replicator/2.pid'),
}
pid_map = {}
for pid_file, pid in server.iter_pid_files():
pid_map[pid] = pid_file
self.assertEquals(pid_map, real_map)
# test get pid_files by number
conf_files = (
'object-server/1.conf',
'object-server/2.conf',
'object-server/3.conf',
'object-server/4.conf',
)
pid_files = (
('object-server/1.pid', 1),
('object-server/2.pid', 2),
('object-server/5.pid', 5),
)
with temptree(conf_files) as swift_dir:
manager.SWIFT_DIR = swift_dir
files, pids = zip(*pid_files)
with temptree(files, pids) as t:
manager.RUN_DIR = t
server = manager.Server('object', run_dir=t)
# test get all pid files
real_map = {
1: self.join_run_dir('object-server/1.pid'),
2: self.join_run_dir('object-server/2.pid'),
5: self.join_run_dir('object-server/5.pid'),
}
pid_map = {}
for pid_file, pid in server.iter_pid_files():
pid_map[pid] = pid_file
self.assertEquals(pid_map, real_map)
# test get pid with matching conf
pids = list(server.iter_pid_files(number=2))
self.assertEquals(len(pids), 1)
pid_file, pid = pids[0]
self.assertEquals(pid, 2)
pid_two = self.join_run_dir('object-server/2.pid')
self.assertEquals(pid_file, pid_two)
# try to iter on a pid number with a matching conf but no pid
pids = list(server.iter_pid_files(number=3))
self.assertFalse(pids)
# test get pids w/o matching conf
pids = list(server.iter_pid_files(number=5))
self.assertFalse(pids)
def test_signal_pids(self):
pid_files = (
('proxy-server.pid', 1),
('auth-server.pid', 2),
('object-server.pid', 3),
)
files, pids = zip(*pid_files)
with temptree(files, pids) as t:
manager.RUN_DIR = t
# mock os with both pids running
manager.os = MockOs([1, 2])
server = manager.Server('proxy', run_dir=t)
pids = server.signal_pids(DUMMY_SIG)
self.assertEquals(len(pids), 1)
self.assert_(1 in pids)
self.assertEquals(manager.os.pid_sigs[1], [DUMMY_SIG])
# make sure other process not signaled
self.assertFalse(2 in pids)
self.assertFalse(2 in manager.os.pid_sigs)
# capture stdio
old_stdout = sys.stdout
try:
with open(os.path.join(t, 'output'), 'w+') as f:
sys.stdout = f
# test print details
pids = server.signal_pids(DUMMY_SIG)
output = pop_stream(f)
self.assert_('pid: %s' % 1 in output)
self.assert_('signal: %s' % DUMMY_SIG in output)
# test no details on signal.SIG_DFL
pids = server.signal_pids(signal.SIG_DFL)
self.assertEquals(pop_stream(f), '')
# reset mock os so only the other server is running
manager.os = MockOs([2])
# test pid not running
pids = server.signal_pids(signal.SIG_DFL)
self.assert_(1 not in pids)
self.assert_(1 not in manager.os.pid_sigs)
# test remove stale pid file
self.assertFalse(os.path.exists(
self.join_run_dir('proxy-server.pid')))
# reset mock os with no running pids
manager.os = MockOs([])
server = manager.Server('auth', run_dir=t)
# test verbose warns on removing pid file
pids = server.signal_pids(signal.SIG_DFL, verbose=True)
output = pop_stream(f)
self.assert_('stale pid' in output.lower())
auth_pid = self.join_run_dir('auth-server.pid')
self.assert_(auth_pid in output)
# test warning with insufficient permissions
server = manager.Server('object', run_dir=t)
pids = server.signal_pids(manager.os.RAISE_EPERM_SIG)
output = pop_stream(f)
self.assert_('no permission to signal pid 3' in
output.lower(), output)
finally:
sys.stdout = old_stdout
def test_get_running_pids(self):
# test only gets running pids
pid_files = (
('test-server1.pid', 1),
('test-server2.pid', 2),
)
with temptree(*zip(*pid_files)) as t:
manager.RUN_DIR = t
server = manager.Server('test-server', run_dir=t)
# mock os, only pid '1' is running
manager.os = MockOs([1])
running_pids = server.get_running_pids()
self.assertEquals(len(running_pids), 1)
self.assert_(1 in running_pids)
self.assert_(2 not in running_pids)
# test persistent running pid files
self.assert_(os.path.exists(os.path.join(t, 'test-server1.pid')))
# test clean up stale pids
pid_two = self.join_swift_dir('test-server2.pid')
self.assertFalse(os.path.exists(pid_two))
# reset mock os, no pids running
manager.os = MockOs([])
running_pids = server.get_running_pids()
self.assertFalse(running_pids)
# and now all pid files are cleaned out
pid_one = self.join_run_dir('test-server1.pid')
self.assertFalse(os.path.exists(pid_one))
all_pids = os.listdir(t)
self.assertEquals(len(all_pids), 0)
# test only get pids for right server
pid_files = (
('thing-doer.pid', 1),
('thing-sayer.pid', 2),
('other-doer.pid', 3),
('other-sayer.pid', 4),
)
files, pids = zip(*pid_files)
with temptree(files, pids) as t:
manager.RUN_DIR = t
# all pids are running
manager.os = MockOs(pids)
server = manager.Server('thing-doer', run_dir=t)
running_pids = server.get_running_pids()
# only thing-doer.pid, 1
self.assertEquals(len(running_pids), 1)
self.assert_(1 in running_pids)
# no other pids returned
for n in (2, 3, 4):
self.assert_(n not in running_pids)
# assert stale pids for other servers ignored
manager.os = MockOs([1]) # only thing-doer is running
running_pids = server.get_running_pids()
for f in ('thing-sayer.pid', 'other-doer.pid', 'other-sayer.pid'):
# other server pid files persist
self.assert_(os.path.exists(os.path.join(t, f)))
# verify that servers are in fact not running
for server_name in ('thing-sayer', 'other-doer', 'other-sayer'):
server = manager.Server(server_name, run_dir=t)
running_pids = server.get_running_pids()
self.assertFalse(running_pids)
# and now all OTHER pid files are cleaned out
all_pids = os.listdir(t)
self.assertEquals(len(all_pids), 1)
self.assert_(os.path.exists(os.path.join(t, 'thing-doer.pid')))
def test_kill_running_pids(self):
pid_files = (
('object-server.pid', 1),
('object-replicator1.pid', 11),
('object-replicator2.pid', 12),
)
files, running_pids = zip(*pid_files)
with temptree(files, running_pids) as t:
manager.RUN_DIR = t
server = manager.Server('object', run_dir=t)
# test no servers running
manager.os = MockOs([])
pids = server.kill_running_pids()
self.assertFalse(pids, pids)
files, running_pids = zip(*pid_files)
with temptree(files, running_pids) as t:
manager.RUN_DIR = t
server.run_dir = t
# start up pid
manager.os = MockOs([1])
server = manager.Server('object', run_dir=t)
# test kill one pid
pids = server.kill_running_pids()
self.assertEquals(len(pids), 1)
self.assert_(1 in pids)
self.assertEquals(manager.os.pid_sigs[1], [signal.SIGTERM])
# reset os mock
manager.os = MockOs([1])
# test shutdown
self.assert_('object-server' in
manager.GRACEFUL_SHUTDOWN_SERVERS)
pids = server.kill_running_pids(graceful=True)
self.assertEquals(len(pids), 1)
self.assert_(1 in pids)
self.assertEquals(manager.os.pid_sigs[1], [signal.SIGHUP])
# start up other servers
manager.os = MockOs([11, 12])
# test multi server kill & ignore graceful on unsupported server
self.assertFalse('object-replicator' in
manager.GRACEFUL_SHUTDOWN_SERVERS)
server = manager.Server('object-replicator', run_dir=t)
pids = server.kill_running_pids(graceful=True)
self.assertEquals(len(pids), 2)
for pid in (11, 12):
self.assert_(pid in pids)
self.assertEquals(manager.os.pid_sigs[pid],
[signal.SIGTERM])
# and the other pid is of course not signaled
self.assert_(1 not in manager.os.pid_sigs)
def test_status(self):
conf_files = (
'test-server/1.conf',
'test-server/2.conf',
'test-server/3.conf',
'test-server/4.conf',
)
pid_files = (
('test-server/1.pid', 1),
('test-server/2.pid', 2),
('test-server/3.pid', 3),
('test-server/4.pid', 4),
)
with temptree(conf_files) as swift_dir:
manager.SWIFT_DIR = swift_dir
files, pids = zip(*pid_files)
with temptree(files, pids) as t:
manager.RUN_DIR = t
# setup running servers
server = manager.Server('test', run_dir=t)
# capture stdio
old_stdout = sys.stdout
try:
with open(os.path.join(t, 'output'), 'w+') as f:
sys.stdout = f
# test status for all running
manager.os = MockOs(pids)
self.assertEquals(server.status(), 0)
output = pop_stream(f).strip().splitlines()
self.assertEquals(len(output), 4)
for line in output:
self.assert_('test-server running' in line)
# test get single server by number
self.assertEquals(server.status(number=4), 0)
output = pop_stream(f).strip().splitlines()
self.assertEquals(len(output), 1)
line = output[0]
self.assert_('test-server running' in line)
conf_four = self.join_swift_dir(conf_files[3])
self.assert_('4 - %s' % conf_four in line)
# test some servers not running
manager.os = MockOs([1, 2, 3])
self.assertEquals(server.status(), 0)
output = pop_stream(f).strip().splitlines()
self.assertEquals(len(output), 3)
for line in output:
self.assert_('test-server running' in line)
# test single server not running
manager.os = MockOs([1, 2])
self.assertEquals(server.status(number=3), 1)
output = pop_stream(f).strip().splitlines()
self.assertEquals(len(output), 1)
line = output[0]
self.assert_('not running' in line)
conf_three = self.join_swift_dir(conf_files[2])
self.assert_(conf_three in line)
# test no running pids
manager.os = MockOs([])
self.assertEquals(server.status(), 1)
output = pop_stream(f).lower()
self.assert_('no test-server running' in output)
# test use provided pids
pids = {
1: '1.pid',
2: '2.pid',
}
# shouldn't call get_running_pids
called = []
def mock(*args, **kwargs):
called.append(True)
server.get_running_pids = mock
status = server.status(pids=pids)
self.assertEquals(status, 0)
self.assertFalse(called)
output = pop_stream(f).strip().splitlines()
self.assertEquals(len(output), 2)
for line in output:
self.assert_('test-server running' in line)
finally:
sys.stdout = old_stdout
def test_spawn(self):
# mocks
class MockProcess():
NOTHING = 'default besides None'
STDOUT = 'stdout'
PIPE = 'pipe'
def __init__(self, pids=None):
if pids is None:
pids = []
self.pids = (p for p in pids)
def Popen(self, args, **kwargs):
return MockProc(self.pids.next(), args, **kwargs)
class MockProc():
def __init__(self, pid, args, stdout=MockProcess.NOTHING,
stderr=MockProcess.NOTHING):
self.pid = pid
self.args = args
self.stdout = stdout
if stderr == MockProcess.STDOUT:
self.stderr = self.stdout
else:
self.stderr = stderr
# setup running servers
server = manager.Server('test')
with temptree(['test-server.conf']) as swift_dir:
manager.SWIFT_DIR = swift_dir
with temptree([]) as t:
manager.RUN_DIR = t
server.run_dir = t
old_subprocess = manager.subprocess
try:
# test single server process calls spawn once
manager.subprocess = MockProcess([1])
conf_file = self.join_swift_dir('test-server.conf')
# spawn server no kwargs
server.spawn(conf_file)
# test pid file
pid_file = self.join_run_dir('test-server.pid')
self.assert_(os.path.exists(pid_file))
pid_on_disk = int(open(pid_file).read().strip())
self.assertEquals(pid_on_disk, 1)
# assert procs args
self.assert_(server.procs)
self.assertEquals(len(server.procs), 1)
proc = server.procs[0]
expected_args = [
'swift-test-server',
conf_file,
]
self.assertEquals(proc.args, expected_args)
# assert stdout is piped
self.assertEquals(proc.stdout, MockProcess.PIPE)
self.assertEquals(proc.stderr, proc.stdout)
# test multi server process calls spawn multiple times
manager.subprocess = MockProcess([11, 12, 13, 14])
conf1 = self.join_swift_dir('test-server/1.conf')
conf2 = self.join_swift_dir('test-server/2.conf')
conf3 = self.join_swift_dir('test-server/3.conf')
conf4 = self.join_swift_dir('test-server/4.conf')
server = manager.Server('test', run_dir=t)
# test server run once
server.spawn(conf1, once=True)
self.assert_(server.procs)
self.assertEquals(len(server.procs), 1)
proc = server.procs[0]
expected_args = ['swift-test-server', conf1, 'once']
# assert stdout is piped
self.assertEquals(proc.stdout, MockProcess.PIPE)
self.assertEquals(proc.stderr, proc.stdout)
# test server not daemon
server.spawn(conf2, daemon=False)
self.assert_(server.procs)
self.assertEquals(len(server.procs), 2)
proc = server.procs[1]
expected_args = ['swift-test-server', conf2, 'verbose']
self.assertEquals(proc.args, expected_args)
# assert stdout is not changed
self.assertEquals(proc.stdout, None)
self.assertEquals(proc.stderr, None)
# test server wait
server.spawn(conf3, wait=False)
self.assert_(server.procs)
self.assertEquals(len(server.procs), 3)
proc = server.procs[2]
# assert stdout is /dev/null
self.assert_(isinstance(proc.stdout, file))
self.assertEquals(proc.stdout.name, os.devnull)
self.assertEquals(proc.stdout.mode, 'w+b')
self.assertEquals(proc.stderr, proc.stdout)
# test not daemon overrides wait
server.spawn(conf4, wait=False, daemon=False, once=True)
self.assert_(server.procs)
self.assertEquals(len(server.procs), 4)
proc = server.procs[3]
expected_args = ['swift-test-server', conf4, 'once',
'verbose']
self.assertEquals(proc.args, expected_args)
# daemon behavior should trump wait, once shouldn't matter
self.assertEquals(proc.stdout, None)
self.assertEquals(proc.stderr, None)
# assert pids
for i, proc in enumerate(server.procs):
pid_file = self.join_run_dir('test-server/%d.pid' %
(i + 1))
pid_on_disk = int(open(pid_file).read().strip())
self.assertEquals(pid_on_disk, proc.pid)
finally:
manager.subprocess = old_subprocess
def test_wait(self):
server = manager.Server('test')
self.assertEquals(server.wait(), 0)
class MockProcess(Thread):
def __init__(self, delay=0.1, fail_to_start=False):
Thread.__init__(self)
# setup pipe
rfd, wfd = os.pipe()
# subprocess connection to read stdout
self.stdout = os.fdopen(rfd)
# real process connection to write stdout
self._stdout = os.fdopen(wfd, 'w')
self.delay = delay
self.finished = False
self.returncode = None
if fail_to_start:
self._returncode = 1
self.run = self.fail
else:
self._returncode = 0
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
if self.isAlive():
self.join()
def close_stdout(self):
self._stdout.flush()
with open(os.devnull, 'wb') as nullfile:
try:
os.dup2(nullfile.fileno(), self._stdout.fileno())
except OSError:
pass
def fail(self):
print >>self._stdout, 'mock process started'
sleep(self.delay) # perform setup processing
print >>self._stdout, 'mock process failed to start'
self.close_stdout()
def poll(self):
self.returncode = self._returncode
return self.returncode or None
def run(self):
print >>self._stdout, 'mock process started'
sleep(self.delay) # perform setup processing
print >>self._stdout, 'setup complete!'
self.close_stdout()
sleep(self.delay) # do some more processing
print >>self._stdout, 'mock process finished'
self.finished = True
class MockTime():
def time(self):
return time()
def sleep(self, *args, **kwargs):
pass
with temptree([]) as t:
old_stdout = sys.stdout
old_wait = manager.WARNING_WAIT
old_time = manager.time
try:
manager.WARNING_WAIT = 0.01
manager.time = MockTime()
with open(os.path.join(t, 'output'), 'w+') as f:
# actually capture the read stdout (for prints)
sys.stdout = f
# test closing pipe in subprocess unblocks read
with MockProcess() as proc:
server.procs = [proc]
status = server.wait()
self.assertEquals(status, 0)
# wait should return before process exits
self.assert_(proc.isAlive())
self.assertFalse(proc.finished)
self.assert_(proc.finished) # make sure it did finish...
# test output kwarg prints subprocess output
with MockProcess() as proc:
server.procs = [proc]
status = server.wait(output=True)
output = pop_stream(f)
self.assert_('mock process started' in output)
self.assert_('setup complete' in output)
# make sure we don't get prints after stdout was closed
self.assert_('mock process finished' not in output)
# test process which fails to start
with MockProcess(fail_to_start=True) as proc:
server.procs = [proc]
status = server.wait()
self.assertEquals(status, 1)
self.assert_('failed' in pop_stream(f))
# test multiple procs
procs = [MockProcess(delay=.5) for i in range(3)]
for proc in procs:
proc.start()
server.procs = procs
status = server.wait()
self.assertEquals(status, 0)
for proc in procs:
self.assert_(proc.isAlive())
for proc in procs:
proc.join()
finally:
sys.stdout = old_stdout
manager.WARNING_WAIT = old_wait
manager.time = old_time
def test_interact(self):
class MockProcess():
def __init__(self, fail=False):
self.returncode = None
if fail:
self._returncode = 1
else:
self._returncode = 0
def communicate(self):
self.returncode = self._returncode
return '', ''
server = manager.Server('test')
server.procs = [MockProcess()]
self.assertEquals(server.interact(), 0)
server.procs = [MockProcess(fail=True)]
self.assertEquals(server.interact(), 1)
procs = []
for fail in (False, True, True):
procs.append(MockProcess(fail=fail))
server.procs = procs
self.assert_(server.interact() > 0)
def test_launch(self):
# stubs
conf_files = (
'proxy-server.conf',
'auth-server.conf',
'object-server/1.conf',
'object-server/2.conf',
'object-server/3.conf',
'object-server/4.conf',
)
pid_files = (
('proxy-server.pid', 1),
('proxy-server/2.pid', 2),
)
# mocks
class MockSpawn():
def __init__(self, pids=None):
self.conf_files = []
self.kwargs = []
if not pids:
def one_forever():
while True:
yield 1
self.pids = one_forever()
else:
self.pids = (x for x in pids)
def __call__(self, conf_file, **kwargs):
self.conf_files.append(conf_file)
self.kwargs.append(kwargs)
rv = self.pids.next()
if isinstance(rv, Exception):
raise rv
else:
return rv
with temptree(conf_files) as swift_dir:
manager.SWIFT_DIR = swift_dir
files, pids = zip(*pid_files)
with temptree(files, pids) as t:
manager.RUN_DIR = t
old_stdout = sys.stdout
try:
with open(os.path.join(t, 'output'), 'w+') as f:
sys.stdout = f
# can't start server w/o a conf
server = manager.Server('test', run_dir=t)
self.assertFalse(server.launch())
# start mock os running all pids
manager.os = MockOs(pids)
server = manager.Server('proxy', run_dir=t)
# can't start server if it's already running
self.assertFalse(server.launch())
output = pop_stream(f)
self.assert_('running' in output)
conf_file = self.join_swift_dir('proxy-server.conf')
self.assert_(conf_file in output)
pid_file = self.join_run_dir('proxy-server/2.pid')
self.assert_(pid_file in output)
self.assert_('already started' in output)
# no running pids
manager.os = MockOs([])
# test ignore once for non-start-once server
mock_spawn = MockSpawn([1])
server.spawn = mock_spawn
conf_file = self.join_swift_dir('proxy-server.conf')
expected = {
1: conf_file,
}
self.assertEquals(server.launch(once=True), expected)
self.assertEquals(mock_spawn.conf_files, [conf_file])
expected = {
'once': False,
}
self.assertEquals(mock_spawn.kwargs, [expected])
output = pop_stream(f)
self.assert_('Starting' in output)
self.assert_('once' not in output)
# test multi-server kwarg once
server = manager.Server('object-replicator')
mock_spawn = MockSpawn([1, 2, 3, 4])
server.spawn = mock_spawn
conf1 = self.join_swift_dir('object-server/1.conf')
conf2 = self.join_swift_dir('object-server/2.conf')
conf3 = self.join_swift_dir('object-server/3.conf')
conf4 = self.join_swift_dir('object-server/4.conf')
expected = {
1: conf1,
2: conf2,
3: conf3,
4: conf4,
}
self.assertEquals(server.launch(once=True), expected)
self.assertEquals(mock_spawn.conf_files, [
conf1, conf2, conf3, conf4])
expected = {
'once': True,
}
self.assertEquals(len(mock_spawn.kwargs), 4)
for kwargs in mock_spawn.kwargs:
self.assertEquals(kwargs, expected)
# test number kwarg
mock_spawn = MockSpawn([4])
server.spawn = mock_spawn
expected = {
4: conf4,
}
self.assertEquals(server.launch(number=4), expected)
self.assertEquals(mock_spawn.conf_files, [conf4])
expected = {
'number': 4
}
self.assertEquals(mock_spawn.kwargs, [expected])
# test cmd does not exist
server = manager.Server('auth')
mock_spawn = MockSpawn([OSError(errno.ENOENT, 'blah')])
server.spawn = mock_spawn
self.assertEquals(server.launch(), {})
self.assert_('swift-auth-server does not exist' in
pop_stream(f))
finally:
sys.stdout = old_stdout
def test_stop(self):
conf_files = (
'account-server/1.conf',
'account-server/2.conf',
'account-server/3.conf',
'account-server/4.conf',
)
pid_files = (
('account-reaper/1.pid', 1),
('account-reaper/2.pid', 2),
('account-reaper/3.pid', 3),
('account-reaper/4.pid', 4),
)
with temptree(conf_files) as swift_dir:
manager.SWIFT_DIR = swift_dir
files, pids = zip(*pid_files)
with temptree(files, pids) as t:
manager.RUN_DIR = t
# start all pids in mock os
manager.os = MockOs(pids)
server = manager.Server('account-reaper', run_dir=t)
# test kill all running pids
pids = server.stop()
self.assertEquals(len(pids), 4)
for pid in (1, 2, 3, 4):
self.assert_(pid in pids)
self.assertEquals(manager.os.pid_sigs[pid],
[signal.SIGTERM])
conf1 = self.join_swift_dir('account-reaper/1.conf')
conf2 = self.join_swift_dir('account-reaper/2.conf')
conf3 = self.join_swift_dir('account-reaper/3.conf')
conf4 = self.join_swift_dir('account-reaper/4.conf')
# reset mock os with only 2 running pids
manager.os = MockOs([3, 4])
pids = server.stop()
self.assertEquals(len(pids), 2)
for pid in (3, 4):
self.assert_(pid in pids)
self.assertEquals(manager.os.pid_sigs[pid],
[signal.SIGTERM])
self.assertFalse(os.path.exists(conf1))
self.assertFalse(os.path.exists(conf2))
# test number kwarg
manager.os = MockOs([3, 4])
pids = server.stop(number=3)
self.assertEquals(len(pids), 1)
expected = {
3: conf3,
}
self.assert_(pids, expected)
self.assertEquals(manager.os.pid_sigs[3], [signal.SIGTERM])
self.assertFalse(os.path.exists(conf4))
self.assertFalse(os.path.exists(conf3))
class TestManager(unittest.TestCase):
def test_create(self):
m = manager.Manager(['test'])
self.assertEquals(len(m.servers), 1)
server = m.servers.pop()
self.assert_(isinstance(server, manager.Server))
self.assertEquals(server.server, 'test-server')
# test multi-server and simple dedupe
servers = ['object-replicator', 'object-auditor', 'object-replicator']
m = manager.Manager(servers)
self.assertEquals(len(m.servers), 2)
for server in m.servers:
self.assert_(server.server in servers)
# test all
m = manager.Manager(['all'])
self.assertEquals(len(m.servers), len(manager.ALL_SERVERS))
for server in m.servers:
self.assert_(server.server in manager.ALL_SERVERS)
# test main
m = manager.Manager(['main'])
self.assertEquals(len(m.servers), len(manager.MAIN_SERVERS))
for server in m.servers:
self.assert_(server.server in manager.MAIN_SERVERS)
# test rest
m = manager.Manager(['rest'])
self.assertEquals(len(m.servers), len(manager.REST_SERVERS))
for server in m.servers:
self.assert_(server.server in manager.REST_SERVERS)
# test main + rest == all
m = manager.Manager(['main', 'rest'])
self.assertEquals(len(m.servers), len(manager.ALL_SERVERS))
for server in m.servers:
self.assert_(server.server in manager.ALL_SERVERS)
# test dedupe
m = manager.Manager(['main', 'rest', 'proxy', 'object',
'container', 'account'])
self.assertEquals(len(m.servers), len(manager.ALL_SERVERS))
for server in m.servers:
self.assert_(server.server in manager.ALL_SERVERS)
# test glob
m = manager.Manager(['object-*'])
object_servers = [s for s in manager.ALL_SERVERS if
s.startswith('object')]
self.assertEquals(len(m.servers), len(object_servers))
for s in m.servers:
self.assert_(str(s) in object_servers)
m = manager.Manager(['*-replicator'])
replicators = [s for s in manager.ALL_SERVERS if
s.endswith('replicator')]
for s in m.servers:
self.assert_(str(s) in replicators)
def test_status(self):
class MockServer():
def __init__(self, server, run_dir=manager.RUN_DIR):
self.server = server
self.called_kwargs = []
def status(self, **kwargs):
self.called_kwargs.append(kwargs)
if 'error' in self.server:
return 1
else:
return 0
old_server_class = manager.Server
try:
manager.Server = MockServer
m = manager.Manager(['test'])
status = m.status()
self.assertEquals(status, 0)
m = manager.Manager(['error'])
status = m.status()
self.assertEquals(status, 1)
# test multi-server
m = manager.Manager(['test', 'error'])
kwargs = {'key': 'value'}
status = m.status(**kwargs)
self.assertEquals(status, 1)
for server in m.servers:
self.assertEquals(server.called_kwargs, [kwargs])
finally:
manager.Server = old_server_class
def test_start(self):
def mock_setup_env():
getattr(mock_setup_env, 'called', []).append(True)
class MockServer():
def __init__(self, server, run_dir=manager.RUN_DIR):
self.server = server
self.called = defaultdict(list)
def launch(self, **kwargs):
self.called['launch'].append(kwargs)
def wait(self, **kwargs):
self.called['wait'].append(kwargs)
return int('error' in self.server)
def stop(self, **kwargs):
self.called['stop'].append(kwargs)
def interact(self, **kwargs):
self.called['interact'].append(kwargs)
if 'raise' in self.server:
raise KeyboardInterrupt
elif 'error' in self.server:
return 1
else:
return 0
old_setup_env = manager.setup_env
old_swift_server = manager.Server
try:
manager.setup_env = mock_setup_env
manager.Server = MockServer
# test no errors on launch
m = manager.Manager(['proxy'])
status = m.start()
self.assertEquals(status, 0)
for server in m.servers:
self.assertEquals(server.called['launch'], [{}])
# test error on launch
m = manager.Manager(['proxy', 'error'])
status = m.start()
self.assertEquals(status, 1)
for server in m.servers:
self.assertEquals(server.called['launch'], [{}])
self.assertEquals(server.called['wait'], [{}])
# test interact
m = manager.Manager(['proxy', 'error'])
kwargs = {'daemon': False}
status = m.start(**kwargs)
self.assertEquals(status, 1)
for server in m.servers:
self.assertEquals(server.called['launch'], [kwargs])
self.assertEquals(server.called['interact'], [kwargs])
m = manager.Manager(['raise'])
kwargs = {'daemon': False}
status = m.start(**kwargs)
finally:
manager.setup_env = old_setup_env
manager.Server = old_swift_server
def test_no_wait(self):
class MockServer():
def __init__(self, server, run_dir=manager.RUN_DIR):
self.server = server
self.called = defaultdict(list)
def launch(self, **kwargs):
self.called['launch'].append(kwargs)
def wait(self, **kwargs):
self.called['wait'].append(kwargs)
return int('error' in self.server)
orig_swift_server = manager.Server
try:
manager.Server = MockServer
# test success
init = manager.Manager(['proxy'])
status = init.no_wait()
self.assertEquals(status, 0)
for server in init.servers:
self.assertEquals(len(server.called['launch']), 1)
called_kwargs = server.called['launch'][0]
self.assertFalse(called_kwargs['wait'])
self.assertFalse(server.called['wait'])
# test no error code in status, even on error
init = manager.Manager(['error'])
status = init.no_wait()
self.assertEquals(status, 0)
for server in init.servers:
self.assertEquals(len(server.called['launch']), 1)
called_kwargs = server.called['launch'][0]
self.assert_('wait' in called_kwargs)
self.assertFalse(called_kwargs['wait'])
self.assertFalse(server.called['wait'])
# test wait with once option
init = manager.Manager(['updater', 'replicator-error'])
status = init.no_wait(once=True)
self.assertEquals(status, 0)
for server in init.servers:
self.assertEquals(len(server.called['launch']), 1)
called_kwargs = server.called['launch'][0]
self.assert_('wait' in called_kwargs)
self.assertFalse(called_kwargs['wait'])
self.assert_('once' in called_kwargs)
self.assert_(called_kwargs['once'])
self.assertFalse(server.called['wait'])
finally:
manager.Server = orig_swift_server
def test_no_daemon(self):
class MockServer():
def __init__(self, server, run_dir=manager.RUN_DIR):
self.server = server
self.called = defaultdict(list)
def launch(self, **kwargs):
self.called['launch'].append(kwargs)
def interact(self, **kwargs):
self.called['interact'].append(kwargs)
return int('error' in self.server)
orig_swift_server = manager.Server
try:
manager.Server = MockServer
# test success
init = manager.Manager(['proxy'])
stats = init.no_daemon()
self.assertEquals(stats, 0)
# test error
init = manager.Manager(['proxy', 'object-error'])
stats = init.no_daemon()
self.assertEquals(stats, 1)
# test once
init = manager.Manager(['proxy', 'object-error'])
stats = init.no_daemon()
for server in init.servers:
self.assertEquals(len(server.called['launch']), 1)
self.assertEquals(len(server.called['wait']), 0)
self.assertEquals(len(server.called['interact']), 1)
finally:
manager.Server = orig_swift_server
def test_once(self):
class MockServer():
def __init__(self, server, run_dir=manager.RUN_DIR):
self.server = server
self.called = defaultdict(list)
def wait(self, **kwargs):
self.called['wait'].append(kwargs)
if 'error' in self.server:
return 1
else:
return 0
def launch(self, **kwargs):
return self.called['launch'].append(kwargs)
orig_swift_server = manager.Server
try:
manager.Server = MockServer
# test no errors
init = manager.Manager(['account-reaper'])
status = init.once()
self.assertEquals(status, 0)
# test error code on error
init = manager.Manager(['error-reaper'])
status = init.once()
self.assertEquals(status, 1)
for server in init.servers:
self.assertEquals(len(server.called['launch']), 1)
called_kwargs = server.called['launch'][0]
self.assertEquals(called_kwargs, {'once': True})
self.assertEquals(len(server.called['wait']), 1)
self.assertEquals(len(server.called['interact']), 0)
finally:
manager.Server = orig_swift_server
def test_stop(self):
class MockServerFactory():
class MockServer():
def __init__(self, pids, run_dir=manager.RUN_DIR):
self.pids = pids
def stop(self, **kwargs):
return self.pids
def __init__(self, server_pids, run_dir=manager.RUN_DIR):
self.server_pids = server_pids
def __call__(self, server, run_dir=manager.RUN_DIR):
return MockServerFactory.MockServer(self.server_pids[server])
def mock_watch_server_pids(server_pids, **kwargs):
for server, pids in server_pids.items():
for pid in pids:
if pid is None:
continue
yield server, pid
_orig_server = manager.Server
_orig_watch_server_pids = manager.watch_server_pids
try:
manager.watch_server_pids = mock_watch_server_pids
# test stop one server
server_pids = {
'test': [1]
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
status = m.stop()
self.assertEquals(status, 0)
# test not running
server_pids = {
'test': []
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
status = m.stop()
self.assertEquals(status, 1)
# test won't die
server_pids = {
'test': [None]
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
status = m.stop()
self.assertEquals(status, 1)
finally:
manager.Server = _orig_server
manager.watch_server_pids = _orig_watch_server_pids
# TODO(clayg): more tests
def test_shutdown(self):
m = manager.Manager(['test'])
m.stop_was_called = False
def mock_stop(*args, **kwargs):
m.stop_was_called = True
expected = {'graceful': True}
self.assertEquals(kwargs, expected)
return 0
m.stop = mock_stop
status = m.shutdown()
self.assertEquals(status, 0)
self.assertEquals(m.stop_was_called, True)
def test_restart(self):
m = manager.Manager(['test'])
m.stop_was_called = False
def mock_stop(*args, **kwargs):
m.stop_was_called = True
return 0
m.start_was_called = False
def mock_start(*args, **kwargs):
m.start_was_called = True
return 0
m.stop = mock_stop
m.start = mock_start
status = m.restart()
self.assertEquals(status, 0)
self.assertEquals(m.stop_was_called, True)
self.assertEquals(m.start_was_called, True)
def test_reload(self):
class MockManager():
called = defaultdict(list)
def __init__(self, servers):
pass
@classmethod
def reset_called(cls):
cls.called = defaultdict(list)
def stop(self, **kwargs):
MockManager.called['stop'].append(kwargs)
return 0
def start(self, **kwargs):
MockManager.called['start'].append(kwargs)
return 0
_orig_manager = manager.Manager
try:
m = _orig_manager(['auth'])
for server in m.servers:
self.assert_(server.server in
manager.GRACEFUL_SHUTDOWN_SERVERS)
manager.Manager = MockManager
status = m.reload()
self.assertEquals(status, 0)
expected = {
'start': [{'graceful': True}],
'stop': [{'graceful': True}],
}
self.assertEquals(MockManager.called, expected)
# test force graceful
MockManager.reset_called()
m = _orig_manager(['*-server'])
self.assertEquals(len(m.servers), 4)
for server in m.servers:
self.assert_(server.server in
manager.GRACEFUL_SHUTDOWN_SERVERS)
manager.Manager = MockManager
status = m.reload(graceful=False)
self.assertEquals(status, 0)
expected = {
'start': [{'graceful': True}] * 4,
'stop': [{'graceful': True}] * 4,
}
self.assertEquals(MockManager.called, expected)
finally:
manager.Manager = _orig_manager
def test_force_reload(self):
m = manager.Manager(['test'])
m.reload_was_called = False
def mock_reload(*args, **kwargs):
m.reload_was_called = True
return 0
m.reload = mock_reload
status = m.force_reload()
self.assertEquals(status, 0)
self.assertEquals(m.reload_was_called, True)
def test_get_command(self):
m = manager.Manager(['test'])
self.assertEquals(m.start, m.get_command('start'))
self.assertEquals(m.force_reload, m.get_command('force-reload'))
self.assertEquals(m.get_command('force-reload'),
m.get_command('force_reload'))
self.assertRaises(manager.UnknownCommandError, m.get_command,
'no_command')
self.assertRaises(manager.UnknownCommandError, m.get_command,
'__init__')
def test_list_commands(self):
for cmd, help in manager.Manager.list_commands():
method = getattr(manager.Manager, cmd.replace('-', '_'), None)
self.assert_(method, '%s is not a command' % cmd)
self.assert_(getattr(method, 'publicly_accessible', False))
self.assertEquals(method.__doc__.strip(), help)
def test_run_command(self):
m = manager.Manager(['test'])
m.cmd_was_called = False
def mock_cmd(*args, **kwargs):
m.cmd_was_called = True
expected = {'kw1': True, 'kw2': False}
self.assertEquals(kwargs, expected)
return 0
mock_cmd.publicly_accessible = True
m.mock_cmd = mock_cmd
kwargs = {'kw1': True, 'kw2': False}
status = m.run_command('mock_cmd', **kwargs)
self.assertEquals(status, 0)
self.assertEquals(m.cmd_was_called, True)
if __name__ == '__main__':
unittest.main()
| [] | [] | ["PYTHON_EGG_CACHE"] | [] | ["PYTHON_EGG_CACHE"] | python | 1 | 0 | |
witviz/src/witviz/SimpleTree.java
|
/*
* Created on Nov 4, 2004
*
* TODO To change the template for this generated file go to
* Window - Preferences - Java - Code Style - Code Templates
*/
package witviz;
// import org.eclipse.swt.program.*;
import java.io.File;
import java.io.FileInputStream;
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Set;
import java.util.TreeSet;
import java.util.Vector;
import org.eclipse.draw2d.ChopboxAnchor;
import org.eclipse.draw2d.ColorConstants;
import org.eclipse.draw2d.Figure;
import org.eclipse.draw2d.FigureCanvas;
import org.eclipse.draw2d.IFigure;
import org.eclipse.draw2d.geometry.Point;
import org.eclipse.draw2d.graph.DirectedGraph;
import org.eclipse.gef.examples.pde.AbstractGraphDemo;
import org.eclipse.swt.SWT;
import org.eclipse.swt.custom.CTabFolder;
import org.eclipse.swt.custom.CTabFolder2Listener;
import org.eclipse.swt.custom.CTabFolderEvent;
import org.eclipse.swt.custom.CTabItem;
import org.eclipse.swt.custom.SashForm;
import org.eclipse.swt.custom.StyledText;
import org.eclipse.swt.events.DisposeEvent;
import org.eclipse.swt.events.DisposeListener;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.events.SelectionListener;
import org.eclipse.swt.graphics.Cursor;
import org.eclipse.swt.graphics.Font;
import org.eclipse.swt.graphics.GC;
import org.eclipse.swt.graphics.Image;
import org.eclipse.swt.graphics.ImageData;
import org.eclipse.swt.layout.FormAttachment;
import org.eclipse.swt.layout.FormData;
import org.eclipse.swt.layout.FormLayout;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.layout.RowLayout;
import org.eclipse.swt.printing.PrintDialog;
import org.eclipse.swt.printing.Printer;
import org.eclipse.swt.printing.PrinterData;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Event;
import org.eclipse.swt.widgets.FileDialog;
import org.eclipse.swt.widgets.Listener;
import org.eclipse.swt.widgets.Menu;
import org.eclipse.swt.widgets.MenuItem;
import org.eclipse.swt.widgets.Shell;
import org.eclipse.swt.widgets.ToolBar;
import org.eclipse.swt.widgets.ToolItem;
import witviz.WitObject.STATUS;
//import com.ibm.cbo.graphPanel.chartbase.SVG;
// import org.eclipse.draw2d.examples.graph.AbstractGraphDemo.TopOrBottomAnchor;
public class SimpleTree extends AbstractGraphDemo {
static Shell shell;
FigureCanvas figureCanvas = null;
IFigure contents;
static WitObject wo = null;
String WitDataDir=null;
String witFileName=null;
boolean doOpalPicture = true;
boolean hasOpalData = false;
//static WitObject wodetail = null;
static StyledText TextBox1;
static StyledText TextBox2;
CreateTreeFigure mainContents;
CreateTreeFigure detailContents;
Composite splitPaneComposite;
static SashForm sash_form;
static Composite rightPanel, leftPanel;
static CTabFolder leftTab, rightTab;
static SashForm rightSash, leftSash;
private FigureCanvas dc;
boolean outputData = false;
boolean detailLast = false;
// static Button refocusButton;
// static Button expandButton;
// static Button displayGlobalButton;
static FileDialog fileDialog;
static ChooseFocusDialog chooseFocusDialog;
static BomOptionsDialog bomOptionDialog;
static BopOptionsDialog bopOptionDialog;
static PartOptionsDialog partOptionDialog;
static DemandEdgeOptionsDialog demandEdgeOptionDialog;
static GlobalOptionsDialog globalOptionDialog;
static OperationOptionsDialog operationOptionDialog;
static SubstituteOptionsDialog substituteOptionDialog;
static GlobalDataDialog globalDataDialog;
static PreferencesDialog preferencesDialog;
static ChooseScenarioDialog scenarioChooser;
static ChooseObjectiveDialog objectiveChooser;
//static ObjectiveDialog objectiveChooser;
boolean fulltree;
// String focusName=null;
// String focusType=null;
Object focusObject = null;
String appName;
Cursor waitCursor;
Composite OpalComposite;
boolean highlightNonDefault;
boolean showOnlyNonDefault;
// private static int port = com.ibm.opal.server.OpalServer.DEFAULTPORT;
// private static DataSource opaldatasource;
// private static String host = "localhost";
// OpalServer opalServer;
// OpalPictureDialog OpalPicture;
int leftMargin, rightMargin, topMargin, bottomMargin;
String tabs;
GC gc;
Display d;
Font font, printerFont;
int tabWidth = 0;
MenuItem focus, openFull, openPartial, print, exit, part, operation,
demand, substitute, global, BOM, BOP;
MenuItem highlight, shownondefault, showglobaldata;//, statistics;
MenuItem postprocess, heurimplosion, optimplosion,stochimplosion,mrp,setScenario,setObjective;
public static void main(String[] args) {
new SimpleTree().run(args);
}
/**
* @see org.eclipse.draw2d.examples.AbstractExample#getContents()
*/
protected IFigure getContents() {
getFigureCanvas().setBackground(ColorConstants.white);
// getFigureCanvas().setVerticalScrollBarVisibility(FigureCanvas.ALWAYS);
// getFigureCanvas().setHorizontalScrollBarVisibility(FigureCanvas.ALWAYS);
mainContents = new CreateTreeFigure(wo, true, this, getFigureCanvas(),
80, 40);
return mainContents;
}
protected IFigure getDetailContents() {
getDetailCanvas().setBackground(ColorConstants.white);
// getDetailCanvas().setVerticalScrollBarVisibility(FigureCanvas.ALWAYS);
// getDetailCanvas().setHorizontalScrollBarVisibility(FigureCanvas.ALWAYS);
detailContents = new CreateTreeFigure(wo, focusObject, this,
getDetailCanvas(), 80, 40);
return detailContents;
}
protected FigureCanvas getDetailCanvas() {
return dc;
}
public void focusChanged() {
// mainContents.SelectedType = focusType;
// mainContents.SelectedName = focusName;
mainContents.SelectedObject = focusObject;
detailContents = (CreateTreeFigure) getDetailContents();
detailContents.setTextBox(TextBox2);
getDetailCanvas().setContents(detailContents);
if (fulltree) {
if (focusObject.getClass().toString()
.equals("class witviz.WitNode")
|| focusObject.getClass().toString().equals(
"class witviz.WitDemandNode")) {
detailContents.ColorSelf(mainContents.SelectedObject);
// detailContents.SelectedType = mainContents.SelectedType;
// detailContents.SelectedName = mainContents.SelectedName;
}
}
}
protected void setDetailCanvas(FigureCanvas canvas) {
this.dc = canvas;
}
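// Resolves the WIT data path from the command line or the WITVIZDATAPATH
// environment variable, builds the main window (menu bar, toolbar, and a
// horizontal SashForm with the full-model tree on the left and the focus
// detail tree on the right), and then runs the SWT event loop.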
protected void run(String[] args) {
d = Display.getDefault();
shell = new Shell(d);
String argfilepath=null;
String envfilepath = System.getenv("WITVIZDATAPATH");
//get some info about it
if (envfilepath !=null) {
File directory = new File(envfilepath);
if ( ! directory.exists() ) {
System.out.println("Specified environment variable WITVIZDATAPATH data path does not exist.");
envfilepath = null;
}
}
try {
if (args.length==1)
argfilepath = new File(args[0]).getCanonicalPath();
}
catch (Exception e) {
e.printStackTrace();
}
if (argfilepath !=null) {
File directory = new File(argfilepath);
if ( ! directory.exists() ) {
System.out.println("Specified command line wit data path does not exist.");
argfilepath = null;
}
}
String overallfilepath = null;
if (argfilepath==null)
overallfilepath = envfilepath;
else
overallfilepath = argfilepath;
//check whether it is a directory or a file
if (overallfilepath !=null) {
File directory = new File(overallfilepath);
if (directory.isDirectory() )
WitDataDir = overallfilepath;
else {
witFileName = overallfilepath;
}
}
Image WitImage = new Image(null, "images/witmodel.gif");
shell.setImage(WitImage);
waitCursor = new Cursor(d, SWT.CURSOR_WAIT);
globalDataDialog = new GlobalDataDialog(shell, this);
// Base.Socketting(0);
// try {
// opalServer = new OpalServer();
// }
// catch (Exception e) {
// e.printStackTrace();
// }
// opaldatasource = new DataSource(host, port+1);
appName = getClass().getName();
appName = appName.substring(appName.lastIndexOf('.') + 1);
shell.setText("WitViz");
shell.setLayout(new GridLayout());
Menu bar = new Menu(shell, SWT.BAR);
shell.setMenuBar(bar);
ToolBar toolbar = new ToolBar(shell, SWT.HORIZONTAL | SWT.FLAT);
GridData gridData = new GridData();
gridData.horizontalAlignment = GridData.FILL;
gridData.grabExcessHorizontalSpace = true;
toolbar.setLayoutData(gridData);
// toolbar.setLayoutData(BorderLayout.NORTH);
final ToolItem openFullItem = new ToolItem(toolbar, SWT.PUSH);
Image fullOpenIcon = new Image(null, "images/newfile_wiz.gif");
openFullItem.setImage(fullOpenIcon);
// openFullItem.setText("Open Full");
openFullItem.setToolTipText("Open Full");
openFullItem.addSelectionListener(new SelectionListener() {
public void widgetSelected(SelectionEvent event) {
OpenFullModel();
}
public void widgetDefaultSelected(SelectionEvent event) {
}
});
final ToolItem openPartialItem = new ToolItem(toolbar, SWT.PUSH);
Image fullPartialIcon = new Image(null, "images/newpartialfile_wiz.gif");
openPartialItem.setImage(fullPartialIcon);
// openPartialItem.setText("Open Partial");
openPartialItem.setToolTipText("Open Partial");
openPartialItem.addSelectionListener(new SelectionListener() {
public void widgetSelected(SelectionEvent event) {
OpenPartialModel();
}
public void widgetDefaultSelected(SelectionEvent event) {
}
});
final ToolItem printItem = new ToolItem(toolbar, SWT.PUSH);
Image PrintIcon = new Image(null, "images/printview_tsk.gif");
printItem.setImage(PrintIcon);
// printItem.setText("Print");
printItem.setToolTipText("Print");
printItem.addSelectionListener(new SelectionListener() {
public void widgetSelected(SelectionEvent event) {
Print();
}
public void widgetDefaultSelected(SelectionEvent event) {
}
});
MenuItem fileItem = new MenuItem(bar, SWT.CASCADE);
fileItem.setText("File");
Menu submenuFile = new Menu(shell, SWT.DROP_DOWN);
fileItem.setMenu(submenuFile);
openFull = new MenuItem(submenuFile, SWT.PUSH);
openFull.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
OpenFullModel();
}
});
openFull.setText("Open Full Model");
openPartial = new MenuItem(submenuFile, SWT.PUSH);
openPartial.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
OpenPartialModel();
}
});
openPartial.setText("Open Partial Model");
MenuItem print = new MenuItem(submenuFile, SWT.PUSH);
print.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
Print();
}
});
print.setText("Print");
// openFull.setText ("Select &All\tCtrl+A");
// item.setAccelerator (SWT.CTRL + 'A');
exit = new MenuItem(submenuFile, SWT.PUSH);
exit.setText("Exit");
exit.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
shell.dispose();
}
});
MenuItem editItem = new MenuItem(bar, SWT.CASCADE);
editItem.setText("Edit");
Menu submenuEdit = new Menu(shell, SWT.DROP_DOWN);
editItem.setMenu(submenuEdit);
focus = new MenuItem(submenuEdit, SWT.PUSH);
focus.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
ChooseFocus();
}
});
focus.setText("Choose Focus");
focus.setEnabled(false);
MenuItem options = new MenuItem(submenuEdit, SWT.CASCADE);
options.setText("Options");
Menu optionsDropdown = new Menu(shell, SWT.DROP_DOWN);
options.setMenu(optionsDropdown);
part = new MenuItem(optionsDropdown, SWT.PUSH);
part.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
partOptionDialog.open();
}
});
part.setText("Part");
operation = new MenuItem(optionsDropdown, SWT.PUSH);
operation.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
operationOptionDialog.open();
}
});
operation.setText("Operation");
BOM = new MenuItem(optionsDropdown, SWT.PUSH);
BOM.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
bomOptionDialog.open();
}
});
BOM.setText("BOM");
BOP = new MenuItem(optionsDropdown, SWT.PUSH);
BOP.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
bopOptionDialog.open();
}
});
BOP.setText("BOP");
demand = new MenuItem(optionsDropdown, SWT.PUSH);
demand.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
demandEdgeOptionDialog.open();
}
});
demand.setText("Demand");
substitute = new MenuItem(optionsDropdown, SWT.PUSH);
substitute.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
substituteOptionDialog.open();
}
});
substitute.setText("Substitute");
global = new MenuItem(optionsDropdown, SWT.PUSH);
global.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
globalOptionDialog.open();
}
});
global.setText("Global");
MenuItem prefs = new MenuItem(submenuEdit, SWT.PUSH);
prefs.setText("Preferences");
prefs.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
preferencesDialog.open();
}
});
MenuItem viewItem = new MenuItem(bar, SWT.CASCADE);
viewItem.setText("View");
Menu submenuView = new Menu(shell, SWT.DROP_DOWN);
viewItem.setMenu(submenuView);
showglobaldata = new MenuItem(submenuView, SWT.PUSH);
showglobaldata.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
ShowGlobal();
}
});
showglobaldata.setText("Global Data");
showglobaldata.setEnabled(false);
//statistics = new MenuItem(submenuView, SWT.PUSH);
//statistics.setEnabled(false);
//statistics.addListener(SWT.Selection, new Listener() {
// public void handleEvent(Event e) {
// if (!hasOpalData) {
// PopulateOpalDataSource(wo,witFileName);
// }
// hasOpalData=true;
// OpalPicture.open();
// }
//});
//statistics.setText("Statistics");
MenuItem modelItem = new MenuItem(bar, SWT.CASCADE);
modelItem.setText("Actions");
Menu submenuModel = new Menu(shell, SWT.DROP_DOWN);
modelItem.setMenu(submenuModel);
heurimplosion = new MenuItem(submenuModel, SWT.PUSH);
heurimplosion.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
try {
HeuristicImplosion();
}
catch (Exception ex) {
ex.printStackTrace();
}
}
});
heurimplosion.setText("Heuristic Implosion");
heurimplosion.setEnabled(false);
optimplosion = new MenuItem(submenuModel, SWT.PUSH);
optimplosion.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
try {
OptimizingImplosion();
}
catch (Exception ex) {
ex.printStackTrace();
}
}
});
optimplosion.setText("Optimizing Implosion");
optimplosion.setEnabled(false);
stochimplosion = new MenuItem(submenuModel, SWT.PUSH);
stochimplosion.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
try{
StochasticImplosion();
}
catch (Exception ex) {
ex.printStackTrace();
}
}
});
stochimplosion.setText("Stochastic Implosion");
stochimplosion.setEnabled(false);
mrp = new MenuItem(submenuModel, SWT.PUSH);
mrp.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
try {
MRP();
}
catch (Exception ex) {
ex.printStackTrace();
}
}
});
mrp.setText("MRP");
mrp.setEnabled(false);
postprocess = new MenuItem(submenuModel, SWT.PUSH);
postprocess.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
try {
Postprocess();
}
catch (Exception ex) {
ex.printStackTrace();
}
}
});
postprocess.setText("Post process model");
postprocess.setEnabled(false);
MenuItem set = new MenuItem(bar, SWT.CASCADE);
set.setText("Set");
Menu submenuSet = new Menu(shell, SWT.DROP_DOWN);
set.setMenu(submenuSet);
setScenario = new MenuItem(submenuSet, SWT.PUSH);
setScenario.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
setScenario();
}
});
setScenario.setText("Set Current Scenario");
setScenario.setEnabled(false);
setObjective = new MenuItem(submenuSet, SWT.PUSH);
setObjective.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
setObjective();
}
});
setObjective.setText("Set Current Objective");
setObjective.setEnabled(false);
// OpalPicture = new OpalPictureDialog(shell,this);
splitPaneComposite = new Composite(shell, 0);
gridData = new GridData();
gridData.horizontalAlignment = GridData.FILL;
gridData.verticalAlignment = GridData.FILL;
gridData.grabExcessHorizontalSpace = true;
gridData.grabExcessVerticalSpace = true;
splitPaneComposite.setLayoutData(gridData);
// splitPaneComposite.setLayoutData(BorderLayout.CENTER);
splitPaneComposite.setLayout(new FormLayout());
sash_form = new SashForm(splitPaneComposite, SWT.HORIZONTAL);
FormData fd3 = new FormData(); // fill the area
fd3.left = new FormAttachment(0, 5);
fd3.top = new FormAttachment(0, 5);
fd3.bottom = new FormAttachment(100, -5);
fd3.right = new FormAttachment(100, -5);
sash_form.setLayoutData(fd3);
leftTab = new CTabFolder(sash_form, SWT.TOP);
leftTab.setSimple(false);
leftTab.setMaximizeVisible(true);
leftTab.setMinimizeVisible(true);
leftTab.addCTabFolder2Listener(new CTabFolder2Listener() {
public void showList(CTabFolderEvent event) {
}
public void maximize(CTabFolderEvent event) {
sash_form.setMaximizedControl(leftTab);
}
public void minimize(CTabFolderEvent event) {
sash_form.setMaximizedControl(rightTab);
}
public void close(CTabFolderEvent event) {
}
public void restore(CTabFolderEvent event) {
sash_form.setMaximizedControl(null);
}
});
CTabItem leftTabItem = new CTabItem(leftTab, SWT.NONE);
leftTabItem.setText("Full Model");
leftTab.setSelection(0);
ToolBar toolbarFull = new ToolBar(leftTab, SWT.HORIZONTAL | SWT.FLAT);
final ToolItem restoreFull = new ToolItem(toolbarFull, SWT.PUSH);
Image restoreIcon = new Image(null, "images/restore.gif");
restoreFull.setImage(restoreIcon);
// restoreFull.setText("Restore");
restoreFull.setToolTipText("Restore");
restoreFull.addSelectionListener(new SelectionListener() {
public void widgetSelected(SelectionEvent event) {
sash_form.setMaximizedControl(null);
}
public void widgetDefaultSelected(SelectionEvent event) {
}
});
leftTab.setTabHeight(Math.max(toolbarFull.computeSize(SWT.DEFAULT,
SWT.DEFAULT).y, leftTab.getTabHeight()));
leftTab.setTopRight(toolbarFull);
leftPanel = new Composite(leftTab, SWT.BORDER); // will hold the top
// stuff, plus a
// splitPane
leftPanel.setLayout(new FormLayout());
FormData fd4 = new FormData();
fd4.left = new FormAttachment(0, 5); // attach to left, to top, and
// to bottom
fd4.top = new FormAttachment(0, 5);
fd4.bottom = new FormAttachment(100, -5);
fd4.right = new FormAttachment(100, -5);
leftPanel.setLayoutData(fd4);
Composite topLeft = new Composite(leftPanel, SWT.NONE);
leftTabItem.setControl(leftPanel);
FormData fd44 = new FormData();
fd44.left = new FormAttachment(0, 5); // attach to left, to top, and
// to right
fd44.top = new FormAttachment(0, 5);
fd44.right = new FormAttachment(100, -5);
topLeft.setLayoutData(fd44);
topLeft.setLayout(new RowLayout());
rightTab = new CTabFolder(sash_form, SWT.TOP);
rightTab.setSimple(false);
rightTab.setMaximizeVisible(true);
rightTab.setMinimizeVisible(true);
rightTab.addCTabFolder2Listener(new CTabFolder2Listener() {
public void showList(CTabFolderEvent event) {
}
public void maximize(CTabFolderEvent event) {
sash_form.setMaximizedControl(rightTab);
}
public void minimize(CTabFolderEvent event) {
sash_form.setMaximizedControl(leftTab);
}
public void close(CTabFolderEvent event) {
}
public void restore(CTabFolderEvent event) {
}
});
CTabItem rightTabItem = new CTabItem(rightTab, SWT.NONE);
rightTabItem.setText("Model Focus");
rightTab.setSelection(0);
rightPanel = new Composite(rightTab, SWT.BORDER);
rightTabItem.setControl(rightPanel);
rightPanel.setLayout(new FormLayout());
FormData fd6 = new FormData();
fd6.top = new FormAttachment(0, 5); // attach to top, to right, and to
// bottom
fd6.right = new FormAttachment(100, -5);
fd6.bottom = new FormAttachment(100, -5);
rightPanel.setLayoutData(fd6);
ToolBar toolbarDetail = new ToolBar(rightTab, SWT.HORIZONTAL | SWT.FLAT);
final ToolItem focus = new ToolItem(toolbarDetail, SWT.PUSH);
Image focusIcon = new Image(null, "images/e_forward.gif");
focus.setImage(focusIcon);
// focus.setText("Focus");
focus.setToolTipText("Focus On Selected");
focus.addSelectionListener(new SelectionListener() {
public void widgetSelected(SelectionEvent event) {
RefocusButtonPushed();
}
public void widgetDefaultSelected(SelectionEvent event) {
}
});
final ToolItem expand = new ToolItem(toolbarDetail, SWT.PUSH);
Image expandIcon = new Image(null, "images/fjexport.gif");
expand.setImage(expandIcon);
// expand.setText("Expand");
expand.setToolTipText("Expand Selected");
expand.addSelectionListener(new SelectionListener() {
public void widgetSelected(SelectionEvent event) {
detailContents = detailContents
.getExpandedDetailContents(detailContents.SelectedObject);
getDetailCanvas().setContents(detailContents);
}
public void widgetDefaultSelected(SelectionEvent event) {
}
});
final ToolItem restoreDetail = new ToolItem(toolbarDetail, SWT.PUSH);
restoreDetail.setImage(restoreIcon);
// restoreDetail.setText("Restore");
restoreDetail.setToolTipText("Restore");
restoreDetail.addSelectionListener(new SelectionListener() {
public void widgetSelected(SelectionEvent event) {
sash_form.setMaximizedControl(null);
}
public void widgetDefaultSelected(SelectionEvent event) {
}
});
rightTab.setTabHeight(Math.max(toolbarDetail.computeSize(SWT.DEFAULT,
SWT.DEFAULT).y, rightTab.getTabHeight()));
rightTab.setTopRight(toolbarDetail);
Composite topRight = new Composite(rightPanel, SWT.NONE);
// need to make an explicit layout so that I can set attributes
RowLayout rowLOright = new RowLayout();
rowLOright.wrap = true;
rowLOright.pack = true;
// rowLOright.justify = true;
// rowLOright.spacing = 10;
topRight.setLayout(rowLOright);
int[] weights = new int[2];
weights[0] = 75;
weights[1] = 25;
sash_form.setWeights(weights);
// make a composite to hold the "open file stuff"
//need to initialize witobject
fileDialog = new FileDialog(shell, SWT.OPEN);
if (WitDataDir !=null)
fileDialog.setFilterPath(WitDataDir);
chooseFocusDialog = new ChooseFocusDialog(shell, this);
preferencesDialog = new PreferencesDialog(shell, this);
partOptionDialog = new PartOptionsDialog(shell, this);
operationOptionDialog = new OperationOptionsDialog(shell, this);
bomOptionDialog = new BomOptionsDialog(shell, this);
bopOptionDialog = new BopOptionsDialog(shell, this);
demandEdgeOptionDialog = new DemandEdgeOptionsDialog(shell, this);
substituteOptionDialog = new SubstituteOptionsDialog(shell, this);
globalOptionDialog = new GlobalOptionsDialog(shell, this);
// add a composite to be used for the opal picture
// make a sashform to hold just the canvas and the text box stuff
leftSash = new SashForm(leftPanel, SWT.VERTICAL);
FormData fdlS = new FormData();
fdlS.top = new FormAttachment(topLeft, 5);
fdlS.left = new FormAttachment(0, 5);
fdlS.right = new FormAttachment(100, -5);
fdlS.bottom = new FormAttachment(100, -5);
leftSash.setLayoutData(fdlS);
FormData fd45 = new FormData();
fd45.left = new FormAttachment(0, 5); // attach to left, to top, and
// to right
fd45.top = new FormAttachment(0, 5);
fd45.right = new FormAttachment(100, -5);
// fd45.bottom = new FormAttachment(rightSash,0);
topRight.setLayoutData(fd45);
rightSash = new SashForm(rightPanel, SWT.VERTICAL);
FormData fdrS = new FormData();
fdrS.top = new FormAttachment(topRight, 5);
fdrS.left = new FormAttachment(0, 5);
fdrS.right = new FormAttachment(100, -5);
fdrS.bottom = new FormAttachment(100, -5);
rightSash.setLayoutData(fdrS);
setFigureCanvas(new FigureCanvas(leftSash));
getFigureCanvas().setContents(contents = getContents());
getFigureCanvas().getViewport().setContentsTracksHeight(true);
getFigureCanvas().getViewport().setContentsTracksWidth(true);
// add a label at the top
setDetailCanvas(new FigureCanvas(rightSash));
getDetailCanvas().setContents(contents = getDetailContents());
getDetailCanvas().getViewport().setContentsTracksHeight(true);
getDetailCanvas().getViewport().setContentsTracksWidth(true);
// now we need two composites to hold the textbox + label
Composite leftText = new Composite(leftSash, SWT.BORDER);
Composite rightText = new Composite(rightSash, SWT.BORDER);
leftText.setLayout(new FormLayout());
rightText.setLayout(new FormLayout());
leftSash.setWeights(weights);
rightSash.setWeights(weights);
org.eclipse.swt.widgets.Label label1 = new org.eclipse.swt.widgets.Label(
leftText, SWT.LEFT);
label1.setText("Details");
FormData fdl1 = new FormData();
fdl1.top = new FormAttachment(0, 5);
fdl1.left = new FormAttachment(0, 5);
fdl1.right = new FormAttachment(100, -5);
label1.setLayoutData(fdl1);
org.eclipse.swt.widgets.Label label2 = new org.eclipse.swt.widgets.Label(
rightText, SWT.LEFT);
label2.setText("Details");
FormData fdl2 = new FormData();
fdl2.top = new FormAttachment(0, 5);
fdl2.left = new FormAttachment(0, 5);
fdl2.right = new FormAttachment(100, -5);
label2.setLayoutData(fdl2);
TextBox1 = new StyledText(leftText, SWT.LEAD | SWT.MULTI | SWT.WRAP
| SWT.H_SCROLL | SWT.V_SCROLL | SWT.NONE);
TextBox1.setText(" ");
FormData fdt1 = new FormData();
fdt1.top = new FormAttachment(label1, 5);
fdt1.left = new FormAttachment(0, 5);
fdt1.right = new FormAttachment(100, -5);
fdt1.bottom = new FormAttachment(100, -5);
TextBox1.setLayoutData(fdt1);
TextBox2 = new StyledText(rightText, SWT.LEAD | SWT.MULTI | SWT.WRAP
| SWT.H_SCROLL | SWT.V_SCROLL | SWT.NONE);
TextBox2.setText("");
FormData fdt2 = new FormData();
fdt2.top = new FormAttachment(label2, 5);
fdt2.left = new FormAttachment(0, 5);
fdt2.right = new FormAttachment(100, -5);
fdt2.bottom = new FormAttachment(100, -5);
TextBox2.setLayoutData(fdt2);
mainContents.setTextBox(TextBox1);
detailContents.setTextBox(TextBox2);
// shell.pack();
shell.setSize(900, 600);
if (witFileName!=null) {
if (!GotData()) {
String message=wo.message;
displayExit(shell, message, wo.allOK);
}
focus.setEnabled(true);
showglobaldata.setEnabled(true);
}
shell.open();
shell.layout(true);
shell.addDisposeListener(new DisposeListener() {
public void widgetDisposed(DisposeEvent e) {
// opalServer.Terminate();
try {
if (wo != null)
wo.finalize();
}
catch (Throwable ex) {
ex.printStackTrace();
}
}
});
try {
while (!shell.isDisposed())
while (!d.readAndDispatch())
d.sleep();
} catch (Throwable t) {
t.printStackTrace();
System.exit(0);
}
}
public void displayExit(Shell s, String message, STATUS allOK) {
TerminalErrorDialog e = new TerminalErrorDialog(shell, this, message, allOK);
e.open();
if (allOK==STATUS.DEAD) System.exit(0);
}
public MyMessageDialog displayMessage(Shell s, String message) {
MyMessageDialog e = new MyMessageDialog(shell, message);
e.open();
return e;
}
public void OpenFullModel() {
witFileName = fileDialog.open();
globalDataDialog.close();
outputData = false;
if (witFileName != null) {
fulltree = true;
sash_form.setMaximizedControl(null);
String mess = "Reading data file. "+ longWaitMessage();
MyMessageDialog e = displayMessage(shell, mess);
if (!GotData()) {
e.close();
String message = wo.message;
displayExit(shell, message, wo.allOK);
shell.setCursor(null);
return;
}
focusObject = null;
EnableOptions();
SetEnabled();
e.close();
}
}
public String WitNeedsToTerminate1() {
return "WIT issued a severe error message while invoking ";
}
public String WitNeedsToTerminate2() {
return "\nAs a result of this severe error, WitViz will need to terminate when this window is closed. \n The severe error message follows:";
}
public String longWaitMessage() {
String mess = "Please wait...\nThis dialog will close when the action is complete";
return mess;
}
public void HeuristicImplosion() throws Exception {
String mess = "Invoking Heuristic Implosion. "+ longWaitMessage();
MyMessageDialog e = displayMessage(shell, mess);
int result = wo.heuristicimplosion();
e.close();
if (result==0) {
SetEnabled();
outputData = true;
}
else {
String message = WitNeedsToTerminate1()+" Heuristic Implosion " + WitNeedsToTerminate2() + wo.message;
displayExit(shell, message, wo.allOK);
shell.setCursor(null);
return;
}
}
public void OptimizingImplosion() throws Exception{
String mess = "Invoking Optimizing Implosion. "+longWaitMessage();
MyMessageDialog e = displayMessage(shell, mess);
int result = wo.optimizingimplosion();
e.close();
if (result==0){
SetEnabled();
outputData = true;
}
else {
String message = WitNeedsToTerminate1()+" Optimizing Implosion " + WitNeedsToTerminate2() + wo.message;
displayExit(shell, message, wo.allOK);
shell.setCursor(null);
return;
}
}
public void StochasticImplosion() throws Exception {
String mess = "Invoking Stochastic Implosion. "+longWaitMessage();
MyMessageDialog e = displayMessage(shell, mess);
int result = wo.stochasticimplosion();
e.close();
if (result==0) {
SetEnabled();
outputData = true;
}
else {
String message = WitNeedsToTerminate1()+" Stochastic Implosion " + WitNeedsToTerminate2() + wo.message;
displayExit(shell, message, wo.allOK);
shell.setCursor(null);
return;
}
}
public void MRP() throws Exception {
String mess = "Invoking MRP. "+longWaitMessage();
MyMessageDialog e = displayMessage(shell, mess);
int result = wo.mrp();
e.close();
if (result==0) {
SetEnabled();
outputData = true;
}
else {
String message = WitNeedsToTerminate1()+" MRP " + WitNeedsToTerminate2() + wo.message;
displayExit(shell, message, wo.allOK);
shell.setCursor(null);
return;
}
}
public void Postprocess() throws Exception {
String mess = "Invoking Postprocessing. "+longWaitMessage();
MyMessageDialog e = displayMessage(shell, mess);
int result = wo.postprocess();
e.close();
if (result==0) {
SetEnabled();
outputData = true;
}
else {
String message = WitNeedsToTerminate1()+" PostProcessing " + WitNeedsToTerminate2() + wo.message;
displayExit(shell, message, wo.allOK);
shell.setCursor(null);
return;
}
}
public void setScenario() {
scenarioChooser.updateShell();
scenarioChooser.open();
}
public void setObjective() {
objectiveChooser.updateShell();
objectiveChooser.open();
}
public void UpdateScenario(int scenario) {
wo.setScenario(scenario);
if (focusObject!=null) {
if (detailLast&&detailContents.SelectedObject!=null)
detailContents.refocus((Figure)detailContents.SelectedObject);
else if (focusObject!=null)
mainContents.refocus((Figure)focusObject);
}
if (globalDataDialog.isOpen==true)
globalDataDialog.open();
}
public void UpdateObjective(String objective) {
wo.setObjective(objective);
if (focusObject!=null) {
if (detailLast&&detailContents.SelectedObject!=null)
detailContents.refocus((Figure)detailContents.SelectedObject);
else if (focusObject!=null)
mainContents.refocus((Figure)focusObject);
}
if (globalDataDialog.isOpen==true)
globalDataDialog.open();
}
public void OpenPartialModel() {
fulltree = false;
outputData = false;
getFigureCanvas().setContents(contents = null); // null it out
sash_form.setMaximizedControl(rightTab); // maximize the sashform to
globalDataDialog.close();
focusObject = null;
detailContents.SelectedObject=null;
// show only the detail
witFileName = fileDialog.open();
if (witFileName != null) {
shell.setText("WitViz" + ": " + witFileName);
try {
shell.setCursor(waitCursor);
String mess = "Reading data file. "+longWaitMessage();
MyMessageDialog e = displayMessage(shell, mess);
wo = new WitObject(witFileName);
e.close();
if (wo.allOK!=STATUS.OK) {
String message=wo.message;
displayExit(shell,message,wo.allOK);
shell.setCursor(null);
return;
}
shell.setCursor(null);
if (wo.getStochMode()==true) {
setScenario.setEnabled(true);
scenarioChooser.updateShell();
}
else {
setScenario.setEnabled(false);
scenarioChooser.close();
}
if (wo.getMultObjectiveMode()==true) {
setObjective.setEnabled(true);
objectiveChooser.updateShell();
}
else {
setObjective.setEnabled(false);
objectiveChooser.close();
}
} catch (Exception ex) {
ex.printStackTrace();
}
setWitObject();
// check for whether we have data for this file yet
hasOpalData = false;
//if (doOpalPicture)
//PopulateOpalDataSource(wo);
focus.setEnabled(true);
showglobaldata.setEnabled(true);
focusObject = null;
EnableOptions();
SetEnabled();
chooseFocusDialog.open();
}
}
public void Print() {
PrintDialog printd = new PrintDialog(shell);
// capture shell as it currently appears into an ImageData object
final org.eclipse.swt.graphics.Rectangle bounds = shell.getBounds();
final Image srcImage = new Image(d, bounds.width, bounds.height);
GC srcGC = new GC(d);
srcGC.copyArea(srcImage, bounds.x, bounds.y);
srcGC.dispose();
ImageData srcData = srcImage.getImageData();
srcImage.dispose();
PrinterData pd = printd.open();
final Printer printer;
if (pd != null) {
// printer.startJob("foo");
/*
* Do the printing in a background thread so that spooling does not
* freeze the UI.
*/
printer = new Printer(pd);
// draw onto printer scaled appropriately
final org.eclipse.swt.graphics.Point srcDPI = d.getDPI();
final org.eclipse.swt.graphics.Point destDPI = printer.getDPI();
org.eclipse.swt.graphics.Rectangle trim = printer.computeTrim(0, 0,
0, 0);
final int leftMargin = destDPI.x + trim.x; // one inch from left
// side of paper
final int topMargin = destDPI.y / 2 + trim.y; // one-half inch
// from top edge of
// paper
final Image destImage = new Image(printer, srcData);
Thread printingThread = new Thread("Printing") {
public void run() {
print(printer, destImage, bounds, leftMargin, topMargin,
destDPI, srcDPI);
printer.dispose();
}
};
printingThread.start();
}
}
public void ChooseFocus() {
chooseFocusDialog.open();
focusObject = null;
}
void RefocusButtonPushed() {
focusObject = detailContents.SelectedObject;
if (fulltree) {
mainContents.setSelected(focusObject);
mainContents.recenter(focusObject);
} else {
focusChanged();
detailContents.recenter(focusObject);
detailContents.ColorSelf(focusObject);
}
}
void RefocusButtonPushed(Object f) {
focusObject = f;
if (fulltree) {
mainContents.setSelected(focusObject);
mainContents.recenter(focusObject);
} else {
focusChanged();
// detailContents.setSelected(focusObject);
// detailContents.recenter(focusObject);
// detailContents.ColorSelf(focusObject);
}
}
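// Draws the captured screen image on the printer, scaling from screen DPI to
// printer DPI and offsetting by the computed page margins.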
void print(Printer printer, Image destImage,
org.eclipse.swt.graphics.Rectangle bounds, int leftMargin,
int topMargin, org.eclipse.swt.graphics.Point destDPI,
org.eclipse.swt.graphics.Point srcDPI) {
if (printer.startJob("SWT Printing Snippet")) {
GC destGC = new GC(printer);
if (printer.startPage()) {
destGC.drawImage(destImage, 0, 0, bounds.width, bounds.height,
leftMargin, topMargin, bounds.width * destDPI.x
/ srcDPI.x, bounds.height * destDPI.y
/ srcDPI.y);
printer.endPage();
}
destGC.dispose();
printer.endJob();
destImage.dispose();
}
printer.dispose();
}
public void setWait() {
shell.setCursor(waitCursor);
}
public void unsetWait() {
shell.setCursor(null);
}
public boolean GotData() {
shell.setText("WitViz" + ": " + witFileName);
try {
shell.setCursor(waitCursor);
wo = new WitObject(witFileName);
if (wo.allOK != STATUS.OK) {
shell.setCursor(null);
return false;
}
shell.setCursor(null);
focusObject = null;
detailContents.SelectedObject=null;
if (wo.getStochMode()==true) {
setScenario.setEnabled(true);
scenarioChooser = new ChooseScenarioDialog(shell, this);
//scenarioChooser.updateShell();
}
else {
setScenario.setEnabled(false);
if (scenarioChooser!=null) scenarioChooser.close();
}
if (wo.getMultObjectiveMode()==true) {
setObjective.setEnabled(true);
objectiveChooser = new ChooseObjectiveDialog(shell, this);
//objectiveChooser.updateShell();
}
else {
setObjective.setEnabled(false);
if (objectiveChooser!=null) objectiveChooser.close();
}
} catch (Exception ex) {
ex.printStackTrace();
} catch (Throwable t) {
System.exit(0);
}
setWitObject();
shell.setCursor(null);
hasOpalData = false;
// if (doOpalPicture)
// PopulateOpalDataSource(wo);
focus.setEnabled(true);
showglobaldata.setEnabled(true);
mainContents = (CreateTreeFigure) getContents();
shell.setCursor(null);
mainContents.setTextBox(TextBox1);
focusObject = null;
EnableOptions();
SetEnabled();
detailContents = (CreateTreeFigure) getDetailContents();
detailContents.setTextBox(TextBox2);
getFigureCanvas().setContents(mainContents);
getDetailCanvas().setContents(detailContents);
return true;
}
private boolean OpalDataExists(String filename) {
try {
FileInputStream isl = new FileInputStream(filename + ".Opal.td");
} catch (Exception e) {
return false;
}
return true;
}
private void setWitObject() {
partOptionDialog.setWitObject(wo);
bomOptionDialog.setWitObject(wo);
bopOptionDialog.setWitObject(wo);
operationOptionDialog.setWitObject(wo);
demandEdgeOptionDialog.setWitObject(wo);
substituteOptionDialog.setWitObject(wo);
globalOptionDialog.setWitObject(wo);
globalDataDialog.setWitObject(wo);
}
// public boolean close() { // copy widgets' input to private members
// return super.close();
// }
// default (English language, United States)
private void SetEnabled() {
partOptionDialog.setEnabled();
operationOptionDialog.setEnabled();
bomOptionDialog.setEnabled();
bopOptionDialog.setEnabled();
substituteOptionDialog.setEnabled();
demandEdgeOptionDialog.setEnabled();
globalOptionDialog.setEnabled();
}
private void EnableOptions() {
part.setEnabled(true);
operation.setEnabled(true);
BOM.setEnabled(true);
BOP.setEnabled(true);
substitute.setEnabled(true);
demand.setEnabled(true);
global.setEnabled(true);
//statistics.setEnabled(true);
postprocess.setEnabled(true);
heurimplosion.setEnabled(true);
optimplosion.setEnabled(true);
stochimplosion.setEnabled(true);
mrp.setEnabled(true);
//if (doOpalPicture)
//statistics.setEnabled(true);
}
private void ShowGlobal() {
//int numparts = wo.getNumParts();
//int numoperations = wo.getNumOperations();
//GlobalOptionsDialog optionsDialog = globalOptionDialog;
//mainContents.TextBox.setText("Number of Parts: "+ numparts + "\n");
//.TextBox.append("Number of Operations: "+ numoperations + "\n");
//optionsDialog.getOptionsString(mainContents.TextBox);
globalDataDialog.open();
}
static class TopOrBottomAnchor extends ChopboxAnchor {
public TopOrBottomAnchor(IFigure owner) {
super(owner);
}
public Point getLocation(Point reference) {
Point p;
p = getOwner().getBounds().getCenter();
getOwner().translateToAbsolute(p);
if (reference.y < p.y)
p = getOwner().getBounds().getTop();
else
p = getOwner().getBounds().getBottom();
getOwner().translateToAbsolute(p);
return p;
}
}
static class TopAnchor extends ChopboxAnchor {
public TopAnchor(IFigure owner) {
super(owner);
}
public Point getLocation(Point reference) {
Point p;
p = getOwner().getBounds().getCenter();
getOwner().translateToAbsolute(p);
p = getOwner().getBounds().getTop();
getOwner().translateToAbsolute(p);
return p;
}
}
static class BottomAnchor extends ChopboxAnchor {
public BottomAnchor(IFigure owner) {
super(owner);
}
public Point getLocation(Point reference) {
Point p;
p = getOwner().getBounds().getCenter();
getOwner().translateToAbsolute(p);
p = getOwner().getBounds().getBottom();
getOwner().translateToAbsolute(p);
return p;
}
}
public Vector getSortedPartsList() {
Vector r = wo.getAllParts();
Vector returnVec = new Vector();
Set<String> s = new TreeSet();
for (int i = 0; i<r.size();i++) {
s.add((String)r.elementAt(i));
}
for (Iterator i=s.iterator();i.hasNext();) {
returnVec.add((String)i.next());
}
return returnVec;
}
public Vector getSortedDemandedPartsList() {
Vector r = wo.getAllDemandedParts();
Vector returnVec = new Vector();
Set<String> s = new TreeSet();
for (int i = 0; i<r.size();i++) {
s.add((String)r.elementAt(i));
}
for (Iterator i=s.iterator();i.hasNext();) {
returnVec.add((String)i.next());
}
return returnVec;
}
public Vector getSortedCapacitiesList() {
Vector r = wo.getAllCapacities();
Vector returnVec = new Vector();
Set<String> s = new TreeSet();
for (int i = 0; i<r.size();i++) {
s.add((String)r.elementAt(i));
}
for (Iterator i=s.iterator();i.hasNext();) {
returnVec.add((String)i.next());
}
return returnVec;
}
public String[] getSortedRawMaterialsList() {
Vector rawmaterials = wo.getRawMaterialsList();
String[][] returnVec = new String[1][rawmaterials.size()];
for (int i = 0; i < rawmaterials.size(); i++) {
ListInfo li = (ListInfo) rawmaterials.elementAt(i);
returnVec[0][i] = li.name;
}
Arrays.sort(returnVec[0]);
return returnVec[0];
}
public Vector getSortedOperationsList() {
Vector r = wo.getAllOperations();
Vector returnVec = new Vector();
Set<String> s = new TreeSet();
for (int i = 0; i<r.size();i++) {
s.add((String)r.elementAt(i));
}
for (Iterator i=s.iterator();i.hasNext();) {
returnVec.add((String)i.next());
}
return returnVec;
}
protected String[] getGraphMethods() {
// Method[] methods = GraphTests.class.getMethods();
Method[] methods = new Method[0];
String[] methodNames = new String[methods.length];
int nameIndex = 0;
for (int i = 0; i < methods.length; i++) {
if (methods[i].getReturnType().equals(DirectedGraph.class)) {
methodNames[nameIndex] = methods[i].getName();
nameIndex++;
}
}
return methodNames;
}
public FigureCanvas getFigureCanvas() {
return figureCanvas;
}
public void setFigureCanvas(FigureCanvas figureCanvas) {
this.figureCanvas = figureCanvas;
}
}
| ["\"WITVIZDATAPATH\""] | [] | ["WITVIZDATAPATH"] | [] | ["WITVIZDATAPATH"] | java | 1 | 0 | |
cmd/goa/gen.go
|
package main
import (
"fmt"
"go/parser"
"go/token"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"goa.design/goa/v3/codegen"
"golang.org/x/tools/go/packages"
)
// Generator is the code generation management data structure.
type Generator struct {
// Command is the name of the command to run.
Command string
// DesignPath is the Go import path to the design package.
DesignPath string
// Output is the absolute path to the output directory.
Output string
// DesignVersion is the major component of the Goa version used by the design DSL.
// DesignVersion is either 2 or 3.
DesignVersion int
// bin is the filename of the generated generator.
bin string
// tmpDir is the temporary directory used to compile the generator.
tmpDir string
}
// NewGenerator creates a Generator.
func NewGenerator(cmd string, path, output string) *Generator {
bin := "goa"
if runtime.GOOS == "windows" {
bin += ".exe"
}
var version int
{
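// Detect the Goa major version used by the design package by scanning its
// imports for goa.design/goa/vN/dsl; default to version 2 when no versioned
// DSL import is found.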
version = 2
matched := false
pkgs, _ := packages.Load(&packages.Config{Mode: packages.NeedFiles}, path)
fset := token.NewFileSet()
p := regexp.MustCompile(`goa.design/goa/v(\d+)/dsl`)
for _, pkg := range pkgs {
for _, gof := range pkg.GoFiles {
if bs, err := ioutil.ReadFile(gof); err == nil {
if f, err := parser.ParseFile(fset, "", string(bs), parser.ImportsOnly); err == nil {
for _, s := range f.Imports {
matches := p.FindStringSubmatch(s.Path.Value)
if len(matches) == 2 {
matched = true
version, _ = strconv.Atoi(matches[1]) // We know it's an integer
}
}
}
}
if matched {
break
}
}
if matched {
break
}
}
}
return &Generator{
Command: cmd,
DesignPath: path,
Output: output,
DesignVersion: version,
bin: bin,
}
}
// Write writes the main file.
func (g *Generator) Write(debug bool) error {
var tmpDir string
{
wd := "."
if cwd, err := os.Getwd(); err == nil {
wd = cwd
}
tmp, err := ioutil.TempDir(wd, "goa")
if err != nil {
return err
}
tmpDir = tmp
}
g.tmpDir = tmpDir
var sections []*codegen.SectionTemplate
{
data := map[string]interface{}{
"Command": g.Command,
"CleanupDirs": cleanupDirs(g.Command, g.Output),
"DesignVersion": g.DesignVersion,
}
ver := ""
if g.DesignVersion > 2 {
ver = "v" + strconv.Itoa(g.DesignVersion) + "/"
}
imports := []*codegen.ImportSpec{
codegen.SimpleImport("flag"),
codegen.SimpleImport("fmt"),
codegen.SimpleImport("os"),
codegen.SimpleImport("path/filepath"),
codegen.SimpleImport("sort"),
codegen.SimpleImport("strconv"),
codegen.SimpleImport("strings"),
codegen.SimpleImport("goa.design/goa/" + ver + "codegen"),
codegen.SimpleImport("goa.design/goa/" + ver + "codegen/generator"),
codegen.SimpleImport("goa.design/goa/" + ver + "eval"),
codegen.NewImport("goa", "goa.design/goa/"+ver+"pkg"),
codegen.NewImport("_", g.DesignPath),
}
sections = []*codegen.SectionTemplate{
codegen.Header("Code Generator", "main", imports),
{
Name: "main",
Source: mainT,
Data: data,
},
}
}
f := &codegen.File{Path: "main.go", SectionTemplates: sections}
_, err := f.Render(tmpDir)
return err
}
// Compile compiles the generator.
func (g *Generator) Compile() error {
return g.runGoCmd("build", "-o", g.bin)
}
// Run runs the compiled binary and returns the output lines.
func (g *Generator) Run() ([]string, error) {
var cmdl string
{
args := make([]string, len(os.Args)-1)
gopaths := filepath.SplitList(os.Getenv("GOPATH"))
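// Replace any GOPATH prefix in the arguments with $(GOPATH) so the recorded
// command line does not embed machine-specific paths.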
for i, a := range os.Args[1:] {
for _, p := range gopaths {
if strings.Contains(a, p) {
args[i] = strings.Replace(a, p, "$(GOPATH)", -1)
break
}
}
if args[i] == "" {
args[i] = a
}
}
cmdl = " " + strings.Join(args, " ")
rawcmd := filepath.Base(os.Args[0])
// Remove .exe suffix to avoid different output on Windows.
rawcmd = strings.TrimSuffix(rawcmd, ".exe")
cmdl = fmt.Sprintf("$ %s%s", rawcmd, cmdl)
}
args := []string{"--version=" + strconv.Itoa(g.DesignVersion), "--output=" + g.Output, "--cmd=" + cmdl}
cmd := exec.Command(filepath.Join(g.tmpDir, g.bin), args...)
out, err := cmd.CombinedOutput()
if err != nil {
return nil, fmt.Errorf("%s\n%s", err, string(out))
}
res := strings.Split(string(out), "\n")
for (len(res) > 0) && (res[len(res)-1] == "") {
res = res[:len(res)-1]
}
return res, nil
}
// Remove deletes the package files.
func (g *Generator) Remove() {
if g.tmpDir != "" {
os.RemoveAll(g.tmpDir)
g.tmpDir = ""
}
}
func (g *Generator) runGoCmd(args ...string) error {
gobin, err := exec.LookPath("go")
if err != nil {
return fmt.Errorf(`failed to find a go compiler, looked in "%s"`, os.Getenv("PATH"))
}
if g.DesignVersion > 2 {
os.Setenv("GO111MODULE", "on")
}
c := exec.Cmd{
Path: gobin,
Args: append([]string{gobin}, args...),
Dir: g.tmpDir,
}
out, err := c.CombinedOutput()
if err != nil {
if len(out) > 0 {
return fmt.Errorf(string(out))
}
return fmt.Errorf("failed to compile generator: %s", err)
}
return nil
}
// cleanupDirs returns the paths of the subdirectories under gendir to delete
// before generating code.
func cleanupDirs(cmd, output string) []string {
if cmd == "gen" {
gendirPath := filepath.Join(output, codegen.Gendir)
gendir, err := os.Open(gendirPath)
if err != nil {
return nil
}
defer gendir.Close()
finfos, err := gendir.Readdir(-1)
if err != nil {
return []string{gendirPath}
}
dirs := []string{}
for _, fi := range finfos {
if fi.IsDir() {
dirs = append(dirs, filepath.Join(gendirPath, fi.Name()))
}
}
return dirs
}
return nil
}
// mainT is the template for the generator main.
const mainT = `func main() {
var (
out = flag.String("output", "", "")
version = flag.String("version", "", "")
cmdl = flag.String("cmd", "", "")
ver int
)
{
flag.Parse()
if *out == "" {
fail("missing output flag")
}
if *version == "" {
fail("missing version flag")
}
if *cmdl == "" {
fail("missing cmd flag")
}
v, err := strconv.Atoi(*version)
if err != nil {
fail("invalid version %s", *version)
}
ver = v
}
if ver > goa.Major {
fail("cannot run goa %s on design using goa v%s\n", goa.Version(), *version)
}
if err := eval.Context.Errors; err != nil {
fail(err.Error())
}
if err := eval.RunDSL(); err != nil {
fail(err.Error())
}
{{- range .CleanupDirs }}
if err := os.RemoveAll({{ printf "%q" . }}); err != nil {
fail(err.Error())
}
{{- end }}
{{- if gt .DesignVersion 2 }}
codegen.DesignVersion = ver
{{- end }}
outputs, err := generator.Generate(*out, {{ printf "%q" .Command }})
if err != nil {
fail(err.Error())
}
fmt.Println(strings.Join(outputs, "\n"))
}
func fail(msg string, vals ...interface{}) {
fmt.Fprintf(os.Stderr, msg, vals...)
os.Exit(1)
}
`
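// Illustrative lifecycle (a sketch, not part of the original source; it assumes the
// Generator fields referenced above -- Command, DesignPath, DesignVersion, Output --
// have been populated, e.g. by a constructor outside this excerpt):
//
//	if err := g.Compile(); err != nil { /* handle error */ } // build the generated main package
//	lines, err := g.Run()                                    // execute it; lines are its output
//	defer g.Remove()                                         // delete the temporary package files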
|
[
"\"GOPATH\"",
"\"PATH\""
] |
[] |
[
"GOPATH",
"PATH"
] |
[]
|
["GOPATH", "PATH"]
|
go
| 2 | 0 | |
src/drugrelink/download.py
|
# -*- coding: utf-8 -*-
"""Helper functions for getting resources."""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional
from urllib.request import urlretrieve
logger = logging.getLogger(__name__)
HERE = os.path.abspath(os.path.dirname(__file__))
DEFAULT_DIRECTORY = os.path.abspath(os.path.join(HERE, os.pardir, os.pardir, 'data'))
DATA_DIRECTORY = os.environ.get('REPOSITIONING_COMPARISON_DIRECTORY', DEFAULT_DIRECTORY)
# URLs from dhimmel/integrate
NODE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/nodes.tsv'
EDGE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/edges.sif.gz'
PERMUTATION1_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-1.json.bz2'
PERMUTATION2_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-2.json.bz2'
PERMUTATION3_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-3.json.bz2'
PERMUTATION4_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-4.json.bz2'
PERMUTATION5_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-5.json.bz2'
PERMUTATION_DATA_FILE_FMT = 'hetnet_perm-{}.json.bz2'
PERMUTATION_DATA_URL_FMT = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-{}.json.bz2'
# URLs from dhimmel/learn
TRANSFORMED_FEATURES_URL = 'https://github.com/dhimmel/learn/blob/master/prediction/features/features.tsv.bz2?raw=true'
VALIDATE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/learn/master/validate/validation-statuses.tsv'
SYMPTOMATIC_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/learn/master/prediction/predictions/probabilities.tsv'
REPURPOSE_DATA_URL = 'https://raw.githubusercontent.com/drugrelink/drugrelink/master/notebooks/repurpose_overlap.json'
REPO_DATA_URL = 'https://raw.githubusercontent.com/drugrelink/drugrelink/master/notebooks/repo_data.csv'
@dataclass
class DataPaths:
"""Container for the paths for training."""
node_data_path: str
edge_data_path: str
transformed_features_path: str
validate_data_path: str
symptomatic_data_path: str
permutation_paths: List[str]
data_edge2vec_path: str
repurpose_data_path: str
repo_data_path: str
def get_data_paths(directory: Optional[str] = None) -> DataPaths:
"""Ensure Himmelstein's data files are downloaded."""
if directory is None:
directory = DATA_DIRECTORY
os.makedirs(directory, exist_ok=True)
node_data_path = os.path.join(directory, 'nodes.tsv')
if not os.path.exists(node_data_path):
logger.info(f'downloading {NODE_DATA_URL}')
urlretrieve(NODE_DATA_URL, node_data_path)
edge_data_path = os.path.join(directory, 'edges.sif.gz')
if not os.path.exists(edge_data_path):
logger.info(f'downloading {EDGE_DATA_URL}')
urlretrieve(EDGE_DATA_URL, edge_data_path)
transformed_features_path = os.path.join(directory, 'transformed-features.tsv.bz2')
if not os.path.exists(transformed_features_path):
logger.info(f'downloading {TRANSFORMED_FEATURES_URL}')
urlretrieve(TRANSFORMED_FEATURES_URL, transformed_features_path)
validate_data_path = os.path.join(directory, 'validation-statuses.tsv')
if not os.path.exists(validate_data_path):
logger.info(f'downloading {VALIDATE_DATA_URL}')
urlretrieve(VALIDATE_DATA_URL, validate_data_path)
symptomatic_data_path = os.path.join(directory, 'probabilities.tsv')
if not os.path.exists(symptomatic_data_path):
logger.info(f'downloading {SYMPTOMATIC_DATA_URL}')
urlretrieve(SYMPTOMATIC_DATA_URL, symptomatic_data_path)
    repurpose_data_path = os.path.join(directory, 'repurpose_overlap.json')
if not os.path.exists(repurpose_data_path):
logger.info(f'downloading {REPURPOSE_DATA_URL}')
urlretrieve(REPURPOSE_DATA_URL, repurpose_data_path)
repo_data_path = os.path.join(directory, 'repo_data.csv')
if not os.path.exists(repo_data_path):
logger.info(f'downloading {REPO_DATA_URL}')
urlretrieve(REPO_DATA_URL, repo_data_path)
permutation_directory = os.path.join(directory, "permutations")
os.makedirs(permutation_directory, exist_ok=True)
permutation_paths = []
for i in range(5):
permutation_data_path = os.path.join(permutation_directory, PERMUTATION_DATA_FILE_FMT.format(i + 1))
if not os.path.exists(permutation_data_path):
url = PERMUTATION_DATA_URL_FMT.format(i + 1)
logger.info(f'downloading {url}')
urlretrieve(url, permutation_data_path)
permutation_paths.append(permutation_data_path)
data_edge2vec_path = os.path.join(directory, 'data_edge2vec')
return DataPaths(
node_data_path=node_data_path,
edge_data_path=edge_data_path,
transformed_features_path=transformed_features_path,
validate_data_path=validate_data_path,
symptomatic_data_path=symptomatic_data_path,
permutation_paths=permutation_paths,
data_edge2vec_path=data_edge2vec_path,
        repurpose_data_path=repurpose_data_path,
        repo_data_path=repo_data_path,
)
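# Illustrative usage (a sketch; the directory below is hypothetical):
#
#     paths = get_data_paths()                   # honours REPOSITIONING_COMPARISON_DIRECTORY, else the default
#     paths = get_data_paths('/tmp/drugrelink')  # or download into an explicit directory
#     print(paths.node_data_path)                # e.g. /tmp/drugrelink/nodes.tsv
#
# Files already present on disk are reused; only missing ones are downloaded.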
|
[] |
[] |
[
"REPOSITIONING_COMPARISON_DIRECTORY"
] |
[]
|
["REPOSITIONING_COMPARISON_DIRECTORY"]
|
python
| 1 | 0 | |
server-plugin/server-plugin-executor-serverdocker/src/main/java/io/onedev/server/plugin/executor/serverdocker/ServerDockerExecutor.java
|
package io.onedev.server.plugin.executor.serverdocker;
import static io.onedev.k8shelper.KubernetesHelper.*;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import javax.inject.Provider;
import javax.validation.ConstraintValidatorContext;
import org.apache.commons.lang3.SystemUtils;
import org.hibernate.validator.constraints.NotEmpty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Throwables;
import io.onedev.agent.DockerUtils;
import io.onedev.commons.loader.AppLoader;
import io.onedev.commons.bootstrap.Bootstrap;
import io.onedev.commons.utils.ExplicitException;
import io.onedev.commons.utils.FileUtils;
import io.onedev.commons.utils.PathUtils;
import io.onedev.commons.utils.StringUtils;
import io.onedev.commons.utils.TaskLogger;
import io.onedev.commons.utils.command.Commandline;
import io.onedev.commons.utils.command.ExecutionResult;
import io.onedev.commons.utils.command.LineConsumer;
import io.onedev.commons.utils.command.ProcessKiller;
import io.onedev.k8shelper.CacheInstance;
import io.onedev.k8shelper.CheckoutExecutable;
import io.onedev.k8shelper.CloneInfo;
import io.onedev.k8shelper.CommandExecutable;
import io.onedev.k8shelper.CompositeExecutable;
import io.onedev.k8shelper.LeafExecutable;
import io.onedev.k8shelper.LeafHandler;
import io.onedev.k8shelper.ServerExecutable;
import io.onedev.k8shelper.SshCloneInfo;
import io.onedev.server.OneDev;
import io.onedev.server.buildspec.Service;
import io.onedev.server.buildspec.job.JobContext;
import io.onedev.server.buildspec.job.JobManager;
import io.onedev.server.git.config.GitConfig;
import io.onedev.server.job.resource.ResourceManager;
import io.onedev.server.model.support.RegistryLogin;
import io.onedev.server.model.support.administration.jobexecutor.JobExecutor;
import io.onedev.server.plugin.executor.serverdocker.ServerDockerExecutor.TestData;
import io.onedev.server.util.ExceptionUtils;
import io.onedev.server.util.PKCS12CertExtractor;
import io.onedev.server.util.ServerConfig;
import io.onedev.server.util.patternset.PatternSet;
import io.onedev.server.util.validation.Validatable;
import io.onedev.server.util.validation.annotation.ClassValidating;
import io.onedev.server.web.editable.annotation.Editable;
import io.onedev.server.web.editable.annotation.Horizontal;
import io.onedev.server.web.editable.annotation.NameOfEmptyValue;
import io.onedev.server.web.editable.annotation.OmitName;
import io.onedev.server.web.util.Testable;
@Editable(order=200, name="Server Docker Executor", description="This executor runs build jobs as docker containers on OneDev server")
@ClassValidating
@Horizontal
public class ServerDockerExecutor extends JobExecutor implements Testable<TestData>, Validatable {
private static final long serialVersionUID = 1L;
private static final Logger logger = LoggerFactory.getLogger(ServerDockerExecutor.class);
private static final Object cacheHomeCreationLock = new Object();
private List<RegistryLogin> registryLogins = new ArrayList<>();
private String runOptions;
private String dockerExecutable;
private transient volatile String outerInstallPath;
@Editable(order=400, description="Specify login information for docker registries if necessary")
public List<RegistryLogin> getRegistryLogins() {
return registryLogins;
}
public void setRegistryLogins(List<RegistryLogin> registryLogins) {
this.registryLogins = registryLogins;
}
@Editable(order=50050, group="More Settings", description="Optionally specify options used to run the container. For instance, you may use <tt>-m 2g</tt> "
+ "to limit the memory of the created container to 2 gigabytes")
public String getRunOptions() {
return runOptions;
}
public void setRunOptions(String runOptions) {
this.runOptions = runOptions;
}
@Editable(order=50100, group="More Settings", description="Optionally specify the docker executable, for instance <i>/usr/local/bin/docker</i>. "
+ "Leave empty to use the docker executable in PATH")
@NameOfEmptyValue("Use default")
public String getDockerExecutable() {
return dockerExecutable;
}
public void setDockerExecutable(String dockerExecutable) {
this.dockerExecutable = dockerExecutable;
}
private Commandline newDocker() {
if (getDockerExecutable() != null)
return new Commandline(getDockerExecutable());
else
return new Commandline("docker");
}
private File getCacheHome() {
File file = new File(Bootstrap.getSiteDir(), "cache");
if (!file.exists()) synchronized (cacheHomeCreationLock) {
FileUtils.createDir(file);
}
return file;
}
private LineConsumer newInfoLogger(TaskLogger jobLogger) {
return new LineConsumer(StandardCharsets.UTF_8.name()) {
private final String sessionId = UUID.randomUUID().toString();
@Override
public void consume(String line) {
jobLogger.log(line, sessionId);
}
};
}
private LineConsumer newErrorLogger(TaskLogger jobLogger) {
return new LineConsumer(StandardCharsets.UTF_8.name()) {
@Override
public void consume(String line) {
jobLogger.warning(line);
}
};
}
protected List<String> getTrustCertContent() {
List<String> trustCertContent = new ArrayList<>();
ServerConfig serverConfig = OneDev.getInstance(ServerConfig.class);
File keystoreFile = serverConfig.getKeystoreFile();
if (keystoreFile != null) {
String password = serverConfig.getKeystorePassword();
for (Map.Entry<String, String> entry: new PKCS12CertExtractor(keystoreFile, password).extact().entrySet())
trustCertContent.addAll(Splitter.on('\n').trimResults().splitToList(entry.getValue()));
}
if (serverConfig.getTrustCertsDir() != null) {
for (File file: serverConfig.getTrustCertsDir().listFiles()) {
if (file.isFile()) {
try {
trustCertContent.addAll(FileUtils.readLines(file, UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
}
return trustCertContent;
}
@Override
public void execute(String jobToken, JobContext jobContext) {
File hostBuildHome = FileUtils.createTempDir("onedev-build");
try {
TaskLogger jobLogger = jobContext.getLogger();
OneDev.getInstance(ResourceManager.class).run(new Runnable() {
@Override
public void run() {
String network = getName() + "-" + jobContext.getProjectName() + "-"
+ jobContext.getBuildNumber() + "-" + jobContext.getRetried();
jobLogger.log(String.format("Executing job (executor: %s, network: %s)...", getName(), network));
jobContext.notifyJobRunning(null);
JobManager jobManager = OneDev.getInstance(JobManager.class);
File hostCacheHome = getCacheHome();
jobLogger.log("Allocating job caches...") ;
Map<CacheInstance, Date> cacheInstances = getCacheInstances(hostCacheHome);
Map<CacheInstance, String> cacheAllocations = jobManager.allocateJobCaches(jobToken, new Date(), cacheInstances);
preprocess(hostCacheHome, cacheAllocations, new Consumer<File>() {
@Override
public void accept(File directory) {
DockerUtils.cleanDirAsRoot(directory, newDocker(), Bootstrap.isInDocker());
}
});
login(jobLogger);
DockerUtils.createNetwork(newDocker(), network, jobLogger);
try {
for (Service jobService: jobContext.getServices()) {
jobLogger.log("Starting service (name: " + jobService.getName() + ", image: " + jobService.getImage() + ")...");
DockerUtils.startService(newDocker(), network, jobService.toMap(), jobLogger);
}
AtomicReference<File> workspaceCache = new AtomicReference<>(null);
for (Map.Entry<CacheInstance, String> entry: cacheAllocations.entrySet()) {
if (PathUtils.isCurrent(entry.getValue())) {
workspaceCache.set(entry.getKey().getDirectory(hostCacheHome));
break;
}
}
File hostWorkspace;
if (workspaceCache.get() != null) {
hostWorkspace = workspaceCache.get();
} else {
hostWorkspace = new File(hostBuildHome, "workspace");
FileUtils.createDir(hostWorkspace);
}
AtomicReference<File> hostAuthInfoHome = new AtomicReference<>(null);
try {
jobLogger.log("Copying job dependencies...");
jobContext.copyDependencies(hostWorkspace);
String containerBuildHome;
String containerWorkspace;
String containerEntryPoint;
if (SystemUtils.IS_OS_WINDOWS) {
containerBuildHome = "C:\\onedev-build";
containerWorkspace = "C:\\onedev-build\\workspace";
containerEntryPoint = "cmd";
} else {
containerBuildHome = "/onedev-build";
containerWorkspace = "/onedev-build/workspace";
containerEntryPoint = "sh";
}
jobContext.reportJobWorkspace(containerWorkspace);
CompositeExecutable entryExecutable = new CompositeExecutable(jobContext.getActions());
List<String> errorMessages = new ArrayList<>();
entryExecutable.execute(new LeafHandler() {
@Override
public boolean execute(LeafExecutable executable, List<Integer> position) {
String stepNames = entryExecutable.getNamesAsString(position);
jobLogger.log("Running step \"" + stepNames + "\"...");
if (executable instanceof CommandExecutable) {
CommandExecutable commandExecutable = (CommandExecutable) executable;
String[] containerCommand;
if (SystemUtils.IS_OS_WINDOWS) {
if (hostAuthInfoHome.get() != null)
containerCommand = new String[] {"/c", "xcopy /Y /S /K /Q /H /R C:\\Users\\%USERNAME%\\auth-info\\* C:\\Users\\%USERNAME% > nul && C:\\onedev-build\\job-commands.bat"};
else
containerCommand = new String[] {"/c", "C:\\onedev-build\\job-commands.bat"};
File scriptFile = new File(hostBuildHome, "job-commands.bat");
try {
FileUtils.writeLines(
scriptFile,
new ArrayList<>(replacePlaceholders(commandExecutable.getCommands(), hostBuildHome)),
"\r\n");
} catch (IOException e) {
throw new RuntimeException(e);
}
} else {
if (hostAuthInfoHome.get() != null)
containerCommand = new String[] {"-c", "cp -r -f -p /root/auth-info/. /root && sh /onedev-build/job-commands.sh"};
else
containerCommand = new String[] {"/onedev-build/job-commands.sh"};
File scriptFile = new File(hostBuildHome, "job-commands.sh");
try {
FileUtils.writeLines(
scriptFile,
new ArrayList<>(replacePlaceholders(commandExecutable.getCommands(), hostBuildHome)),
"\n");
} catch (IOException e) {
throw new RuntimeException(e);
}
}
String containerName = network + "-step-" + stringifyPosition(position);
Commandline docker = newDocker();
docker.clearArgs();
docker.addArgs("run", "--name=" + containerName, "--network=" + network);
if (getRunOptions() != null)
docker.addArgs(StringUtils.parseQuoteTokens(getRunOptions()));
docker.addArgs("-v", getOuterPath(hostBuildHome.getAbsolutePath()) + ":" + containerBuildHome);
if (workspaceCache.get() != null)
docker.addArgs("-v", getOuterPath(workspaceCache.get().getAbsolutePath()) + ":" + containerWorkspace);
for (Map.Entry<CacheInstance, String> entry: cacheAllocations.entrySet()) {
if (!PathUtils.isCurrent(entry.getValue())) {
String hostCachePath = entry.getKey().getDirectory(hostCacheHome).getAbsolutePath();
String containerCachePath = PathUtils.resolve(containerWorkspace, entry.getValue());
docker.addArgs("-v", getOuterPath(hostCachePath) + ":" + containerCachePath);
}
}
if (SystemUtils.IS_OS_LINUX)
docker.addArgs("-v", "/var/run/docker.sock:/var/run/docker.sock");
if (hostAuthInfoHome.get() != null) {
String outerPath = getOuterPath(hostAuthInfoHome.get().getAbsolutePath());
if (SystemUtils.IS_OS_WINDOWS) {
docker.addArgs("-v", outerPath + ":C:\\Users\\ContainerAdministrator\\auth-info");
docker.addArgs("-v", outerPath + ":C:\\Users\\ContainerUser\\auth-info");
} else {
docker.addArgs("-v", outerPath + ":/root/auth-info");
}
}
if (commandExecutable.isUseTTY())
docker.addArgs("-t");
docker.addArgs("-w", containerWorkspace, "--entrypoint=" + containerEntryPoint);
docker.addArgs(commandExecutable.getImage());
docker.addArgs(containerCommand);
ProcessKiller killer = DockerUtils.newDockerKiller(newDocker(), containerName, jobLogger);
ExecutionResult result = docker.execute(newInfoLogger(jobLogger), newErrorLogger(jobLogger), null, killer);
if (result.getReturnCode() != 0) {
errorMessages.add("Step \"" + stepNames + "\": Command failed with exit code " + result.getReturnCode());
return false;
} else {
return true;
}
} else if (executable instanceof CheckoutExecutable) {
try {
CheckoutExecutable checkoutExecutable = (CheckoutExecutable) executable;
jobLogger.log("Checking out code...");
if (hostAuthInfoHome.get() == null)
hostAuthInfoHome.set(FileUtils.createTempDir());
Commandline git = new Commandline(AppLoader.getInstance(GitConfig.class).getExecutable());
git.workingDir(hostWorkspace).environments().put("HOME", hostAuthInfoHome.get().getAbsolutePath());
CloneInfo cloneInfo = checkoutExecutable.getCloneInfo();
cloneInfo.writeAuthData(hostAuthInfoHome.get(), git, newInfoLogger(jobLogger), newErrorLogger(jobLogger));
List<String> trustCertContent = getTrustCertContent();
if (!trustCertContent.isEmpty()) {
installGitCert(new File(hostAuthInfoHome.get(), "trust-cert.pem"), trustCertContent,
git, newInfoLogger(jobLogger), newErrorLogger(jobLogger));
}
int cloneDepth = checkoutExecutable.getCloneDepth();
cloneRepository(git, jobContext.getProjectGitDir().getAbsolutePath(),
jobContext.getCommitId().name(), cloneDepth,
newInfoLogger(jobLogger), newErrorLogger(jobLogger));
addOriginRemote(git, cloneInfo.getCloneUrl(), newInfoLogger(jobLogger), newErrorLogger(jobLogger));
if (SystemUtils.IS_OS_WINDOWS || !(cloneInfo instanceof SshCloneInfo)) {
updateSubmodulesIfNecessary(git, cloneDepth, newInfoLogger(jobLogger), newErrorLogger(jobLogger));
} else if (new File(hostWorkspace, ".gitmodules").exists()) {
/*
* We need to update submodules within a helper image in order to use our own .ssh folder.
* Specifying HOME env to change ~/.ssh folder does not have effect on Linux
*/
Provider<Commandline> dockerProvider = new Provider<Commandline>() {
@Override
public Commandline get() {
return newDocker();
}
};
String hostAuthInfoHomeOuterPath = getOuterPath(hostAuthInfoHome.get().getAbsolutePath());
String workspaceOuterPath = getOuterPath(hostWorkspace.getAbsolutePath());
DockerUtils.dockerUpdateSubmodules(dockerProvider, network, hostAuthInfoHomeOuterPath,
workspaceOuterPath, cloneDepth, jobLogger);
}
return true;
} catch (Exception e) {
errorMessages.add("Step \"" + stepNames + "\" is failed: " + getErrorMessage(e));
return false;
}
} else {
ServerExecutable serverExecutable = (ServerExecutable) executable;
File filesDir = FileUtils.createTempDir();
try {
Collection<String> placeholders = serverExecutable.getPlaceholders();
Map<String, String> placeholderValues = readPlaceholderValues(hostBuildHome, placeholders);
PatternSet filePatterns = new PatternSet(
new HashSet<>(replacePlaceholders(serverExecutable.getIncludeFiles(), placeholderValues)),
new HashSet<>(replacePlaceholders(serverExecutable.getExcludeFiles(), placeholderValues)));
int baseLen = hostWorkspace.getAbsolutePath().length()+1;
for (File file: filePatterns.listFiles(hostWorkspace)) {
try {
FileUtils.copyFile(file, new File(filesDir, file.getAbsolutePath().substring(baseLen)));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
Map<String, byte[]> outputFiles = jobContext.runServerStep(position, filesDir, placeholderValues, jobLogger);
if (outputFiles != null) {
for (Map.Entry<String, byte[]> entry: outputFiles.entrySet()) {
FileUtils.writeByteArrayToFile(
new File(hostBuildHome, entry.getKey()),
entry.getValue());
}
}
return true;
} catch (Exception e) {
errorMessages.add("Step \"" + stepNames + "\" is failed: " + getErrorMessage(e));
return false;
} finally {
FileUtils.deleteDir(filesDir);
}
}
}
@Override
public void skip(LeafExecutable executable, List<Integer> position) {
jobLogger.log("Skipping step \"" + entryExecutable.getNamesAsString(position) + "\"...");
}
}, new ArrayList<>());
if (!errorMessages.isEmpty())
throw new ExplicitException(errorMessages.iterator().next());
jobLogger.log("Reporting job caches...");
jobManager.reportJobCaches(jobToken, getCacheInstances(hostCacheHome).keySet());
} finally {
if (hostAuthInfoHome.get() != null)
FileUtils.deleteDir(hostAuthInfoHome.get());
}
} finally {
DockerUtils.deleteNetwork(newDocker(), network, jobLogger);
}
}
}, jobContext.getResourceRequirements(), jobLogger);
} finally {
DockerUtils.cleanDirAsRoot(hostBuildHome, newDocker(), Bootstrap.isInDocker());
FileUtils.deleteDir(hostBuildHome);
}
}
private void login(TaskLogger jobLogger) {
for (RegistryLogin login: getRegistryLogins())
DockerUtils.login(newDocker(), login.getRegistryUrl(), login.getUserName(), login.getPassword(), jobLogger);
}
private boolean hasOptions(String[] arguments, String... options) {
for (String argument: arguments) {
for (String option: options) {
if (option.startsWith("--")) {
if (argument.startsWith(option + "=") || argument.equals(option))
return true;
} else if (option.startsWith("-")) {
if (argument.startsWith(option))
return true;
} else {
throw new ExplicitException("Invalid option: " + option);
}
}
}
return false;
}
private String getErrorMessage(Exception exception) {
String errorMessage = ExceptionUtils.getExpectedError(exception);
if (errorMessage == null)
errorMessage = Throwables.getStackTraceAsString(exception);
return errorMessage;
}
@Override
public boolean isValid(ConstraintValidatorContext context) {
boolean isValid = true;
Set<String> registryUrls = new HashSet<>();
for (RegistryLogin login: getRegistryLogins()) {
if (!registryUrls.add(login.getRegistryUrl())) {
isValid = false;
String message;
if (login.getRegistryUrl() != null)
message = "Duplicate login entry for registry '" + login.getRegistryUrl() + "'";
else
message = "Duplicate login entry for official registry";
context.buildConstraintViolationWithTemplate(message)
.addPropertyNode("registryLogins").addConstraintViolation();
break;
}
}
if (getRunOptions() != null) {
String[] arguments = StringUtils.parseQuoteTokens(getRunOptions());
String[] reservedOptions = new String[] {"-w", "--workdir", "-d", "--detach", "-a", "--attach", "-t", "--tty",
"-i", "--interactive", "--rm", "--restart", "--name"};
if (hasOptions(arguments, reservedOptions)) {
StringBuilder errorMessage = new StringBuilder("Cannot use options: "
+ Joiner.on(", ").join(reservedOptions));
context.buildConstraintViolationWithTemplate(errorMessage.toString())
.addPropertyNode("runOptions").addConstraintViolation();
isValid = false;
}
}
if (!isValid)
context.disableDefaultConstraintViolation();
return isValid;
}
private String getOuterPath(String hostPath) {
String hostInstallPath = Bootstrap.installDir.getAbsolutePath();
Preconditions.checkState(hostPath.startsWith(hostInstallPath + "/")
|| hostPath.startsWith(hostInstallPath + "\\"));
if (outerInstallPath == null) {
if (Bootstrap.isInDocker()) {
AtomicReference<String> installDirRef = new AtomicReference<>(null);
Commandline docker = newDocker();
String inspectFormat = String.format(
"{{range .Mounts}} {{if eq .Destination \"%s\"}} {{.Source}} {{end}} {{end}}",
hostInstallPath);
docker.addArgs("inspect", "-f", inspectFormat, System.getenv("HOSTNAME"));
docker.execute(new LineConsumer() {
@Override
public void consume(String line) {
installDirRef.set(line.trim());
}
}, new LineConsumer() {
@Override
public void consume(String line) {
logger.error(line);
}
}).checkReturnCode();
outerInstallPath = Preconditions.checkNotNull(installDirRef.get());
} else {
outerInstallPath = hostInstallPath;
}
}
return outerInstallPath + hostPath.substring(hostInstallPath.length());
}
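// Illustrative example (hypothetical mount, not part of the original source): if the
// server runs inside a container where the host directory /srv/onedev is mounted at
// the install path /opt/onedev, then getOuterPath("/opt/onedev/temp/build/workspace")
// yields "/srv/onedev/temp/build/workspace", which is the form the host docker daemon
// expects for "-v" bind mounts.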
@Override
public void test(TestData testData, TaskLogger jobLogger) {
login(jobLogger);
jobLogger.log("Running container...");
File workspaceDir = null;
File cacheDir = null;
Commandline docker = newDocker();
try {
workspaceDir = FileUtils.createTempDir("workspace");
cacheDir = new File(getCacheHome(), UUID.randomUUID().toString());
FileUtils.createDir(cacheDir);
jobLogger.log("Test running specified docker image...");
docker.clearArgs();
docker.addArgs("run", "--rm");
if (getRunOptions() != null)
docker.addArgs(StringUtils.parseQuoteTokens(getRunOptions()));
String containerWorkspacePath;
String containerCachePath;
if (SystemUtils.IS_OS_WINDOWS) {
containerWorkspacePath = "C:\\onedev-build\\workspace";
containerCachePath = "C:\\onedev-build\\cache";
} else {
containerWorkspacePath = "/onedev-build/workspace";
containerCachePath = "/onedev-build/cache";
}
docker.addArgs("-v", getOuterPath(workspaceDir.getAbsolutePath()) + ":" + containerWorkspacePath);
docker.addArgs("-v", getOuterPath(cacheDir.getAbsolutePath()) + ":" + containerCachePath);
docker.addArgs("-w", containerWorkspacePath);
docker.addArgs(testData.getDockerImage());
if (SystemUtils.IS_OS_WINDOWS)
docker.addArgs("cmd", "/c", "echo hello from container");
else
docker.addArgs("sh", "-c", "echo hello from container");
docker.execute(new LineConsumer() {
@Override
public void consume(String line) {
jobLogger.log(line);
}
}, new LineConsumer() {
@Override
public void consume(String line) {
jobLogger.log(line);
}
}).checkReturnCode();
} finally {
if (workspaceDir != null)
FileUtils.deleteDir(workspaceDir);
if (cacheDir != null)
FileUtils.deleteDir(cacheDir);
}
if (!SystemUtils.IS_OS_WINDOWS) {
jobLogger.log("Test running busybox...");
docker = newDocker();
docker.addArgs("run", "--rm", "busybox", "sh", "-c", "echo hello from busybox");
docker.execute(new LineConsumer() {
@Override
public void consume(String line) {
jobLogger.log(line);
}
}, new LineConsumer() {
@Override
public void consume(String line) {
jobLogger.log(line);
}
}).checkReturnCode();
}
}
@Editable(name="Specify a Docker Image to Test Against")
public static class TestData implements Serializable {
private static final long serialVersionUID = 1L;
private String dockerImage;
@Editable
@OmitName
@NotEmpty
public String getDockerImage() {
return dockerImage;
}
public void setDockerImage(String dockerImage) {
this.dockerImage = dockerImage;
}
}
}
|
[
"\"HOSTNAME\""
] |
[] |
[
"HOSTNAME"
] |
[]
|
["HOSTNAME"]
|
java
| 1 | 0 | |
beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hive.beeline;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.io.output.NullOutputStream;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.HiveMetaException;
import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hive.beeline.HiveSchemaHelper.NestedScriptParser;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.ImmutableMap;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URI;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class HiveSchemaTool {
private String userName = null;
private String passWord = null;
private boolean dryRun = false;
private boolean verbose = false;
private String dbOpts = null;
private URI[] validationServers = null; // The list of servers on which a database/partition/table location may reside
private final HiveConf hiveConf;
private final String dbType;
private final MetaStoreSchemaInfo metaStoreSchemaInfo;
static final private Logger LOG = LoggerFactory.getLogger(HiveSchemaTool.class.getName());
public HiveSchemaTool(String dbType) throws HiveMetaException {
this(System.getenv("HIVE_HOME"), new HiveConf(HiveSchemaTool.class), dbType);
}
public HiveSchemaTool(String hiveHome, HiveConf hiveConf, String dbType)
throws HiveMetaException {
if (hiveHome == null || hiveHome.isEmpty()) {
throw new HiveMetaException("No Hive home directory provided");
}
this.hiveConf = hiveConf;
this.dbType = dbType;
this.metaStoreSchemaInfo = new MetaStoreSchemaInfo(hiveHome, dbType);
userName = hiveConf.get(ConfVars.METASTORE_CONNECTION_USER_NAME.varname);
try {
passWord = ShimLoader.getHadoopShims().getPassword(hiveConf,
HiveConf.ConfVars.METASTOREPWD.varname);
} catch (IOException err) {
throw new HiveMetaException("Error getting metastore password", err);
}
}
public HiveConf getHiveConf() {
return hiveConf;
}
public void setUserName(String userName) {
this.userName = userName;
}
public void setPassWord(String passWord) {
this.passWord = passWord;
}
public void setDryRun(boolean dryRun) {
this.dryRun = dryRun;
}
public void setVerbose(boolean verbose) {
this.verbose = verbose;
}
public void setDbOpts(String dbOpts) {
this.dbOpts = dbOpts;
}
public void setValidationServers(String servers) {
if(StringUtils.isNotEmpty(servers)) {
String[] strServers = servers.split(",");
this.validationServers = new URI[strServers.length];
for (int i = 0; i < validationServers.length; i++) {
validationServers[i] = new Path(strServers[i]).toUri();
}
}
}
private static void printAndExit(Options cmdLineOptions) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("schemaTool", cmdLineOptions);
System.exit(1);
}
Connection getConnectionToMetastore(boolean printInfo)
throws HiveMetaException {
return HiveSchemaHelper.getConnectionToMetastore(userName,
passWord, printInfo, hiveConf);
}
private NestedScriptParser getDbCommandParser(String dbType) {
return HiveSchemaHelper.getDbCommandParser(dbType, dbOpts, userName,
passWord, hiveConf);
}
/***
* Print Hive version and schema version
* @throws MetaException
*/
public void showInfo() throws HiveMetaException {
Connection metastoreConn = getConnectionToMetastore(true);
String hiveVersion = MetaStoreSchemaInfo.getHiveSchemaVersion();
String dbVersion = getMetaStoreSchemaVersion(metastoreConn);
System.out.println("Hive distribution version:\t " + hiveVersion);
System.out.println("Metastore schema version:\t " + dbVersion);
assertCompatibleVersion(hiveVersion, dbVersion);
}
private String getMetaStoreSchemaVersion(Connection metastoreConn)
throws HiveMetaException {
return getMetaStoreSchemaVersion(metastoreConn, false);
}
// read schema version from metastore
private String getMetaStoreSchemaVersion(Connection metastoreConn,
boolean checkDuplicatedVersion) throws HiveMetaException {
String versionQuery;
if (getDbCommandParser(dbType).needsQuotedIdentifier()) {
versionQuery = "select t.\"SCHEMA_VERSION\" from \"VERSION\" t";
} else {
versionQuery = "select t.SCHEMA_VERSION from VERSION t";
}
try(Statement stmt = metastoreConn.createStatement();
ResultSet res = stmt.executeQuery(versionQuery)) {
if (!res.next()) {
throw new HiveMetaException("Could not find version info in metastore VERSION table");
}
String currentSchemaVersion = res.getString(1);
if (checkDuplicatedVersion && res.next()) {
throw new HiveMetaException("Multiple versions were found in metastore.");
}
return currentSchemaVersion;
} catch (SQLException e) {
throw new HiveMetaException("Failed to get schema version, Cause:" + e.getMessage());
}
}
boolean validateLocations(Connection conn, URI[] defaultServers) throws HiveMetaException {
System.out.println("Validating database/table/partition locations");
boolean rtn;
rtn = checkMetaStoreDBLocation(conn, defaultServers);
rtn = checkMetaStoreTableLocation(conn, defaultServers) && rtn;
rtn = checkMetaStorePartitionLocation(conn, defaultServers) && rtn;
rtn = checkMetaStoreSkewedColumnsLocation(conn, defaultServers) && rtn;
System.out.println((rtn ? "Succeeded" : "Failed") + " in database/table/partition location validation");
return rtn;
}
private String getNameOrID(ResultSet res, int nameInx, int idInx) throws SQLException {
String itemName = res.getString(nameInx);
return (itemName == null || itemName.isEmpty()) ? "ID: " + res.getString(idInx) : "Name: " + itemName;
}
private boolean checkMetaStoreDBLocation(Connection conn, URI[] defaultServers)
throws HiveMetaException {
String dbLoc;
boolean isValid = true;
int numOfInvalid = 0;
if (getDbCommandParser(dbType).needsQuotedIdentifier()) {
dbLoc = "select dbt.\"DB_ID\", dbt.\"NAME\", dbt.\"DB_LOCATION_URI\" from \"DBS\" dbt";
} else {
dbLoc = "select dbt.DB_ID, dbt.NAME, dbt.DB_LOCATION_URI from DBS dbt";
}
try(Statement stmt = conn.createStatement();
ResultSet res = stmt.executeQuery(dbLoc)) {
while (res.next()) {
String locValue = res.getString(3);
String dbName = getNameOrID(res,2,1);
if (!checkLocation("Database " + dbName, locValue, defaultServers)) {
numOfInvalid++;
}
}
} catch (SQLException e) {
throw new HiveMetaException("Failed to get DB Location Info.", e);
}
if (numOfInvalid > 0) {
isValid = false;
System.err.println("Total number of invalid DB locations is: "+ numOfInvalid);
}
return isValid;
}
private boolean checkMetaStoreTableLocation(Connection conn, URI[] defaultServers)
throws HiveMetaException {
String tabLoc, tabIDRange;
boolean isValid = true;
int numOfInvalid = 0;
if (getDbCommandParser(dbType).needsQuotedIdentifier()) {
tabIDRange = "select max(\"TBL_ID\"), min(\"TBL_ID\") from \"TBLS\" ";
} else {
tabIDRange = "select max(TBL_ID), min(TBL_ID) from TBLS";
}
if (getDbCommandParser(dbType).needsQuotedIdentifier()) {
tabLoc = "select tbl.\"TBL_ID\", tbl.\"TBL_NAME\", sd.\"LOCATION\", dbt.\"DB_ID\", dbt.\"NAME\" from \"TBLS\" tbl inner join " +
"\"SDS\" sd on tbl.\"SD_ID\" = sd.\"SD_ID\" and tbl.\"TBL_TYPE\" != '" + TableType.VIRTUAL_VIEW +
"' and tbl.\"TBL_ID\" >= ? and tbl.\"TBL_ID\"<= ? " + "inner join \"DBS\" dbt on tbl.\"DB_ID\" = dbt.\"DB_ID\" ";
} else {
tabLoc = "select tbl.TBL_ID, tbl.TBL_NAME, sd.LOCATION, dbt.DB_ID, dbt.NAME from TBLS tbl join SDS sd on tbl.SD_ID = sd.SD_ID and tbl.TBL_TYPE !='"
+ TableType.VIRTUAL_VIEW + "' and tbl.TBL_ID >= ? and tbl.TBL_ID <= ? inner join DBS dbt on tbl.DB_ID = dbt.DB_ID";
}
long maxID = 0, minID = 0;
long rtnSize = 2000;
try {
Statement stmt = conn.createStatement();
ResultSet res = stmt.executeQuery(tabIDRange);
if (res.next()) {
maxID = res.getLong(1);
minID = res.getLong(2);
}
res.close();
stmt.close();
PreparedStatement pStmt = conn.prepareStatement(tabLoc);
while (minID <= maxID) {
pStmt.setLong(1, minID);
pStmt.setLong(2, minID + rtnSize);
res = pStmt.executeQuery();
while (res.next()) {
String locValue = res.getString(3);
String entity = "Database " + getNameOrID(res, 5, 4) +
", Table " + getNameOrID(res,2,1);
if (!checkLocation(entity, locValue, defaultServers)) {
numOfInvalid++;
}
}
res.close();
minID += rtnSize + 1;
}
pStmt.close();
} catch (SQLException e) {
throw new HiveMetaException("Failed to get Table Location Info.", e);
}
if (numOfInvalid > 0) {
isValid = false;
System.err.println("Total number of invalid TABLE locations is: "+ numOfInvalid);
}
return isValid;
}
private boolean checkMetaStorePartitionLocation(Connection conn, URI[] defaultServers)
throws HiveMetaException {
String partLoc, partIDRange;
boolean isValid = true;
int numOfInvalid = 0;
if (getDbCommandParser(dbType).needsQuotedIdentifier()) {
partIDRange = "select max(\"PART_ID\"), min(\"PART_ID\") from \"PARTITIONS\" ";
} else {
partIDRange = "select max(PART_ID), min(PART_ID) from PARTITIONS";
}
if (getDbCommandParser(dbType).needsQuotedIdentifier()) {
partLoc = "select pt.\"PART_ID\", pt.\"PART_NAME\", sd.\"LOCATION\", tbl.\"TBL_ID\", tbl.\"TBL_NAME\",dbt.\"DB_ID\", dbt.\"NAME\" from \"PARTITIONS\" pt "
+ "inner join \"SDS\" sd on pt.\"SD_ID\" = sd.\"SD_ID\" and pt.\"PART_ID\" >= ? and pt.\"PART_ID\"<= ? "
+ " inner join \"TBLS\" tbl on pt.\"TBL_ID\" = tbl.\"TBL_ID\" inner join "
+ "\"DBS\" dbt on tbl.\"DB_ID\" = dbt.\"DB_ID\" ";
} else {
partLoc = "select pt.PART_ID, pt.PART_NAME, sd.LOCATION, tbl.TBL_ID, tbl.TBL_NAME, dbt.DB_ID, dbt.NAME from PARTITIONS pt "
+ "inner join SDS sd on pt.SD_ID = sd.SD_ID and pt.PART_ID >= ? and pt.PART_ID <= ? "
+ "inner join TBLS tbl on tbl.TBL_ID = pt.TBL_ID inner join DBS dbt on tbl.DB_ID = dbt.DB_ID ";
}
long maxID = 0, minID = 0;
long rtnSize = 2000;
try {
Statement stmt = conn.createStatement();
ResultSet res = stmt.executeQuery(partIDRange);
if (res.next()) {
maxID = res.getLong(1);
minID = res.getLong(2);
}
res.close();
stmt.close();
PreparedStatement pStmt = conn.prepareStatement(partLoc);
while (minID <= maxID) {
pStmt.setLong(1, minID);
pStmt.setLong(2, minID + rtnSize);
res = pStmt.executeQuery();
while (res.next()) {
String locValue = res.getString(3);
String entity = "Database " + getNameOrID(res,7,6) +
", Table " + getNameOrID(res,5,4) +
", Partition " + getNameOrID(res,2,1);
if (!checkLocation(entity, locValue, defaultServers)) {
numOfInvalid++;
}
}
res.close();
minID += rtnSize + 1;
}
pStmt.close();
} catch (SQLException e) {
throw new HiveMetaException("Failed to get Partiton Location Info.", e);
}
if (numOfInvalid > 0) {
isValid = false;
System.err.println("Total number of invalid PARTITION locations is: "+ numOfInvalid);
}
return isValid;
}
private boolean checkMetaStoreSkewedColumnsLocation(Connection conn, URI[] defaultServers)
throws HiveMetaException {
String skewedColLoc, skewedColIDRange;
boolean isValid = true;
int numOfInvalid = 0;
if (getDbCommandParser(dbType).needsQuotedIdentifier()) {
skewedColIDRange = "select max(\"STRING_LIST_ID_KID\"), min(\"STRING_LIST_ID_KID\") from \"SKEWED_COL_VALUE_LOC_MAP\" ";
} else {
skewedColIDRange = "select max(STRING_LIST_ID_KID), min(STRING_LIST_ID_KID) from SKEWED_COL_VALUE_LOC_MAP";
}
if (getDbCommandParser(dbType).needsQuotedIdentifier()) {
skewedColLoc = "select t.\"TBL_NAME\", t.\"TBL_ID\", sk.\"STRING_LIST_ID_KID\", sk.\"LOCATION\" from \"TBLS\" t, \"SDS\" s, \"SKEWED_COL_VALUE_LOC_MAP\" sk "
+ "where sk.\"SD_ID\" = s.\"SD_ID\" and s.\"SD_ID\" = t.\"SD_ID\" and sk.\"STRING_LIST_ID_KID\" >= ? and sk.\"STRING_LIST_ID_KID\" <= ? ";
} else {
skewedColLoc = "select t.TBL_NAME, t.TBL_ID, sk.STRING_LIST_ID_KID, sk.LOCATION from TBLS t, SDS s, SKEWED_COL_VALUE_LOC_MAP sk "
+ "where sk.SD_ID = s.SD_ID and s.SD_ID = t.SD_ID and sk.STRING_LIST_ID_KID >= ? and sk.STRING_LIST_ID_KID <= ? ";
}
long maxID = 0, minID = 0;
long rtnSize = 2000;
try {
Statement stmt = conn.createStatement();
ResultSet res = stmt.executeQuery(skewedColIDRange);
if (res.next()) {
maxID = res.getLong(1);
minID = res.getLong(2);
}
res.close();
stmt.close();
PreparedStatement pStmt = conn.prepareStatement(skewedColLoc);
while (minID <= maxID) {
pStmt.setLong(1, minID);
pStmt.setLong(2, minID + rtnSize);
res = pStmt.executeQuery();
while (res.next()) {
String locValue = res.getString(4);
String entity = "Table " + getNameOrID(res,1,2) +
", String list " + res.getString(3);
if (!checkLocation(entity, locValue, defaultServers)) {
numOfInvalid++;
}
}
res.close();
minID += rtnSize + 1;
}
pStmt.close();
} catch (SQLException e) {
throw new HiveMetaException("Failed to get skewed columns location info.", e);
}
if (numOfInvalid > 0) {
isValid = false;
System.err.println("Total number of invalid SKEWED_COL_VALUE_LOC_MAP locations is: "+ numOfInvalid);
}
return isValid;
}
/**
* Check if the location is valid for the given entity
* @param entity the entity to represent a database, partition or table
* @param entityLocation the location
* @param defaultServers a list of the servers that the location needs to match.
* The location host needs to match one of the given servers.
* If empty, then no check against such list.
* @return true if the location is valid
*/
private boolean checkLocation(
String entity,
String entityLocation,
URI[] defaultServers) {
boolean isValid = true;
if (entityLocation == null) {
System.err.println(entity + ", error: empty location");
isValid = false;
} else {
try {
URI currentUri = new Path(entityLocation).toUri();
String scheme = currentUri.getScheme();
if (StringUtils.isEmpty(scheme)) {
System.err.println(entity + ", location: "+ entityLocation + ", error: missing location scheme");
isValid = false;
} else if (ArrayUtils.isNotEmpty(defaultServers) && currentUri.getAuthority() != null) {
String authority = currentUri.getAuthority();
boolean matchServer = false;
for(URI server : defaultServers) {
if (StringUtils.equalsIgnoreCase(server.getScheme(), scheme) &&
StringUtils.equalsIgnoreCase(server.getAuthority(), authority)) {
matchServer = true;
break;
}
}
if (!matchServer) {
System.err.println(entity + ", location: " + entityLocation + ", error: mismatched server");
isValid = false;
}
}
} catch (Exception pe) {
System.err.println(entity + ", error: invalid location " + pe.getMessage());
isValid = false;
}
}
return isValid;
}
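// Illustrative example (hypothetical URIs): with defaultServers = {hdfs://nn1:8020},
// "hdfs://nn1:8020/warehouse/sales.db" is accepted, "hdfs://other:8020/warehouse/sales.db"
// is reported as a mismatched server, and "/warehouse/sales.db" is reported as missing
// a location scheme.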
// test the connection metastore using the config property
private void testConnectionToMetastore() throws HiveMetaException {
Connection conn = getConnectionToMetastore(true);
try {
conn.close();
} catch (SQLException e) {
throw new HiveMetaException("Failed to close metastore connection", e);
}
}
/**
* check if the current schema version in metastore matches the Hive version
* @throws MetaException
*/
public void verifySchemaVersion() throws HiveMetaException {
// don't check the version if it's a dry run
if (dryRun) {
return;
}
String newSchemaVersion = getMetaStoreSchemaVersion(
getConnectionToMetastore(false));
// verify that the new version is added to schema
assertCompatibleVersion(MetaStoreSchemaInfo.getHiveSchemaVersion(), newSchemaVersion);
}
private void assertCompatibleVersion(String hiveSchemaVersion, String dbSchemaVersion)
throws HiveMetaException {
if (!MetaStoreSchemaInfo.isVersionCompatible(hiveSchemaVersion, dbSchemaVersion)) {
throw new HiveMetaException("Metastore schema version is not compatible. Hive Version: "
+ hiveSchemaVersion + ", Database Schema Version: " + dbSchemaVersion);
}
}
/**
* Perform metastore schema upgrade. extract the current schema version from metastore
* @throws MetaException
*/
public void doUpgrade() throws HiveMetaException {
String fromVersion = getMetaStoreSchemaVersion(
getConnectionToMetastore(false));
if (fromVersion == null || fromVersion.isEmpty()) {
throw new HiveMetaException("Schema version not stored in the metastore. " +
"Metastore schema is too old or corrupt. Try specifying the version manually");
}
doUpgrade(fromVersion);
}
/**
* Perform metastore schema upgrade
*
* @param fromSchemaVer
* Existing version of the metastore. If null, then read from the metastore
* @throws MetaException
*/
public void doUpgrade(String fromSchemaVer) throws HiveMetaException {
if (MetaStoreSchemaInfo.getHiveSchemaVersion().equals(fromSchemaVer)) {
System.out.println("No schema upgrade required from version " + fromSchemaVer);
return;
}
// Find the list of scripts to execute for this upgrade
// Get the scripts needed for this upgrade; fromSchemaVer was read from the metastore database
List<String> upgradeScripts =
metaStoreSchemaInfo.getUpgradeScripts(fromSchemaVer);
testConnectionToMetastore();
System.out.println("Starting upgrade metastore schema from version " +
fromSchemaVer + " to " + MetaStoreSchemaInfo.getHiveSchemaVersion());
String scriptDir = metaStoreSchemaInfo.getMetaStoreScriptDir();
try {
for (String scriptFile : upgradeScripts) {
System.out.println("Upgrade script " + scriptFile);
if (!dryRun) {
runPreUpgrade(scriptDir, scriptFile);
runBeeLine(scriptDir, scriptFile);
System.out.println("Completed " + scriptFile);
}
}
} catch (IOException eIO) {
throw new HiveMetaException(
"Upgrade FAILED! Metastore state would be inconsistent !!", eIO);
}
// Revalidate the new schema version after the upgrade
verifySchemaVersion();
}
/**
* Initialize the metastore schema to current version
*
* @throws MetaException
*/
public void doInit() throws HiveMetaException {
doInit(MetaStoreSchemaInfo.getHiveSchemaVersion());
// Revalidate the schema version after initialization
verifySchemaVersion();
}
/**
* Initialize the metastore schema
*
* @param toVersion
* If null then current hive version is used
* @throws MetaException
*/
public void doInit(String toVersion) throws HiveMetaException {
testConnectionToMetastore();
System.out.println("Starting metastore schema initialization to " + toVersion);
String initScriptDir = metaStoreSchemaInfo.getMetaStoreScriptDir();
String initScriptFile = metaStoreSchemaInfo.generateInitFileName(toVersion);
try {
System.out.println("Initialization script " + initScriptFile);
if (!dryRun) {
runBeeLine(initScriptDir, initScriptFile);
System.out.println("Initialization script completed");
}
} catch (IOException e) {
throw new HiveMetaException("Schema initialization FAILED!" +
" Metastore state would be inconsistent !!", e);
}
}
public void doValidate() throws HiveMetaException {
System.out.println("Starting metastore validation\n");
Connection conn = getConnectionToMetastore(false);
boolean success = true;
try {
if (validateSchemaVersions(conn)) {
System.out.println("[SUCCESS]\n");
} else {
success = false;
System.out.println("[FAIL]\n");
}
if (validateSequences(conn)) {
System.out.println("[SUCCESS]\n");
} else {
success = false;
System.out.println("[FAIL]\n");
}
if (validateSchemaTables(conn)) {
System.out.println("[SUCCESS]\n");
} else {
success = false;
System.out.println("[FAIL]\n");
}
if (validateLocations(conn, this.validationServers)) {
System.out.println("[SUCCESS]\n");
} else {
success = false;
System.out.println("[FAIL]\n");
}
if (validateColumnNullValues(conn)) {
System.out.println("[SUCCESS]\n");
} else {
success = false;
System.out.println("[FAIL]\n");
}
} finally {
if (conn != null) {
try {
conn.close();
} catch (SQLException e) {
throw new HiveMetaException("Failed to close metastore connection", e);
}
}
}
System.out.print("Done with metastore validation: ");
if (!success) {
System.out.println("[FAIL]");
System.exit(1);
} else {
System.out.println("[SUCCESS]");
}
}
boolean validateSequences(Connection conn) throws HiveMetaException {
Map<String, Pair<String, String>> seqNameToTable =
new ImmutableMap.Builder<String, Pair<String, String>>()
.put("MDatabase", Pair.of("DBS", "DB_ID"))
.put("MRole", Pair.of("ROLES", "ROLE_ID"))
.put("MGlobalPrivilege", Pair.of("GLOBAL_PRIVS", "USER_GRANT_ID"))
.put("MTable", Pair.of("TBLS","TBL_ID"))
.put("MStorageDescriptor", Pair.of("SDS", "SD_ID"))
.put("MSerDeInfo", Pair.of("SERDES", "SERDE_ID"))
.put("MColumnDescriptor", Pair.of("CDS", "CD_ID"))
.put("MTablePrivilege", Pair.of("TBL_PRIVS", "TBL_GRANT_ID"))
.put("MTableColumnStatistics", Pair.of("TAB_COL_STATS", "CS_ID"))
.put("MPartition", Pair.of("PARTITIONS", "PART_ID"))
.put("MPartitionColumnStatistics", Pair.of("PART_COL_STATS", "CS_ID"))
.put("MFunction", Pair.of("FUNCS", "FUNC_ID"))
.put("MIndex", Pair.of("IDXS", "INDEX_ID"))
.put("MStringList", Pair.of("SKEWED_STRING_LIST", "STRING_LIST_ID"))
.build();
System.out.println("Validating sequence number for SEQUENCE_TABLE");
boolean isValid = true;
try {
Statement stmt = conn.createStatement();
for (String seqName : seqNameToTable.keySet()) {
String tableName = seqNameToTable.get(seqName).getLeft();
String tableKey = seqNameToTable.get(seqName).getRight();
String seqQuery = getDbCommandParser(dbType).needsQuotedIdentifier() ?
("select t.\"NEXT_VAL\" from \"SEQUENCE_TABLE\" t WHERE t.\"SEQUENCE_NAME\"='org.apache.hadoop.hive.metastore.model." + seqName + "'")
: ("select t.NEXT_VAL from SEQUENCE_TABLE t WHERE t.SEQUENCE_NAME='org.apache.hadoop.hive.metastore.model." + seqName + "'");
String maxIdQuery = getDbCommandParser(dbType).needsQuotedIdentifier() ?
("select max(\"" + tableKey + "\") from \"" + tableName + "\"")
: ("select max(" + tableKey + ") from " + tableName);
ResultSet res = stmt.executeQuery(maxIdQuery);
if (res.next()) {
long maxId = res.getLong(1);
if (maxId > 0) {
ResultSet resSeq = stmt.executeQuery(seqQuery);
if (!resSeq.next()) {
isValid = false;
System.err.println("Missing SEQUENCE_NAME " + seqName + " from SEQUENCE_TABLE");
} else if (resSeq.getLong(1) < maxId) {
isValid = false;
System.err.println("NEXT_VAL for " + seqName + " in SEQUENCE_TABLE < max("+ tableKey + ") in " + tableName);
}
}
}
}
System.out.println((isValid ? "Succeeded" :"Failed") + " in sequence number validation for SEQUENCE_TABLE");
return isValid;
} catch(SQLException e) {
throw new HiveMetaException("Failed to validate sequence number for SEQUENCE_TABLE", e);
}
}
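// Illustrative failure case (hypothetical numbers): if max(TBL_ID) in TBLS is 120 but
// NEXT_VAL for org.apache.hadoop.hive.metastore.model.MTable in SEQUENCE_TABLE is 100,
// newly created tables could be assigned ids that already exist, so the check above
// flags the sequence as invalid.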
boolean validateSchemaVersions(Connection conn) throws HiveMetaException {
System.out.println("Validating schema version");
try {
String newSchemaVersion = getMetaStoreSchemaVersion(conn, true);
assertCompatibleVersion(MetaStoreSchemaInfo.getHiveSchemaVersion(), newSchemaVersion);
} catch (HiveMetaException hme) {
if (hme.getMessage().contains("Metastore schema version is not compatible")
|| hme.getMessage().contains("Multiple versions were found in metastore")
|| hme.getMessage().contains("Could not find version info in metastore VERSION table")) {
System.out.println("Failed in schema version validation: " + hme.getMessage());
return false;
} else {
throw hme;
}
}
System.out.println("Succeeded in schema version validation.");
return true;
}
boolean validateSchemaTables(Connection conn) throws HiveMetaException {
String version = null;
ResultSet rs = null;
DatabaseMetaData metadata = null;
List<String> dbTables = new ArrayList<String>();
List<String> schemaTables = new ArrayList<String>();
List<String> subScripts = new ArrayList<String>();
Connection hmsConn = getConnectionToMetastore(false);
System.out.println("Validating metastore schema tables");
try {
version = getMetaStoreSchemaVersion(hmsConn);
} catch (HiveMetaException he) {
System.err.println("Failed to determine schema version from Hive Metastore DB," + he.getMessage());
LOG.debug("Failed to determine schema version from Hive Metastore DB," + he.getMessage());
return false;
}
// re-open the hms connection
hmsConn = getConnectionToMetastore(false);
LOG.debug("Validating tables in the schema for version " + version);
try {
metadata = conn.getMetaData();
String[] types = {"TABLE"};
rs = metadata.getTables(null, null, "%", types);
String table = null;
while (rs.next()) {
table = rs.getString("TABLE_NAME");
dbTables.add(table.toLowerCase());
LOG.debug("Found table " + table + " in HMS dbstore");
}
} catch (SQLException e) {
throw new HiveMetaException("Failed to retrieve schema tables from Hive Metastore DB," + e.getMessage());
} finally {
if (rs != null) {
try {
rs.close();
} catch (SQLException e) {
throw new HiveMetaException("Failed to close resultset", e);
}
}
}
// parse the schema file to determine the tables that are expected to exist
// we are using oracle schema because it is simpler to parse, no quotes or backticks etc
String baseDir = new File(metaStoreSchemaInfo.getMetaStoreScriptDir()).getParent();
String schemaFile = baseDir + "/" + dbType + "/hive-schema-" + version + "." + dbType + ".sql";
try {
LOG.debug("Parsing schema script " + schemaFile);
subScripts.addAll(findCreateTable(schemaFile, schemaTables));
while (subScripts.size() > 0) {
schemaFile = baseDir + "/" + dbType + "/" + subScripts.remove(0);
LOG.debug("Parsing subscript " + schemaFile);
subScripts.addAll(findCreateTable(schemaFile, schemaTables));
}
} catch (Exception e) {
System.err.println("Exception in parsing schema file. Cause:" + e.getMessage());
System.out.println("Schema table validation failed!!!");
return false;
}
LOG.debug("Schema tables:[ " + Arrays.toString(schemaTables.toArray()) + " ]");
LOG.debug("DB tables:[ " + Arrays.toString(dbTables.toArray()) + " ]");
// now diff the lists
int schemaSize = schemaTables.size();
schemaTables.removeAll(dbTables);
if (schemaTables.size() > 0) {
System.out.println("Table(s) [ " + Arrays.toString(schemaTables.toArray())
+ " ] are missing from the metastore database schema.");
System.out.println("Schema table validation failed!!!");
return false;
} else {
System.out.println("Succeeded in schema table validation.");
return true;
}
}
private List<String> findCreateTable(String path, List<String> tableList)
throws Exception {
NestedScriptParser sp = HiveSchemaHelper.getDbCommandParser(dbType);
Matcher matcher = null;
Pattern regexp = null;
List<String> subs = new ArrayList<String>();
int groupNo = 0;
switch (dbType) {
case HiveSchemaHelper.DB_ORACLE:
regexp = Pattern.compile("(CREATE TABLE(IF NOT EXISTS)*) (\\S+).*");
groupNo = 3;
break;
case HiveSchemaHelper.DB_MYSQL:
regexp = Pattern.compile("(CREATE TABLE) (\\S+).*");
groupNo = 2;
break;
case HiveSchemaHelper.DB_MSSQL:
regexp = Pattern.compile("(CREATE TABLE) (\\S+).*");
groupNo = 2;
break;
case HiveSchemaHelper.DB_DERBY:
regexp = Pattern.compile("(CREATE TABLE(IF NOT EXISTS)*) (\\S+).*");
groupNo = 3;
break;
case HiveSchemaHelper.DB_POSTGRACE:
regexp = Pattern.compile("(CREATE TABLE(IF NOT EXISTS)*) (\\S+).*");
groupNo = 3;
break;
default:
regexp = Pattern.compile("(CREATE TABLE(IF NOT EXISTS)*) (\\S+).*");
groupNo = 3;
break;
}
if (!(new File(path)).exists()) {
throw new Exception(path + " does not exist. Potentially incorrect version in the metastore VERSION table");
}
try (
BufferedReader reader = new BufferedReader(new FileReader(path));
){
String line = null;
while ((line = reader.readLine()) != null) {
if (sp.isNestedScript(line)) {
String subScript = null;
subScript = sp.getScriptName(line);
LOG.debug("Schema subscript " + subScript + " found");
subs.add(subScript);
continue;
}
line = line.replaceAll("\\(", " ");
line = line.replaceAll("IF NOT EXISTS ", "");
line = line.replaceAll("`","");
line = line.replaceAll("'","");
line = line.replaceAll("\"","");
matcher = regexp.matcher(line);
if (matcher.find()) {
String table = matcher.group(groupNo);
if (dbType.equals("derby"))
table = table.replaceAll("APP.","");
tableList.add(table.toLowerCase());
LOG.debug("Found table " + table + " in the schema");
}
}
} catch (IOException ex){
throw new Exception(ex.getMessage());
}
return subs;
}
boolean validateColumnNullValues(Connection conn) throws HiveMetaException {
System.out.println("Validating columns for incorrect NULL values");
boolean isValid = true;
try {
Statement stmt = conn.createStatement();
String tblQuery = getDbCommandParser(dbType).needsQuotedIdentifier() ?
("select t.* from \"TBLS\" t WHERE t.\"SD_ID\" IS NULL and (t.\"TBL_TYPE\"='" + TableType.EXTERNAL_TABLE + "' or t.\"TBL_TYPE\"='" + TableType.MANAGED_TABLE + "')")
: ("select t.* from TBLS t WHERE t.SD_ID IS NULL and (t.TBL_TYPE='" + TableType.EXTERNAL_TABLE + "' or t.TBL_TYPE='" + TableType.MANAGED_TABLE + "')");
ResultSet res = stmt.executeQuery(tblQuery);
while (res.next()) {
long tableId = res.getLong("TBL_ID");
String tableName = res.getString("TBL_NAME");
String tableType = res.getString("TBL_TYPE");
isValid = false;
System.err.println("SD_ID in TBLS should not be NULL for Table Name=" + tableName + ", Table ID=" + tableId + ", Table Type=" + tableType);
}
System.out.println((isValid ? "Succeeded" : "Failed") + " in column validation for incorrect NULL values");
return isValid;
} catch(SQLException e) {
throw new HiveMetaException("Failed to validate columns for incorrect NULL values", e);
}
}
/**
* Run pre-upgrade scripts corresponding to a given upgrade script,
* if any exist. The errors from pre-upgrade are ignored.
* Pre-upgrade scripts typically contain setup statements which
* may fail on some database versions and failure is ignorable.
*
* @param scriptDir upgrade script directory name
* @param scriptFile upgrade script file name
*/
private void runPreUpgrade(String scriptDir, String scriptFile) {
for (int i = 0;; i++) {
String preUpgradeScript =
MetaStoreSchemaInfo.getPreUpgradeScriptName(i, scriptFile);
File preUpgradeScriptFile = new File(scriptDir, preUpgradeScript);
if (!preUpgradeScriptFile.isFile()) {
break;
}
try {
runBeeLine(scriptDir, preUpgradeScript);
System.out.println("Completed " + preUpgradeScript);
} catch (Exception e) {
// Ignore the pre-upgrade script errors
System.err.println("Warning in pre-upgrade script " + preUpgradeScript + ": "
+ e.getMessage());
if (verbose) {
e.printStackTrace();
}
}
}
}
/***
* Run beeline with the given metastore script. Flatten the nested scripts
* into single file.
*/
private void runBeeLine(String scriptDir, String scriptFile)
throws IOException, HiveMetaException {
NestedScriptParser dbCommandParser = getDbCommandParser(dbType);
// expand the nested script
// pull every statement out of the (possibly nested) scripts and turn them into individual SQL commands
String sqlCommands = dbCommandParser.buildCommand(scriptDir, scriptFile);
File tmpFile = File.createTempFile("schematool", ".sql");
tmpFile.deleteOnExit();
// write out the buffer into a file. Add beeline commands for autocommit and close
FileWriter fstream = new FileWriter(tmpFile.getPath());
BufferedWriter out = new BufferedWriter(fstream);
// layout of the generated SQL file:
/*
* !autocommit on
* all sql
* !closeall
* */
out.write("!autocommit on" + System.getProperty("line.separator"));
out.write(sqlCommands);
out.write("!closeall" + System.getProperty("line.separator"));
out.close();
runBeeLine(tmpFile.getPath());
}
// Generate the beeline args per hive conf and execute the given script
public void runBeeLine(String sqlScriptFile) throws IOException {
CommandBuilder builder = new CommandBuilder(hiveConf, userName, passWord, sqlScriptFile);
// run the script using Beeline
try (BeeLine beeLine = new BeeLine()) {
if (!verbose) {
beeLine.setOutputStream(new PrintStream(new NullOutputStream()));
beeLine.getOpts().setSilent(true);
}
beeLine.getOpts().setAllowMultiLineCommand(false);
beeLine.getOpts().setIsolation("TRANSACTION_READ_COMMITTED");
// We can be pretty sure that an entire line can be processed as a single command since
// we always add a line separator at the end while calling dbCommandParser.buildCommand.
beeLine.getOpts().setEntireLineAsCommand(true);
LOG.debug("Going to run command <" + builder.buildToLog() + ">");
int status = beeLine.begin(builder.buildToRun(), null);
if (status != 0) {
throw new IOException("Schema script failed, errorcode " + status);
}
}
}
static class CommandBuilder {
private final HiveConf hiveConf;
private final String userName;
private final String password;
private final String sqlScriptFile;
CommandBuilder(HiveConf hiveConf, String userName, String password, String sqlScriptFile) {
this.hiveConf = hiveConf;
this.userName = userName;
this.password = password;
this.sqlScriptFile = sqlScriptFile;
}
String[] buildToRun() throws IOException {
return argsWith(password);
}
String buildToLog() throws IOException {
logScript();
return StringUtils.join(argsWith(BeeLine.PASSWD_MASK), " ");
}
private String[] argsWith(String password) throws IOException {
return new String[] { "-u",
HiveSchemaHelper.getValidConfVar(ConfVars.METASTORECONNECTURLKEY, hiveConf), "-d",
HiveSchemaHelper.getValidConfVar(ConfVars.METASTORE_CONNECTION_DRIVER, hiveConf), "-n",
userName, "-p", password, "-f", sqlScriptFile };
}
private void logScript() throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Going to invoke file that contains:");
try (BufferedReader reader = new BufferedReader(new FileReader(sqlScriptFile))) {
String line;
while ((line = reader.readLine()) != null) {
LOG.debug("script: " + line);
}
}
}
}
}
// Create the required command line options
@SuppressWarnings("static-access")
private static void initOptions(Options cmdLineOptions) {
Option help = new Option("help", "print this message");
Option upgradeOpt = new Option("upgradeSchema", "Schema upgrade");
Option upgradeFromOpt = OptionBuilder.withArgName("upgradeFrom").hasArg().
withDescription("Schema upgrade from a version").
create("upgradeSchemaFrom");
Option initOpt = new Option("initSchema", "Schema initialization");
Option initToOpt = OptionBuilder.withArgName("initTo").hasArg().
withDescription("Schema initialization to a version").
create("initSchemaTo");
Option infoOpt = new Option("info", "Show config and schema details");
Option validateOpt = new Option("validate", "Validate the database");
OptionGroup optGroup = new OptionGroup();
optGroup.addOption(upgradeOpt).addOption(initOpt).
addOption(help).addOption(upgradeFromOpt).
addOption(initToOpt).addOption(infoOpt).addOption(validateOpt);
optGroup.setRequired(true);
Option userNameOpt = OptionBuilder.withArgName("user")
.hasArgs()
.withDescription("Override config file user name")
.create("userName");
Option passwdOpt = OptionBuilder.withArgName("password")
.hasArgs()
.withDescription("Override config file password")
.create("passWord");
Option dbTypeOpt = OptionBuilder.withArgName("databaseType")
.hasArgs().withDescription("Metastore database type")
.create("dbType");
Option dbOpts = OptionBuilder.withArgName("databaseOpts")
.hasArgs().withDescription("Backend DB specific options")
.create("dbOpts");
Option dryRunOpt = new Option("dryRun", "list SQL scripts (no execute)");
Option verboseOpt = new Option("verbose", "only print SQL statements");
Option serversOpt = OptionBuilder.withArgName("serverList")
.hasArgs().withDescription("a comma-separated list of servers used in location validation")
.create("servers");
cmdLineOptions.addOption(help);
cmdLineOptions.addOption(dryRunOpt);
cmdLineOptions.addOption(userNameOpt);
cmdLineOptions.addOption(passwdOpt);
cmdLineOptions.addOption(dbTypeOpt);
cmdLineOptions.addOption(verboseOpt);
cmdLineOptions.addOption(dbOpts);
cmdLineOptions.addOption(serversOpt);
cmdLineOptions.addOptionGroup(optGroup);
}
public static void main(String[] args) {
CommandLineParser parser = new GnuParser();
CommandLine line = null;
String dbType = null;
String schemaVer = null;
Options cmdLineOptions = new Options();
// Argument handling
initOptions(cmdLineOptions);
try {
line = parser.parse(cmdLineOptions, args);
} catch (ParseException e) {
System.err.println("HiveSchemaTool:Parsing failed. Reason: " + e.getLocalizedMessage());
printAndExit(cmdLineOptions);
}
if (line.hasOption("help")) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("schemaTool", cmdLineOptions);
return;
}
if (line.hasOption("dbType")) {
dbType = line.getOptionValue("dbType");
if ((!dbType.equalsIgnoreCase(HiveSchemaHelper.DB_DERBY) &&
!dbType.equalsIgnoreCase(HiveSchemaHelper.DB_MSSQL) &&
!dbType.equalsIgnoreCase(HiveSchemaHelper.DB_MYSQL) &&
!dbType.equalsIgnoreCase(HiveSchemaHelper.DB_POSTGRACE) && !dbType
.equalsIgnoreCase(HiveSchemaHelper.DB_ORACLE))) {
System.err.println("Unsupported dbType " + dbType);
printAndExit(cmdLineOptions);
}
} else {
System.err.println("no dbType supplied");
printAndExit(cmdLineOptions);
}
System.setProperty(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.varname, "true");
try {
HiveSchemaTool schemaTool = new HiveSchemaTool(dbType);
if (line.hasOption("userName")) {
schemaTool.setUserName(line.getOptionValue("userName"));
}
if (line.hasOption("passWord")) {
schemaTool.setPassWord(line.getOptionValue("passWord"));
}
if (line.hasOption("dryRun")) {
schemaTool.setDryRun(true);
}
if (line.hasOption("verbose")) {
schemaTool.setVerbose(true);
}
if (line.hasOption("dbOpts")) {
schemaTool.setDbOpts(line.getOptionValue("dbOpts"));
}
if (line.hasOption("validate") && line.hasOption("servers")) {
schemaTool.setValidationServers(line.getOptionValue("servers"));
}
if (line.hasOption("info")) {
schemaTool.showInfo();
} else if (line.hasOption("upgradeSchema")) {
schemaTool.doUpgrade();
} else if (line.hasOption("upgradeSchemaFrom")) {
schemaVer = line.getOptionValue("upgradeSchemaFrom");
schemaTool.doUpgrade(schemaVer);
} else if (line.hasOption("initSchema")) {
schemaTool.doInit();
} else if (line.hasOption("initSchemaTo")) {
schemaVer = line.getOptionValue("initSchemaTo");
schemaTool.doInit(schemaVer);
} else if (line.hasOption("validate")) {
schemaTool.doValidate();
} else {
System.err.println("no valid option supplied");
printAndExit(cmdLineOptions);
}
} catch (HiveMetaException e) {
System.err.println(e);
if (e.getCause() != null) {
Throwable t = e.getCause();
System.err.println("Underlying cause: "
+ t.getClass().getName() + " : "
+ t.getMessage());
if (e.getCause() instanceof SQLException) {
System.err.println("SQL Error code: " + ((SQLException)t).getErrorCode());
}
}
if (line.hasOption("verbose")) {
e.printStackTrace();
} else {
System.err.println("Use --verbose for detailed stacktrace.");
}
System.err.println("*** schemaTool failed ***");
System.exit(1);
}
System.out.println("schemaTool completed");
}
}
|
[
"\"HIVE_HOME\""
] |
[] |
[
"HIVE_HOME"
] |
[]
|
["HIVE_HOME"]
|
java
| 1 | 0 | |
wifipumpkin3/plugins/external/sslstrip/ServerConnectionFactory.py
|
# Copyright (c) 2004-2009 Moxie Marlinspike
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import logging
from twisted.internet.protocol import ClientFactory
class ServerConnectionFactory(ClientFactory):
def __init__(self, command, uri, postData, headers, client):
self.command = command
self.uri = uri
self.postData = postData
self.headers = headers
self.client = client
def buildProtocol(self, addr):
return self.protocol(
self.command, self.uri, self.postData, self.headers, self.client
)
def clientConnectionFailed(self, connector, reason):
logging.debug("Server connection failed.")
destination = connector.getDestination()
if destination.port != 443:
logging.debug("Retrying via SSL")
self.client.proxyViaSSL(
self.headers["host"],
self.command,
self.uri,
self.postData,
self.headers,
443,
)
else:
self.client.finish()
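# --- Illustrative usage sketch (not part of the original sslstrip sources) ---
# The factory above retries a failed plain connection over SSL on port 443; the proxy
# code that uses it is expected to assign the protocol class before connecting, roughly
# as below (ServerConnection and the reactor import are assumptions for illustration):
#
#   from twisted.internet import reactor
#   from .ServerConnection import ServerConnection
#
#   factory = ServerConnectionFactory(command, uri, postData, headers, client)
#   factory.protocol = ServerConnection
#   reactor.connectTCP(host, port, factory)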
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
cvat/apps/engine/views.py
|
# Copyright (C) 2018-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
import errno
import io
import os
import os.path as osp
from django.db.models import query
import pytz
import shutil
import traceback
import uuid
from datetime import datetime
from distutils.util import strtobool
from tempfile import mkstemp, NamedTemporaryFile
import json
import csv
from django.template.loader import get_template
import cv2
from django.db.models.query import Prefetch
import django_rq
from django.apps import apps
from django.conf import settings
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseBadRequest, response
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.utils.decorators import method_decorator
from django_filters import rest_framework as filters
from django_filters.rest_framework import DjangoFilterBackend
from drf_yasg import openapi
from drf_yasg.inspectors import CoreAPICompatInspector, NotHandled, FieldInspector
from drf_yasg.utils import swagger_auto_schema
from rest_framework import mixins, serializers, status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import APIException, NotFound, ValidationError
from rest_framework.permissions import SAFE_METHODS, IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from sendfile import sendfile
import cvat.apps.dataset_manager as dm
import cvat.apps.dataset_manager.views # pylint: disable=unused-import
from cvat.apps.authentication import auth
from cvat.apps.engine.cloud_provider import get_cloud_storage_instance, Credentials, Status
from cvat.apps.dataset_manager.bindings import CvatImportError
from cvat.apps.dataset_manager.serializers import DatasetFormatsSerializer
from cvat.apps.engine.frame_provider import FrameProvider
from cvat.apps.engine.media_extractors import ImageListReader
from cvat.apps.engine.mime_types import mimetypes
from cvat.apps.engine.models import (
Job, StatusChoice, Task, Project, Review, Issue,
Comment, StorageMethodChoice, ReviewStatus, StorageChoice, Image,
CredentialsTypeChoice, CloudProviderChoice, Segment
)
from cvat.apps.engine.models import CloudStorage as CloudStorageModel
from cvat.apps.engine.serializers import (
AboutSerializer, AnnotationFileSerializer, BasicUserSerializer,
DataMetaSerializer, DataSerializer, ExceptionSerializer,
FileInfoSerializer, JobSerializer, LabeledDataSerializer,
LogEventSerializer, ProjectSerializer, ProjectSearchSerializer, ProjectWithoutTaskSerializer,
RqStatusSerializer, TaskSerializer, UserSerializer, PluginsSerializer, ReviewSerializer,
CombinedReviewSerializer, IssueSerializer, CombinedIssueSerializer, CommentSerializer,
CloudStorageSerializer, BaseCloudStorageSerializer, TaskFileSerializer)
from utils.dataset_manifest import ImageManifestManager
from cvat.apps.engine.utils import av_scan_paths
from cvat.apps.engine.backup import import_task
from . import models, task
from .log import clogger, slogger
from allauth.account.models import EmailAddress
from cvat.apps.restrictions.models import UserAgreementStatus
import smtplib, ssl
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from cvat.apps.engine.templates.email_template import admin_annot, annot_review
class ServerViewSet(viewsets.ViewSet):
serializer_class = None
# To get nice documentation about ServerViewSet actions it is necessary
# to implement the method. By default, ViewSet doesn't provide it.
def get_serializer(self, *args, **kwargs):
pass
@staticmethod
@swagger_auto_schema(method='get', operation_summary='Method provides basic CVAT information',
responses={'200': AboutSerializer})
@action(detail=False, methods=['GET'], serializer_class=AboutSerializer)
def about(request):
from cvat import __version__ as cvat_version
about = {
"name": "Computer Vision Annotation Tool",
"version": cvat_version,
"description": "CVAT is completely re-designed and re-implemented " +
"version of Video Annotation Tool from Irvine, California " +
"tool. It is free, online, interactive video and image annotation " +
"tool for computer vision. It is being used by our team to " +
"annotate million of objects with different properties. Many UI " +
"and UX decisions are based on feedbacks from professional data " +
"annotation team."
}
serializer = AboutSerializer(data=about)
if serializer.is_valid(raise_exception=True):
return Response(data=serializer.data)
@staticmethod
@swagger_auto_schema(method='post', request_body=ExceptionSerializer)
@action(detail=False, methods=['POST'], serializer_class=ExceptionSerializer)
def exception(request):
"""
Saves an exception from a client on the server
Sends logs to the ELK if it is connected
"""
serializer = ExceptionSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
additional_info = {
"username": request.user.username,
"name": "Send exception",
}
message = JSONRenderer().render({**serializer.data, **additional_info}).decode('UTF-8')
jid = serializer.data.get("job_id")
tid = serializer.data.get("task_id")
if jid:
clogger.job[jid].error(message)
elif tid:
clogger.task[tid].error(message)
else:
clogger.glob.error(message)
return Response(serializer.data, status=status.HTTP_201_CREATED)
@staticmethod
@swagger_auto_schema(method='post', request_body=LogEventSerializer(many=True))
@action(detail=False, methods=['POST'], serializer_class=LogEventSerializer)
def logs(request):
"""
Saves logs from a client on the server
Sends logs to the ELK if it is connected
"""
serializer = LogEventSerializer(many=True, data=request.data)
if serializer.is_valid(raise_exception=True):
user = { "username": request.user.username }
for event in serializer.data:
message = JSONRenderer().render({**event, **user}).decode('UTF-8')
jid = event.get("job_id")
tid = event.get("task_id")
if jid:
clogger.job[jid].info(message)
elif tid:
clogger.task[tid].info(message)
else:
clogger.glob.info(message)
return Response(serializer.data, status=status.HTTP_201_CREATED)
@staticmethod
@swagger_auto_schema(
method='get', operation_summary='Returns all files and folders that are on the server along specified path',
manual_parameters=[openapi.Parameter('directory', openapi.IN_QUERY, type=openapi.TYPE_STRING, description='Directory to browse')],
responses={'200' : FileInfoSerializer(many=True)}
)
@action(detail=False, methods=['GET'], serializer_class=FileInfoSerializer)
def share(request):
param = request.query_params.get('directory', '/')
if param.startswith("/"):
param = param[1:]
directory = os.path.abspath(os.path.join(settings.SHARE_ROOT, param))
if directory.startswith(settings.SHARE_ROOT) and os.path.isdir(directory):
data = []
content = os.scandir(directory)
for entry in content:
entry_type = None
if entry.is_file():
entry_type = "REG"
elif entry.is_dir():
entry_type = "DIR"
if entry_type:
data.append({"name": entry.name, "type": entry_type})
serializer = FileInfoSerializer(many=True, data=data)
if serializer.is_valid(raise_exception=True):
return Response(serializer.data)
else:
return Response("{} is an invalid directory".format(param),
status=status.HTTP_400_BAD_REQUEST)
@staticmethod
@swagger_auto_schema(method='get', operation_summary='Method provides the list of supported annotations formats',
responses={'200': DatasetFormatsSerializer()})
@action(detail=False, methods=['GET'], url_path='annotation/formats')
def annotation_formats(request):
data = dm.views.get_all_formats()
return Response(DatasetFormatsSerializer(data).data)
@staticmethod
@swagger_auto_schema(method='get', operation_summary='Method provides allowed plugins.',
responses={'200': PluginsSerializer()})
@action(detail=False, methods=['GET'], url_path='plugins', serializer_class=PluginsSerializer)
def plugins(request):
response = {
'GIT_INTEGRATION': apps.is_installed('cvat.apps.dataset_repo'),
'ANALYTICS': False,
'MODELS': False,
'PREDICT': apps.is_installed('cvat.apps.training')
}
if strtobool(os.environ.get("CVAT_ANALYTICS", '0')):
response['ANALYTICS'] = True
if strtobool(os.environ.get("CVAT_SERVERLESS", '0')):
response['MODELS'] = True
return Response(response)
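# A minimal, self-contained sketch (not part of upstream CVAT) of the containment check
# that ServerViewSet.share performs above: the requested path is resolved under SHARE_ROOT
# and only listed when the resolved directory still starts with that root, which rejects
# ".." traversal outside the share. The helper name is hypothetical.
def _is_browsable_share_dir(share_root, requested):
    import os
    candidate = os.path.abspath(os.path.join(share_root, requested.lstrip("/")))
    return candidate.startswith(share_root) and os.path.isdir(candidate)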
class ProjectFilter(filters.FilterSet):
name = filters.CharFilter(field_name="name", lookup_expr="icontains")
owner = filters.CharFilter(field_name="owner__username", lookup_expr="icontains")
assignee = filters.CharFilter(field_name="assignee__username", lookup_expr="icontains")
status = filters.CharFilter(field_name="status", lookup_expr="icontains")
project_type = filters.CharFilter(field_name="project_type", lookup_expr="icontains")
class Meta:
model = models.Project
fields = ("id", "name", "owner", "status", "project_type")
@method_decorator(name='list', decorator=swagger_auto_schema(
operation_summary='Returns a paginated list of projects according to query parameters (12 projects per page)',
manual_parameters=[
openapi.Parameter('id', openapi.IN_QUERY, description="A unique number value identifying this project",
type=openapi.TYPE_NUMBER),
openapi.Parameter('name', openapi.IN_QUERY, description="Find all projects where name contains a parameter value",
type=openapi.TYPE_STRING),
openapi.Parameter('owner', openapi.IN_QUERY, description="Find all project where owner name contains a parameter value",
type=openapi.TYPE_STRING),
openapi.Parameter('status', openapi.IN_QUERY, description="Find all projects with a specific status",
type=openapi.TYPE_STRING, enum=[str(i) for i in StatusChoice]),
openapi.Parameter('names_only', openapi.IN_QUERY, description="Returns only names and ids of projects.",
type=openapi.TYPE_BOOLEAN),
openapi.Parameter('without_tasks', openapi.IN_QUERY, description="Returns only project entities without related tasks",
type=openapi.TYPE_BOOLEAN)],))
@method_decorator(name='create', decorator=swagger_auto_schema(operation_summary='Method creates a new project'))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(operation_summary='Method returns details of a specific project'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method deletes a specific project'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(operation_summary='Method does a partial update of chosen fields in a project'))
class ProjectViewSet(auth.ProjectGetQuerySetMixin, viewsets.ModelViewSet):
queryset = models.Project.objects.all().order_by('-id')
search_fields = ("name", "owner__username", "assignee__username", "status", "project_type")
filterset_class = ProjectFilter
ordering_fields = ("id", "name", "owner", "status", "assignee", "project_type")
http_method_names = ['get', 'post', 'head', 'patch', 'delete']
def get_serializer_class(self):
if self.request.path.endswith('tasks'):
return TaskSerializer
if self.request.query_params and self.request.query_params.get("names_only") == "true":
return ProjectSearchSerializer
if self.request.query_params and self.request.query_params.get("without_tasks") == "true":
return ProjectWithoutTaskSerializer
else:
return ProjectSerializer
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.ProjectAccessPermission)
elif http_method in ["POST"]:
permissions.append(auth.ProjectCreatePermission)
elif http_method in ["PATCH"]:
permissions.append(auth.ProjectChangePermission)
elif http_method in ["DELETE"]:
permissions.append(auth.ProjectDeletePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
def perform_create(self, serializer):
def validate_project_limit(owner):
admin_perm = auth.AdminRolePermission()
is_admin = admin_perm.has_permission(self.request, self)
if not is_admin and settings.RESTRICTIONS['project_limit'] is not None and \
Project.objects.filter(owner=owner).count() >= settings.RESTRICTIONS['project_limit']:
raise serializers.ValidationError('The user has the maximum number of projects')
owner = self.request.data.get('owner', None)
if owner:
validate_project_limit(owner)
serializer.save()
else:
validate_project_limit(self.request.user)
serializer.save(owner=self.request.user)
@swagger_auto_schema(method='get', operation_summary='Returns information of the tasks of the project with the selected id',
responses={'200': TaskSerializer(many=True)})
@action(detail=True, methods=['GET'], serializer_class=TaskSerializer)
def tasks(self, request, pk):
print("tasksss")
self.get_object() # force to call check_object_permissions
queryset = Task.objects.filter(project_id=pk).order_by('-id')
queryset = auth.filter_task_queryset(queryset, request.user)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True,
context={"request": request})
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True,
context={"request": request})
return Response(serializer.data)
@swagger_auto_schema(method='get', operation_summary='Export project as a dataset in a specific format',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Desired output format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=True),
openapi.Parameter('filename', openapi.IN_QUERY,
description="Desired output file name",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('action', in_=openapi.IN_QUERY,
description='Used to start downloading process after annotation file had been created',
type=openapi.TYPE_STRING, required=False, enum=['download'])
],
responses={'202': openapi.Response(description='Exporting has been started'),
'201': openapi.Response(description='Output file is ready for downloading'),
'200': openapi.Response(description='Download of file started'),
'405': openapi.Response(description='Format is not available'),
}
)
@action(detail=True, methods=['GET'], serializer_class=None,
url_path='dataset')
def dataset_export(self, request, pk):
db_project = self.get_object() # force to call check_object_permissions
format_name = request.query_params.get("format", "")
return _export_annotations(db_instance=db_project,
rq_id="/api/v1/project/{}/dataset/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
callback=dm.views.export_project_as_dataset,
format_name=format_name,
filename=request.query_params.get("filename", "").lower(),
)
@swagger_auto_schema(method='get', operation_summary='Method allows to download project annotations',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Desired output format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=True),
openapi.Parameter('filename', openapi.IN_QUERY,
description="Desired output file name",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('action', in_=openapi.IN_QUERY,
description='Used to start downloading process after annotation file had been created',
type=openapi.TYPE_STRING, required=False, enum=['download'])
],
responses={
'202': openapi.Response(description='Dump of annotations has been started'),
'201': openapi.Response(description='Annotations file is ready to download'),
'200': openapi.Response(description='Download of file started'),
'405': openapi.Response(description='Format is not available'),
'401': openapi.Response(description='Format is not specified'),
}
)
@action(detail=True, methods=['GET'],
serializer_class=LabeledDataSerializer)
def annotations(self, request, pk):
db_project = self.get_object() # force to call check_object_permissions
format_name = request.query_params.get('format')
if format_name:
return _export_annotations(db_instance=db_project,
rq_id="/api/v1/projects/{}/annotations/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
callback=dm.views.export_project_annotations,
format_name=format_name,
filename=request.query_params.get("filename", "").lower(),
)
else:
return Response("Format is not specified",status=status.HTTP_400_BAD_REQUEST)
class TaskFilter(filters.FilterSet):
project = filters.CharFilter(field_name="project__name", lookup_expr="icontains")
name = filters.CharFilter(field_name="name", lookup_expr="icontains")
owner = filters.CharFilter(field_name="owner__username", lookup_expr="icontains")
mode = filters.CharFilter(field_name="mode", lookup_expr="icontains")
status = filters.CharFilter(field_name="status", lookup_expr="icontains")
assignee = filters.CharFilter(field_name="assignee__username", lookup_expr="icontains")
dimension = filters.CharFilter(field_name="dimension", lookup_expr="icontains")
class Meta:
model = Task
fields = ("id", "project_id", "project", "name", "owner", "mode", "status",
"assignee", "dimension")
class DjangoFilterInspector(CoreAPICompatInspector):
def get_filter_parameters(self, filter_backend):
if isinstance(filter_backend, DjangoFilterBackend):
result = super(DjangoFilterInspector, self).get_filter_parameters(filter_backend)
res = result.copy()
print("hhh409", res)
for param in result:
if param.get('name') == 'project_id' or param.get('name') == 'project':
res.remove(param)
return res
return NotHandled
@method_decorator(name='list', decorator=swagger_auto_schema(
operation_summary='Returns a paginated list of tasks according to query parameters (10 tasks per page)',
manual_parameters=[
openapi.Parameter('id',openapi.IN_QUERY,description="A unique number value identifying this task",type=openapi.TYPE_NUMBER),
openapi.Parameter('name', openapi.IN_QUERY, description="Find all tasks where name contains a parameter value", type=openapi.TYPE_STRING),
openapi.Parameter('owner', openapi.IN_QUERY, description="Find all tasks where owner name contains a parameter value", type=openapi.TYPE_STRING),
openapi.Parameter('mode', openapi.IN_QUERY, description="Find all tasks with a specific mode", type=openapi.TYPE_STRING, enum=['annotation', 'interpolation']),
openapi.Parameter('status', openapi.IN_QUERY, description="Find all tasks with a specific status", type=openapi.TYPE_STRING,enum=['annotation','validation','completed']),
openapi.Parameter('assignee', openapi.IN_QUERY, description="Find all tasks where assignee name contains a parameter value", type=openapi.TYPE_STRING)
],
filter_inspectors=[DjangoFilterInspector]))
@method_decorator(name='create', decorator=swagger_auto_schema(operation_summary='Method creates a new task in a database without any attached images and videos'))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(operation_summary='Method returns details of a specific task'))
@method_decorator(name='update', decorator=swagger_auto_schema(operation_summary='Method updates a task by id'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method deletes a specific task, all attached jobs, annotations, and data'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(operation_summary='Method does a partial update of chosen fields in a task'))
class TaskViewSet(auth.TaskGetQuerySetMixin, viewsets.ModelViewSet):
queryset = Task.objects.all().prefetch_related(
"label_set__attributespec_set",
"segment_set__job_set",
).order_by('-id')
serializer_class = TaskSerializer
search_fields = ("name", "owner__username", "mode", "status", "dimension")
filterset_class = TaskFilter
ordering_fields = ("id", "name", "owner", "status", "assignee", "dimension")
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.TaskAccessPermission)
elif http_method in ["POST"]:
permissions.append(auth.TaskCreatePermission)
elif self.action == 'annotations' or http_method in ["PATCH", "PUT"]:
permissions.append(auth.TaskChangePermission)
elif http_method in ["DELETE"]:
permissions.append(auth.TaskDeletePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
def _validate_task_limit(self, owner):
admin_perm = auth.AdminRolePermission()
is_admin = admin_perm.has_permission(self.request, self)
if not is_admin and settings.RESTRICTIONS['task_limit'] is not None and \
Task.objects.filter(owner=owner).count() >= settings.RESTRICTIONS['task_limit']:
raise serializers.ValidationError('The user has the maximum number of tasks')
def create(self, request):
action = self.request.query_params.get('action', None)
if action is None:
return super().create(request)
elif action == 'import':
self._validate_task_limit(owner=self.request.user)
if 'rq_id' in request.data:
rq_id = request.data['rq_id']
else:
rq_id = "{}@/api/v1/tasks/{}/import".format(request.user, uuid.uuid4())
queue = django_rq.get_queue("default")
rq_job = queue.fetch_job(rq_id)
if not rq_job:
serializer = TaskFileSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
task_file = serializer.validated_data['task_file']
fd, filename = mkstemp(prefix='cvat_')
with open(filename, 'wb+') as f:
for chunk in task_file.chunks():
f.write(chunk)
rq_job = queue.enqueue_call(
func=import_task,
args=(filename, request.user.id),
job_id=rq_id,
meta={
'tmp_file': filename,
'tmp_file_descriptor': fd,
},
)
else:
if rq_job.is_finished:
task_id = rq_job.return_value
os.close(rq_job.meta['tmp_file_descriptor'])
os.remove(rq_job.meta['tmp_file'])
rq_job.delete()
return Response({'id': task_id}, status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
os.close(rq_job.meta['tmp_file_descriptor'])
os.remove(rq_job.meta['tmp_file'])
exc_info = str(rq_job.exc_info)
rq_job.delete()
# RQ adds a prefix with exception class name
import_error_prefix = '{}.{}'.format(
CvatImportError.__module__, CvatImportError.__name__)
if exc_info.startswith(import_error_prefix):
exc_info = exc_info.replace(import_error_prefix + ': ', '')
return Response(data=exc_info,
status=status.HTTP_400_BAD_REQUEST)
else:
return Response(data=exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({'rq_id': rq_id}, status=status.HTTP_202_ACCEPTED)
else:
raise serializers.ValidationError(
"Unexpected action specified for the request")
def retrieve(self, request, pk=None):
db_task = self.get_object() # force to call check_object_permissions
action = self.request.query_params.get('action', None)
if action is None:
return super().retrieve(request, pk)
elif action in ('export', 'download'):
queue = django_rq.get_queue("default")
rq_id = "/api/v1/tasks/{}/export".format(pk)
print("line 521",pk, rq_id,queue)
rq_job = queue.fetch_job(rq_id)
print("Redisss", rq_job)
if rq_job:
last_task_update_time = timezone.localtime(db_task.updated_date)
request_time = rq_job.meta.get('request_time', None)
print("request_time",request_time)
if request_time is None or request_time < last_task_update_time:
rq_job.cancel()
rq_job.delete()
else:
if rq_job.is_finished:
file_path = rq_job.return_value
if action == "download" and osp.exists(file_path):
rq_job.delete()
timestamp = datetime.strftime(last_task_update_time,
"%Y_%m_%d_%H_%M_%S")
filename = "task_{}_backup_{}{}".format(
db_task.name, timestamp,
osp.splitext(file_path)[1])
print("line 549",file_path)
return sendfile(request, file_path, attachment=True,
attachment_filename=filename.lower())
else:
if osp.exists(file_path):
return Response(status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
exc_info = str(rq_job.exc_info)
rq_job.delete()
return Response(exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return Response(status=status.HTTP_202_ACCEPTED)
ttl = dm.views.TASK_CACHE_TTL.total_seconds()
queue.enqueue_call(
func=dm.views.backup_task,
args=(pk, 'task_dump.zip'),
job_id=rq_id,
meta={ 'request_time': timezone.localtime() },
result_ttl=ttl, failure_ttl=ttl)
return Response(pk,status=status.HTTP_202_ACCEPTED)
else:
raise serializers.ValidationError(
"Unexpected action specified for the request")
def perform_create(self, serializer):
owner = self.request.data.get('owner', None)
if owner:
self._validate_task_limit(owner)
serializer.save()
else:
self._validate_task_limit(self.request.user)
serializer.save(owner=self.request.user)
def perform_destroy(self, instance):
task_dirname = instance.get_task_dirname()
super().perform_destroy(instance)
shutil.rmtree(task_dirname, ignore_errors=True)
if instance.data and not instance.data.tasks.all():
shutil.rmtree(instance.data.get_data_dirname(), ignore_errors=True)
instance.data.delete()
@swagger_auto_schema(method='get', operation_summary='Returns a list of jobs for a specific task',
responses={'200': JobSerializer(many=True)})
@action(detail=True, methods=['GET'], serializer_class=JobSerializer)
def jobs(self, request, pk):
print("we are here")
self.get_object() # force to call check_object_permissions
queryset = Job.objects.filter(segment__task_id=pk)#request.user.username
serializer = JobSerializer(queryset, many=True,
context={"request": request})
#print(serializer.data)
print("respo in 620", response)
return Response(serializer.data)
# @swagger_auto_schema(method='get', operation_summary='Returns a list of jobs for a specific task',
# manual_parameters=[
# openapi.Parameter('assignee_id', openapi.IN_QUERY, description="This is an optional parameter. Find all Jobs where assignee id is parameter value", type=openapi.TYPE_NUMBER)
# ],
# responses={'200': JobSerializer(many=True)})
# @action(detail=True, methods=['GET'], serializer_class=JobSerializer)
# def jobs(self, request, pk):
# assignee_id = request.query_params.get('assignee_id', None)
# print("assignee id ", assignee_id)
# self.get_object() # force to call check_object_permissions
# # new code changes are done for jobs assignment to logged in users
# if assignee_id is not None:
# # find if current requestor is admin ?
# # if yes go to else
# # if not an admin fetch his user id as assignee id
# queryset = Job.objects.filter(segment__task_id=pk, assignee__id=assignee_id)
# else:
# queryset = Job.objects.filter(segment__task_id=pk)
# serializer = JobSerializer(queryset, many=True,
# context={"request": request})
# return Response(serializer.data)
@swagger_auto_schema(method='post', operation_summary='Method permanently attaches images or video to a task',
request_body=DataSerializer,
)
@swagger_auto_schema(method='get', operation_summary='Method returns data for a specific task',
manual_parameters=[
openapi.Parameter('type', in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_STRING,
enum=['chunk', 'frame', 'preview', 'context_image'],
description="Specifies the type of the requested data"),
openapi.Parameter('quality', in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_STRING,
enum=['compressed', 'original'],
description="Specifies the quality level of the requested data, doesn't matter for 'preview' type"),
openapi.Parameter('number', in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_NUMBER,
description="A unique number value identifying chunk or frame, doesn't matter for 'preview' type"),
]
)
@action(detail=True, methods=['POST', 'GET'])
def data(self, request, pk):
db_task = self.get_object() # call check_object_permissions as well
print("hhh", request.method)
if request.method == 'POST':
if db_task.data:
return Response(data='Adding more data is not supported',
status=status.HTTP_400_BAD_REQUEST)
serializer = DataSerializer(data=request.data)
print("{{{{{",request)
serializer.is_valid(raise_exception=True)
db_data = serializer.save()
db_task.data = db_data
db_task.save()
data = {k:v for k, v in serializer.data.items()}
data['use_zip_chunks'] = serializer.validated_data['use_zip_chunks']
data['use_cache'] = serializer.validated_data['use_cache']
data['copy_data'] = serializer.validated_data['copy_data']
if data['use_cache']:
db_task.data.storage_method = StorageMethodChoice.CACHE
db_task.data.save(update_fields=['storage_method'])
if data['server_files'] and not data.get('copy_data'):
db_task.data.storage = StorageChoice.SHARE
db_task.data.save(update_fields=['storage'])
if db_data.cloud_storage:
db_task.data.storage = StorageChoice.CLOUD_STORAGE
db_task.data.save(update_fields=['storage'])
# if the value of stop_frame is 0, then inside the function we cannot know
# the value specified by the user or it's default value from the database
if 'stop_frame' not in serializer.validated_data:
data['stop_frame'] = None
task.create(db_task.id, data)
print("respi in 694", response)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
else:
print("getmmwm")
data_type = request.query_params.get('type', None)
data_id = request.query_params.get('number', None)
data_quality = request.query_params.get('quality', 'compressed')
possible_data_type_values = ('chunk', 'frame', 'preview', 'context_image')
possible_quality_values = ('compressed', 'original')
try:
print(data_type,";;;;;;")
if not data_type or data_type not in possible_data_type_values:
raise ValidationError(detail='Data type not specified or has wrong value')
elif data_type == 'chunk' or data_type == 'frame':
if not data_id:
raise ValidationError(detail='Number is not specified')
elif data_quality not in possible_quality_values:
raise ValidationError(detail='Wrong quality value')
db_data = db_task.data
print( db_data,";;;;;;;;;")
if not db_data:
raise NotFound(detail='Cannot find requested data for the task')
frame_provider = FrameProvider(db_task.data, db_task.dimension)
if data_type == 'chunk':
data_id = int(data_id)
data_quality = FrameProvider.Quality.COMPRESSED \
if data_quality == 'compressed' else FrameProvider.Quality.ORIGINAL
#TODO: av.FFmpegError processing
if settings.USE_CACHE and db_data.storage_method == StorageMethodChoice.CACHE:
buff, mime_type = frame_provider.get_chunk(data_id, data_quality)
return HttpResponse(buff.getvalue(), content_type=mime_type)
# Follow symbol links if the chunk is a link on a real image otherwise
# mimetype detection inside sendfile will work incorrectly.
path = os.path.realpath(frame_provider.get_chunk(data_id, data_quality))
return sendfile(request, path)
elif data_type == 'frame':
data_id = int(data_id)
data_quality = FrameProvider.Quality.COMPRESSED \
if data_quality == 'compressed' else FrameProvider.Quality.ORIGINAL
buf, mime = frame_provider.get_frame(data_id, data_quality)
return HttpResponse(buf.getvalue(), content_type=mime)
elif data_type == 'preview':
return sendfile(request, frame_provider.get_preview())
elif data_type == 'context_image':
data_id = int(data_id)
image = Image.objects.get(data_id=db_data.id, frame=data_id)
for i in image.related_files.all():
path = os.path.realpath(str(i.path))
image = cv2.imread(path)
success, result = cv2.imencode('.JPEG', image)
if not success:
raise Exception('Failed to encode image to ".jpeg" format')
return HttpResponse(io.BytesIO(result.tobytes()), content_type='image/jpeg')
return Response(data='No context image related to the frame',
status=status.HTTP_404_NOT_FOUND)
else:
return Response(data='unknown data type {}.'.format(data_type), status=status.HTTP_400_BAD_REQUEST)
except APIException as e:
return Response(data=e.get_full_details(), status=e.status_code)
except FileNotFoundError as ex:
msg = f"{ex.strerror} {ex.filename}"
slogger.task[pk].error(msg, exc_info=True)
return Response(data=msg, status=status.HTTP_404_NOT_FOUND)
except Exception as e:
msg = 'cannot get requested data type: {}, number: {}, quality: {}'.format(data_type, data_id, data_quality)
slogger.task[pk].error(msg, exc_info=True)
return Response(data=msg + '\n' + str(e), status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(method='get', operation_summary='Method allows to download task annotations',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Desired output format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('filename', openapi.IN_QUERY,
description="Desired output file name",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('action', in_=openapi.IN_QUERY,
description='Used to start downloading process after annotation file had been created',
type=openapi.TYPE_STRING, required=False, enum=['download'])
],
responses={
'202': openapi.Response(description='Dump of annotations has been started'),
'201': openapi.Response(description='Annotations file is ready to download'),
'200': openapi.Response(description='Download of file started'),
'405': openapi.Response(description='Format is not available'),
}
)
@swagger_auto_schema(method='put', operation_summary='Method allows to upload task annotations',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Input format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=False),
],
responses={
'202': openapi.Response(description='Uploading has been started'),
'201': openapi.Response(description='Uploading has finished'),
'405': openapi.Response(description='Format is not available'),
}
)
@swagger_auto_schema(method='patch', operation_summary='Method performs a partial update of annotations in a specific task',
manual_parameters=[openapi.Parameter('action', in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_STRING,
enum=['create', 'update', 'delete'])])
@swagger_auto_schema(method='delete', operation_summary='Method deletes all annotations for a specific task')
@action(detail=True, methods=['GET', 'DELETE', 'PUT', 'PATCH'],
serializer_class=LabeledDataSerializer)
def annotations(self, request, pk):
db_task = self.get_object() # force to call check_object_permissions
if request.method == 'GET':
format_name = request.query_params.get('format')
print("//annotat//",format_name)
if format_name:
return _export_annotations(db_instance=db_task,
rq_id="/api/v1/tasks/{}/annotations/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
callback=dm.views.export_task_annotations,
format_name=format_name,
filename=request.query_params.get("filename", "").lower(),
)
else:
data = dm.task.get_task_data(pk)
serializer = LabeledDataSerializer(data=data)
if serializer.is_valid(raise_exception=True):
return Response(serializer.data)
elif request.method == 'PUT':
format_name = request.query_params.get('format')
if format_name:
return _import_annotations(
request=request,
rq_id="{}@/api/v1/tasks/{}/annotations/upload".format(request.user, pk),
rq_func=dm.task.import_task_annotations,
pk=pk,
format_name=format_name,
)
else:
serializer = LabeledDataSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
data = dm.task.put_task_data(pk, serializer.data)
return Response(data)
elif request.method == 'DELETE':
dm.task.delete_task_data(pk)
return Response(status=status.HTTP_204_NO_CONTENT)
elif request.method == 'PATCH':
action = self.request.query_params.get("action", None)
if action not in dm.task.PatchAction.values():
raise serializers.ValidationError(
"Please specify a correct 'action' for the request")
serializer = LabeledDataSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
try:
data = dm.task.patch_task_data(pk, serializer.data, action)
except (AttributeError, IntegrityError) as e:
return Response(data=str(e), status=status.HTTP_400_BAD_REQUEST)
return Response(data)
@swagger_auto_schema(method='get', operation_summary='When task is being created the method returns information about a status of the creation process')
@action(detail=True, methods=['GET'], serializer_class=RqStatusSerializer)
def status(self, request, pk):
self.get_object() # force to call check_object_permissions
response = self._get_rq_response(queue="default",
job_id="/api/{}/tasks/{}".format(request.version, pk))
serializer = RqStatusSerializer(data=response)
if serializer.is_valid(raise_exception=True):
return Response(serializer.data)
@staticmethod
def _get_rq_response(queue, job_id):
queue = django_rq.get_queue(queue)
job = queue.fetch_job(job_id)
print("priniting jobs in status",job)
response = {}
if job is None or job.is_finished:
response = { "state": "Finished" }
elif job.is_queued:
response = { "state": "Queued" }
elif job.is_failed:
response = { "state": "Failed", "message": job.exc_info }
else:
response = { "state": "Started" }
if 'status' in job.meta:
response['message'] = job.meta['status']
return response
@staticmethod
@swagger_auto_schema(method='get', operation_summary='Method provides a meta information about media files which are related with the task',
responses={'200': DataMetaSerializer()})
@action(detail=True, methods=['GET'], serializer_class=DataMetaSerializer,
url_path='data/meta')
def data_info(request, pk):
db_task = models.Task.objects.prefetch_related(
Prefetch('data', queryset=models.Data.objects.select_related('video').prefetch_related(
Prefetch('images', queryset=models.Image.objects.prefetch_related('related_files').order_by('frame'))
))
).get(pk=pk)
if hasattr(db_task.data, 'video'):
media = [db_task.data.video]
else:
media = list(db_task.data.images.all())
frame_meta = [{
'width': item.width,
'height': item.height,
'name': item.path,
'has_related_context': hasattr(item, 'related_files') and item.related_files.exists()
} for item in media]
db_data = db_task.data
db_data.frames = frame_meta
serializer = DataMetaSerializer(db_data)
return Response(serializer.data)
@swagger_auto_schema(method='get', operation_summary='Export task as a dataset in a specific format',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Desired output format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=True),
openapi.Parameter('filename', openapi.IN_QUERY,
description="Desired output file name",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('action', in_=openapi.IN_QUERY,
description='Used to start downloading process after annotation file had been created',
type=openapi.TYPE_STRING, required=False, enum=['download'])
],
responses={'202': openapi.Response(description='Exporting has been started'),
'201': openapi.Response(description='Output file is ready for downloading'),
'200': openapi.Response(description='Download of file started'),
'405': openapi.Response(description='Format is not available'),
}
)
@action(detail=True, methods=['GET'], serializer_class=None,
url_path='dataset')
def dataset_export(self, request, pk):
db_task = self.get_object() # force to call check_object_permissions
format_name = request.query_params.get("format", "")
return _export_annotations(db_instance=db_task,
rq_id="/api/v1/tasks/{}/dataset/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
callback=dm.views.export_task_as_dataset,
format_name=format_name,
filename=request.query_params.get("filename", "").lower(),
)
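# Hedged sketch (not part of upstream CVAT) of the task-import protocol implemented by
# TaskViewSet.create above: the first POST with ?action=import uploads the backup and
# returns 202 together with an rq_id; posting that rq_id back to the same endpoint polls
# the RQ job until it finishes and 201 is returned with the new task id. The session
# object, host and backup path are assumptions for illustration.
def _import_task_backup(session, host, backup_path):
    import time
    url = host + "/api/v1/tasks?action=import"
    with open(backup_path, "rb") as f:
        resp = session.post(url, files={"task_file": f})
    while resp.status_code == 202:
        time.sleep(1)
        resp = session.post(url, data={"rq_id": resp.json()["rq_id"]})
    return resp.json()["id"]  # present on 201 (import finished successfully)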
@method_decorator(name='retrieve', decorator=swagger_auto_schema(operation_summary='Method returns details of a job'))
@method_decorator(name='update', decorator=swagger_auto_schema(operation_summary='Method updates a job by id'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(
operation_summary='Method does a partial update of chosen fields in a job'))
class JobViewSet(viewsets.GenericViewSet,
mixins.RetrieveModelMixin, mixins.UpdateModelMixin):
queryset = Job.objects.all().order_by('id')
serializer_class = JobSerializer
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.JobAccessPermission)
elif http_method in ['PATCH', 'PUT', 'DELETE']:
# assignee_id = json.loads(self.request.body).get('assignee_id',False)
# reviewer_id = json.loads(self.request.body).get('reviewer_id',False)
# msg = MIMEMultipart('alternative')
# msg['From'] = EMAIL_HOST_USER
# mess = ''
# job_id = self.kwargs['pk']
# segment_id = Job.objects.filter(id=job_id).values('segment_id')[0].get('segment_id')
# task_id =Segment.objects.filter(id=segment_id).values('task_id')[0].get('task_id')
# if assignee_id:
# assignee_queryset = User.objects.filter(id=assignee_id).values()[0]
# assignee_email, assignee_username = assignee_queryset.get('email'),assignee_queryset.get('username')
# msg['Subject'] = "Job assignment notification"
# msg['To'] = assignee_email
# mess = admin_annot.format(assignee_username,task_id, job_id)
# elif reviewer_id:
# reviewer_queryset = User.objects.filter(id=reviewer_id).values()[0]
# reviewer_email, reviewer_username = reviewer_queryset.get('email'),reviewer_queryset.get('username')
# msg['Subject'] = "Job Review notification"
# msg['To'] = reviewer_email
# mess = annot_review.format(reviewer_username,task_id,job_id)
# part = MIMEText(mess, 'html')
# msg.attach(part)
# port = 465
# context = ssl.create_default_context()
# with smtplib.SMTP_SSL("smtp.gmail.com", port, context=context) as server:
# server.login(EMAIL_HOST_USER,EMAIL_HOST_PASSWORD)
# if msg.get('To') is not None:
# server.sendmail(EMAIL_HOST_USER, msg.get('To'), msg.as_string())
# server.quit()
permissions.append(auth.JobChangePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
@swagger_auto_schema(method='get', operation_summary='Method returns annotations for a specific job')
@swagger_auto_schema(method='put', operation_summary='Method performs an update of all annotations in a specific job')
@swagger_auto_schema(method='patch', manual_parameters=[
openapi.Parameter('action', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True,
enum=['create', 'update', 'delete'])],
operation_summary='Method performs a partial update of annotations in a specific job')
@swagger_auto_schema(method='delete', operation_summary='Method deletes all annotations for a specific job')
@action(detail=True, methods=['GET', 'DELETE', 'PUT', 'PATCH'],
serializer_class=LabeledDataSerializer)
def annotations(self, request, pk):
self.get_object() # force to call check_object_permissions
if request.method == 'GET':
data = dm.task.get_job_data(pk)
return Response(data)
elif request.method == 'PUT':
format_name = request.query_params.get("format", "")
if format_name:
return _import_annotations(
request=request,
rq_id="{}@/api/v1/jobs/{}/annotations/upload".format(request.user, pk),
rq_func=dm.task.import_job_annotations,
pk=pk,
format_name=format_name
)
else:
serializer = LabeledDataSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
try:
data = dm.task.put_job_data(pk, serializer.data)
except (AttributeError, IntegrityError) as e:
return Response(data=str(e), status=status.HTTP_400_BAD_REQUEST)
return Response(data)
elif request.method == 'DELETE':
dm.task.delete_job_data(pk)
return Response(status=status.HTTP_204_NO_CONTENT)
elif request.method == 'PATCH':
action = self.request.query_params.get("action", None)
if action not in dm.task.PatchAction.values():
raise serializers.ValidationError(
"Please specify a correct 'action' for the request")
serializer = LabeledDataSerializer(data=request.data)
#print("985",serializer)
if serializer.is_valid(raise_exception=True):
try:
data = dm.task.patch_job_data(pk, serializer.data, action)
except (AttributeError, IntegrityError) as e:
return Response(data=str(e), status=status.HTTP_400_BAD_REQUEST)
return Response(data)
@swagger_auto_schema(method='get', operation_summary='Method returns list of reviews for the job',
responses={'200': ReviewSerializer(many=True)}
)
@action(detail=True, methods=['GET'], serializer_class=ReviewSerializer)
def reviews(self, request, pk):
db_job = self.get_object()
queryset = db_job.review_set
serializer = ReviewSerializer(queryset, context={'request': request}, many=True)
return Response(serializer.data)
@swagger_auto_schema(method='get', operation_summary='Method returns list of issues for the job',
responses={'200': CombinedIssueSerializer(many=True)}
)
@action(detail=True, methods=['GET'], serializer_class=CombinedIssueSerializer)
def issues(self, request, pk):
db_job = self.get_object()
queryset = db_job.issue_set
serializer = CombinedIssueSerializer(queryset, context={'request': request}, many=True)
return Response(serializer.data)
@method_decorator(name='create', decorator=swagger_auto_schema(operation_summary='Submit a review for a job'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method removes a review from a job'))
class ReviewViewSet(viewsets.GenericViewSet, mixins.DestroyModelMixin, mixins.CreateModelMixin):
queryset = Review.objects.all().order_by('id')
def get_serializer_class(self):
if self.request.method == 'POST':
return CombinedReviewSerializer
else:
return ReviewSerializer
def get_permissions(self):
permissions = [IsAuthenticated]
if self.request.method == 'POST':
permissions.append(auth.JobReviewPermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
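# Creating a review can also transition the job: an accepted review marks the
# job as completed, a rejected one returns it to annotation, and a "review
# further" status hands the job over to the requested reviewer.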
def create(self, request, *args, **kwargs):
job_id = request.data['job']
db_job = get_object_or_404(Job, pk=job_id)
self.check_object_permissions(self.request, db_job)
if request.data['status'] == ReviewStatus.REVIEW_FURTHER:
if 'reviewer_id' not in request.data:
return Response('Must provide a new reviewer', status=status.HTTP_400_BAD_REQUEST)
reviewer_id = request.data['reviewer_id']
reviewer = get_object_or_404(User, pk=reviewer_id)
request.data.update({
'reviewer_id': request.user.id,
})
if db_job.assignee:
request.data.update({
'assignee_id': db_job.assignee.id,
})
issue_set = request.data['issue_set']
for issue in issue_set:
issue['job'] = db_job.id
issue['owner_id'] = request.user.id
comment_set = issue['comment_set']
for comment in comment_set:
comment['author_id'] = request.user.id
serializer = self.get_serializer(data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
if serializer.data['status'] == ReviewStatus.ACCEPTED:
db_job.status = StatusChoice.COMPLETED
db_job.save()
elif serializer.data['status'] == ReviewStatus.REJECTED:
db_job.status = StatusChoice.ANNOTATION
db_job.save()
else:
db_job.reviewer = reviewer
db_job.save()
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method removes an issue from a job'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(operation_summary='Method updates an issue. It is used to resolve/reopen an issue'))
class IssueViewSet(viewsets.GenericViewSet, mixins.DestroyModelMixin, mixins.UpdateModelMixin):
queryset = Issue.objects.all().order_by('id')
http_method_names = ['get', 'patch', 'delete', 'options']
def get_serializer_class(self):
return IssueSerializer
def partial_update(self, request, *args, **kwargs):
db_issue = self.get_object()
if 'resolver_id' in request.data and request.data['resolver_id'] and db_issue.resolver is None:
# resolve
db_issue.resolver = request.user
db_issue.resolved_date = datetime.now()
db_issue.save(update_fields=['resolver', 'resolved_date'])
elif 'resolver_id' in request.data and not request.data['resolver_id'] and db_issue.resolver is not None:
# reopen
db_issue.resolver = None
db_issue.resolved_date = None
db_issue.save(update_fields=['resolver', 'resolved_date'])
serializer = self.get_serializer(db_issue)
return Response(serializer.data)
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.IssueAccessPermission)
elif http_method in ['DELETE']:
permissions.append(auth.IssueDestroyPermission)
elif http_method in ['PATCH']:
permissions.append(auth.IssueChangePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
@swagger_auto_schema(method='get', operation_summary='The action returns all comments of a specific issue',
responses={'200': CommentSerializer(many=True)}
)
@action(detail=True, methods=['GET'], serializer_class=CommentSerializer)
def comments(self, request, pk):
db_issue = self.get_object()
queryset = db_issue.comment_set
serializer = CommentSerializer(queryset, context={'request': request}, many=True)
return Response(serializer.data)
@method_decorator(name='partial_update', decorator=swagger_auto_schema(operation_summary='Method updates comment in an issue'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method removes a comment from an issue'))
class CommentViewSet(viewsets.GenericViewSet,
mixins.DestroyModelMixin, mixins.UpdateModelMixin, mixins.CreateModelMixin):
queryset = Comment.objects.all().order_by('id')
serializer_class = CommentSerializer
http_method_names = ['get', 'post', 'patch', 'delete', 'options']
def create(self, request, *args, **kwargs):
request.data.update({
'author_id': request.user.id,
})
issue_id = request.data['issue']
db_issue = get_object_or_404(Issue, pk=issue_id)
self.check_object_permissions(self.request, db_issue.job)
return super().create(request, *args, **kwargs)
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in ['PATCH', 'DELETE']:
permissions.append(auth.CommentChangePermission)
elif http_method in ['POST']:
permissions.append(auth.CommentCreatePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
class UserFilter(filters.FilterSet):
class Meta:
model = User
fields = ("id", "is_active")
@method_decorator(name='list', decorator=swagger_auto_schema(
manual_parameters=[
openapi.Parameter('id', openapi.IN_QUERY, description="A unique number value identifying this user", type=openapi.TYPE_NUMBER),
openapi.Parameter('is_active', openapi.IN_QUERY, description="Returns only active users", type=openapi.TYPE_BOOLEAN),
],
operation_summary='Method provides a paginated list of users registered on the server'))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(
operation_summary='Method provides information of a specific user'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(
operation_summary='Method updates chosen fields of a user'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(
operation_summary='Method deletes a specific user from the server'))
class UserViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin):
#queryset = User.objects.prefetch_related('groups').all().order_by('id')
queryset = User.objects.all().order_by('id')
http_method_names = ['get', 'post', 'head', 'patch', 'delete']
search_fields = ('username', 'first_name', 'last_name', 'email')
filterset_class = UserFilter
def get_serializer_class(self):
user = self.request.user
if user.is_staff:
return UserSerializer
else:
is_self = int(self.kwargs.get("pk", 0)) == user.id or \
self.action == "self"
if is_self and self.request.method in SAFE_METHODS:
return UserSerializer
else:
return BasicUserSerializer
def get_permissions(self):
permissions = [IsAuthenticated]
user = self.request.user
if not self.request.method in SAFE_METHODS:
is_self = int(self.kwargs.get("pk", 0)) == user.id
if not is_self:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
@swagger_auto_schema(method='get', operation_summary='Method returns an instance of a user who is currently authorized')
@action(detail=False, methods=['GET'])
def self(self, request):
"""
Method returns an instance of a user who is currently authorized
"""
serializer_class = self.get_serializer_class()
serializer = serializer_class(request.user, context={ "request": request })
# """code changes for email verification required by Savita and Kalaiselvi"""
# emailid = serializer.data['email']
# isverified = EmailAddress.objects.filter(email=emailid).values('email','verified')
agreemt = UserAgreementStatus.objects.filter(user=request.user).values('accepted_status','accepted_date')
response = Response(serializer.data)
# print(response.data.get('is_superuser'))
# if response.data.get('is_superuser'):
response.data['email_verification_required'] = True
# try:
# response.data['email_verification_required']= isverified[0].get('verified')
# except:
# pass
try:
response.data['accepted_status'] = agreemt[0].get('accepted_status')
response.data['accepted_date'] = agreemt[0].get('accepted_date')
except IndexError:
pass
return response
class RedefineDescriptionField(FieldInspector):
# pylint: disable=no-self-use
def process_result(self, result, method_name, obj, **kwargs):
if isinstance(result, openapi.Schema):
if hasattr(result, 'title') and result.title == 'Specific attributes':
result.description = 'structure like key1=value1&key2=value2\n' \
'supported: range=aws_range'
return result
class CloudStorageFilter(filters.FilterSet):
display_name = filters.CharFilter(field_name='display_name', lookup_expr='icontains')
provider_type = filters.CharFilter(field_name='provider_type', lookup_expr='icontains')
resource = filters.CharFilter(field_name='resource', lookup_expr='icontains')
credentials_type = filters.CharFilter(field_name='credentials_type', lookup_expr='icontains')
description = filters.CharFilter(field_name='description', lookup_expr='icontains')
owner = filters.CharFilter(field_name='owner__username', lookup_expr='icontains')
class Meta:
model = models.CloudStorage
fields = ('id', 'display_name', 'provider_type', 'resource', 'credentials_type', 'description', 'owner')
@method_decorator(
name='retrieve',
decorator=swagger_auto_schema(
operation_summary='Method returns details of a specific cloud storage',
responses={
'200': openapi.Response(description='Details of a storage'),
},
tags=['cloud storages']
)
)
@method_decorator(name='list', decorator=swagger_auto_schema(
operation_summary='Returns a paginated list of storages according to query parameters',
manual_parameters=[
openapi.Parameter('provider_type', openapi.IN_QUERY, description="A supported provider of cloud storages",
type=openapi.TYPE_STRING, enum=CloudProviderChoice.list()),
openapi.Parameter('display_name', openapi.IN_QUERY, description="A display name of storage", type=openapi.TYPE_STRING),
openapi.Parameter('resource', openapi.IN_QUERY, description="A name of bucket or container", type=openapi.TYPE_STRING),
openapi.Parameter('owner', openapi.IN_QUERY, description="A resource owner", type=openapi.TYPE_STRING),
openapi.Parameter('credentials_type', openapi.IN_QUERY, description="A type of a granting access", type=openapi.TYPE_STRING, enum=CredentialsTypeChoice.list()),
],
responses={'200': BaseCloudStorageSerializer(many=True)},
tags=['cloud storages'],
field_inspectors=[RedefineDescriptionField]
)
)
@method_decorator(name='destroy', decorator=swagger_auto_schema(
operation_summary='Method deletes a specific cloud storage',
tags=['cloud storages']
)
)
@method_decorator(name='partial_update', decorator=swagger_auto_schema(
operation_summary='Method does a partial update of chosen fields in a cloud storage instance',
tags=['cloud storages'],
field_inspectors=[RedefineDescriptionField]
)
)
class CloudStorageViewSet(auth.CloudStorageGetQuerySetMixin, viewsets.ModelViewSet):
http_method_names = ['get', 'post', 'patch', 'delete']
queryset = CloudStorageModel.objects.all().prefetch_related('data').order_by('-id')
search_fields = ('provider_type', 'display_name', 'resource', 'credentials_type', 'owner__username', 'description')
filterset_class = CloudStorageFilter
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.CloudStorageAccessPermission)
elif http_method in ("POST", "PATCH", "DELETE"):
permissions.append(auth.CloudStorageChangePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
def get_serializer_class(self):
if self.request.method in ("POST", "PATCH"):
return CloudStorageSerializer
else:
return BaseCloudStorageSerializer
def get_queryset(self):
queryset = super().get_queryset()
provider_type = self.request.query_params.get('provider_type', None)
if provider_type:
if provider_type in CloudProviderChoice.list():
return queryset.filter(provider_type=provider_type)
raise ValidationError('Unsupported type of cloud provider')
return queryset
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
def perform_destroy(self, instance):
cloud_storage_dirname = instance.get_storage_dirname()
super().perform_destroy(instance)
shutil.rmtree(cloud_storage_dirname, ignore_errors=True)
@method_decorator(name='create', decorator=swagger_auto_schema(
operation_summary='Method creates a cloud storage with a specified characteristics',
responses={
'201': openapi.Response(description='A storage has been created')
},
tags=['cloud storages'],
field_inspectors=[RedefineDescriptionField],
)
)
def create(self, request, *args, **kwargs):
try:
response = super().create(request, *args, **kwargs)
except IntegrityError:
response = HttpResponseBadRequest('Same storage already exists')
except ValidationError as exceptions:
msg_body = ""
for ex in exceptions.args:
for field, ex_msg in ex.items():
msg_body += ': '.join([field, ex_msg if isinstance(ex_msg, str) else str(ex_msg[0])])
msg_body += '\n'
return HttpResponseBadRequest(msg_body)
except APIException as ex:
return Response(data=ex.get_full_details(), status=ex.status_code)
except Exception as ex:
response = HttpResponseBadRequest(str(ex))
return response
@swagger_auto_schema(
method='get',
operation_summary='Method returns a manifest content',
manual_parameters=[
openapi.Parameter('manifest_path', openapi.IN_QUERY,
description="Path to the manifest file in a cloud storage",
type=openapi.TYPE_STRING)
],
responses={
'200': openapi.Response(description='A manifest content'),
},
tags=['cloud storages']
)
@action(detail=True, methods=['GET'], url_path='content')
def content(self, request, pk):
try:
db_storage = CloudStorageModel.objects.get(pk=pk)
credentials = Credentials()
credentials.convert_from_db({
'type': db_storage.credentials_type,
'value': db_storage.credentials,
})
details = {
'resource': db_storage.resource,
'credentials': credentials,
'specific_attributes': db_storage.get_specific_attributes()
}
storage = get_cloud_storage_instance(cloud_provider=db_storage.provider_type, **details)
if not db_storage.manifests.count():
raise Exception('There is no manifest file')
manifest_path = request.query_params.get('manifest_path', 'manifest.jsonl')
file_status = storage.get_file_status(manifest_path)
if file_status == Status.NOT_FOUND:
raise FileNotFoundError(errno.ENOENT,
"Not found on the cloud storage {}".format(db_storage.display_name), manifest_path)
elif file_status == Status.FORBIDDEN:
raise PermissionError(errno.EACCES,
"Access to the file on the '{}' cloud storage is denied".format(db_storage.display_name), manifest_path)
full_manifest_path = os.path.join(db_storage.get_storage_dirname(), manifest_path)
if not os.path.exists(full_manifest_path) or \
datetime.utcfromtimestamp(os.path.getmtime(full_manifest_path)).replace(tzinfo=pytz.UTC) < storage.get_file_last_modified(manifest_path):
storage.download_file(manifest_path, full_manifest_path)
manifest = ImageManifestManager(full_manifest_path, db_storage.get_storage_dirname())
# need to update index
manifest.set_index()
manifest_files = manifest.data
return Response(data=manifest_files, content_type="text/plain")
except CloudStorageModel.DoesNotExist:
message = f"Storage {pk} does not exist"
slogger.glob.error(message)
return HttpResponseNotFound(message)
except FileNotFoundError as ex:
msg = f"{ex.strerror} {ex.filename}"
slogger.cloud_storage[pk].info(msg)
return Response(data=msg, status=status.HTTP_404_NOT_FOUND)
except Exception as ex:
# check that cloud storage was not deleted
storage_status = storage.get_status()
if storage_status == Status.FORBIDDEN:
msg = 'The resource {} is no longer available. Access forbidden.'.format(storage.name)
elif storage_status == Status.NOT_FOUND:
msg = 'The resource {} not found. It may have been deleted.'.format(storage.name)
else:
msg = str(ex)
return HttpResponseBadRequest(msg)
@swagger_auto_schema(
method='get',
operation_summary='Method returns a preview image from a cloud storage',
responses={
'200': openapi.Response(description='Preview'),
},
tags=['cloud storages']
)
@action(detail=True, methods=['GET'], url_path='preview')
def preview(self, request, pk):
try:
db_storage = CloudStorageModel.objects.get(pk=pk)
if not os.path.exists(db_storage.get_preview_path()):
credentials = Credentials()
credentials.convert_from_db({
'type': db_storage.credentials_type,
'value': db_storage.credentials,
})
details = {
'resource': db_storage.resource,
'credentials': credentials,
'specific_attributes': db_storage.get_specific_attributes()
}
storage = get_cloud_storage_instance(cloud_provider=db_storage.provider_type, **details)
if not db_storage.manifests.count():
raise Exception('Cannot get the cloud storage preview. There is no manifest file')
preview_path = None
for manifest_model in db_storage.manifests.all():
full_manifest_path = os.path.join(db_storage.get_storage_dirname(), manifest_model.filename)
if not os.path.exists(full_manifest_path) or \
datetime.utcfromtimestamp(os.path.getmtime(full_manifest_path)).replace(tzinfo=pytz.UTC) < storage.get_file_last_modified(manifest_model.filename):
storage.download_file(manifest_model.filename, full_manifest_path)
manifest = ImageManifestManager(
os.path.join(db_storage.get_storage_dirname(), manifest_model.filename),
db_storage.get_storage_dirname()
)
# need to update index
manifest.set_index()
if not len(manifest):
continue
preview_info = manifest[0]
preview_path = ''.join([preview_info['name'], preview_info['extension']])
break
if not preview_path:
msg = 'Cloud storage {} does not contain any images'.format(pk)
slogger.cloud_storage[pk].info(msg)
return HttpResponseBadRequest(msg)
file_status = storage.get_file_status(preview_path)
if file_status == Status.NOT_FOUND:
raise FileNotFoundError(errno.ENOENT,
"Not found on the cloud storage {}".format(db_storage.display_name), preview_path)
elif file_status == Status.FORBIDDEN:
raise PermissionError(errno.EACCES,
"Access to the file on the '{}' cloud storage is denied".format(db_storage.display_name), preview_path)
with NamedTemporaryFile() as temp_image:
storage.download_file(preview_path, temp_image.name)
reader = ImageListReader([temp_image.name])
preview = reader.get_preview()
preview.save(db_storage.get_preview_path())
content_type = mimetypes.guess_type(db_storage.get_preview_path())[0]
with open(db_storage.get_preview_path(), 'rb') as preview_file:
return HttpResponse(preview_file.read(), content_type)
except CloudStorageModel.DoesNotExist:
message = f"Storage {pk} does not exist"
slogger.glob.error(message)
return HttpResponseNotFound(message)
except Exception as ex:
# check that cloud storage was not deleted
storage_status = storage.get_status()
if storage_status == Status.FORBIDDEN:
msg = 'The resource {} is no longer available. Access forbidden.'.format(storage.name)
elif storage_status == Status.NOT_FOUND:
msg = 'The resource {} not found. It may have been deleted.'.format(storage.name)
else:
msg = str(ex)
return HttpResponseBadRequest(msg)
@swagger_auto_schema(
method='get',
operation_summary='Method returns a cloud storage status',
responses={
'200': openapi.Response(description='Status'),
},
tags=['cloud storages']
)
@action(detail=True, methods=['GET'], url_path='status')
def status(self, request, pk):
try:
db_storage = CloudStorageModel.objects.get(pk=pk)
credentials = Credentials()
credentials.convert_from_db({
'type': db_storage.credentials_type,
'value': db_storage.credentials,
})
details = {
'resource': db_storage.resource,
'credentials': credentials,
'specific_attributes': db_storage.get_specific_attributes()
}
storage = get_cloud_storage_instance(cloud_provider=db_storage.provider_type, **details)
storage_status = storage.get_status()
return HttpResponse(storage_status)
except CloudStorageModel.DoesNotExist:
message = f"Storage {pk} does not exist"
slogger.glob.error(message)
return HttpResponseNotFound(message)
except Exception as ex:
msg = str(ex)
return HttpResponseBadRequest(msg)
def rq_handler(job, exc_type, exc_value, tb):
job.exc_info = "".join(
traceback.format_exception_only(exc_type, exc_value))
job.save()
if "tasks" in job.id.split("/"):
return task.rq_handler(job, exc_type, exc_value, tb)
return True
# TODO: Method should be reimplemented as a separated view
# @swagger_auto_schema(method='put', manual_parameters=[openapi.Parameter('format', in_=openapi.IN_QUERY,
# description='A name of a loader\nYou can get annotation loaders from this API:\n/server/annotation/formats',
# required=True, type=openapi.TYPE_STRING)],
# operation_summary='Method allows to upload annotations',
# responses={'202': openapi.Response(description='Load of annotations has been started'),
# '201': openapi.Response(description='Annotations have been uploaded')},
# tags=['tasks'])
# @api_view(['PUT'])
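# Shared helper for the annotation upload endpoints: it saves the uploaded file
# to a temporary location, enqueues rq_func on the default RQ queue under rq_id,
# and reports the state of that job on subsequent calls with the same rq_id.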
def _import_annotations(request, rq_id, rq_func, pk, format_name):
format_desc = {f.DISPLAY_NAME: f
for f in dm.views.get_import_formats()}.get(format_name)
if format_desc is None:
raise serializers.ValidationError(
"Unknown input format '{}'".format(format_name))
elif not format_desc.ENABLED:
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
queue = django_rq.get_queue("default")
rq_job = queue.fetch_job(rq_id)
if not rq_job:
serializer = AnnotationFileSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
anno_file = serializer.validated_data['annotation_file']
fd, filename = mkstemp(prefix='cvat_{}'.format(pk))
with open(filename, 'wb+') as f:
for chunk in anno_file.chunks():
f.write(chunk)
av_scan_paths(filename)
rq_job = queue.enqueue_call(
func=rq_func,
args=(pk, filename, format_name),
job_id=rq_id
)
rq_job.meta['tmp_file'] = filename
rq_job.meta['tmp_file_descriptor'] = fd
rq_job.save_meta()
else:
if rq_job.is_finished:
os.close(rq_job.meta['tmp_file_descriptor'])
os.remove(rq_job.meta['tmp_file'])
rq_job.delete()
return Response(status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
os.close(rq_job.meta['tmp_file_descriptor'])
os.remove(rq_job.meta['tmp_file'])
exc_info = str(rq_job.exc_info)
rq_job.delete()
# RQ adds a prefix with exception class name
import_error_prefix = '{}.{}'.format(
CvatImportError.__module__, CvatImportError.__name__)
if exc_info.startswith(import_error_prefix):
exc_info = exc_info.replace(import_error_prefix + ': ', '')
return Response(data=exc_info,
status=status.HTTP_400_BAD_REQUEST)
else:
return Response(data=exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response(status=status.HTTP_202_ACCEPTED)
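# Shared helper for the annotation export endpoints: it re-enqueues the export
# callback when the task or project changed since the last run, serves the
# prepared file when action == "download", and otherwise reports the RQ job state.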
def _export_annotations(db_instance, rq_id, request, format_name, action, callback, filename):
if action not in {"", "download"}:
raise serializers.ValidationError(
"Unexpected action specified for the request")
format_desc = {f.DISPLAY_NAME: f
for f in dm.views.get_export_formats()}.get(format_name)
if format_desc is None:
raise serializers.ValidationError(
"Unknown format specified for the request")
elif not format_desc.ENABLED:
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
queue = django_rq.get_queue("default")
rq_job = queue.fetch_job(rq_id)
if rq_job:
last_instance_update_time = timezone.localtime(db_instance.updated_date)
if isinstance(db_instance, Project):
tasks_update = list(map(lambda db_task: timezone.localtime(db_task.updated_date), db_instance.tasks.all()))
last_instance_update_time = max(tasks_update + [last_instance_update_time])
request_time = rq_job.meta.get('request_time', None)
if request_time is None or request_time < last_instance_update_time:
rq_job.cancel()
rq_job.delete()
else:
if rq_job.is_finished:
file_path = rq_job.return_value
if action == "download" and osp.exists(file_path):
rq_job.delete()
timestamp = datetime.strftime(last_instance_update_time,
"%Y_%m_%d_%H_%M_%S")
filename = filename or \
"{}_{}-{}-{}{}".format(
"project" if isinstance(db_instance, models.Project) else "task",
db_instance.name, timestamp,
format_name, osp.splitext(file_path)[1]
)
return sendfile(request, file_path, attachment=True,
attachment_filename=filename.lower())
else:
if osp.exists(file_path):
return Response(status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
exc_info = str(rq_job.exc_info)
rq_job.delete()
return Response(exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return Response(status=status.HTTP_202_ACCEPTED)
try:
if request.scheme:
server_address = request.scheme + '://'
server_address += request.get_host()
except Exception:
server_address = None
ttl = (dm.views.PROJECT_CACHE_TTL if isinstance(db_instance, Project) else dm.views.TASK_CACHE_TTL).total_seconds()
queue.enqueue_call(func=callback,
args=(db_instance.id, format_name, server_address), job_id=rq_id,
meta={ 'request_time': timezone.localtime() },
result_ttl=ttl, failure_ttl=ttl)
return Response(status=status.HTTP_202_ACCEPTED)
|
[] |
[] |
[
"CVAT_ANALYTICS",
"CVAT_SERVERLESS"
] |
[]
|
["CVAT_ANALYTICS", "CVAT_SERVERLESS"]
|
python
| 2 | 0 | |
AwesomeGreBot.py
|
#!/usr/bin/env python
import sys
import random
from twython import Twython
from wordnik import *
import configparser
import logging
import os
from bs4 import BeautifulSoup
def getANewWord(fileToUse):
lineToUse = random.choice(open(fileToUse).readlines())
lineToUse = lineToUse.split(' ')
wordToUse = lineToUse[0]
return wordToUse
def findDefinition(wordToFind):
# wordnik authentication
wordnikApiUrl = 'http://api.wordnik.com/v4'
wordnikApiKey = os.environ['WORDNIK_APIKEY']
client = swagger.ApiClient(wordnikApiKey, wordnikApiUrl)
wordConnection = WordApi.WordApi(client)
example = wordConnection.getExamples(word=wordToFind,limit=3)
example = example.examples
defn = wordConnection.getDefinitions(word=wordToFind,sourceDictionaries='wiktionary')
return (defn,example)
def tweetDefn(word,defn,api):
arr = ["You must be knowing","Just realized","Did u know","Dictionary says","Pata hai"," "]
prefix = random.choice(arr)
soup = BeautifulSoup(defn[0].text, "html.parser")
definition_text = soup.get_text()
tweet = prefix+" #"+word+" means "+abbreviatePoS(defn[0].partOfSpeech)+' '+definition_text+' #gre #vocabulary #word'
if len(tweet) > 260:
tweet1 = tweet[:260]+' (1/2)'
api.update_status(status=tweet1)
tweet2 = tweet[260:]+' (2/2) #'+word
api.update_status(status=tweet2)
else:
api.update_status(status=tweet)
print("Tweeted another word!")
def abbreviatePoS(partOfSpeech):
if partOfSpeech == 'noun':
return '(n.)'
elif partOfSpeech == 'adjective':
return '(adj.)'
elif partOfSpeech == 'verb':
return '(v.)'
return ''  # default for other parts of speech so callers can concatenate safely
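# Note: DMdef below is currently unused (its call in tweetANewWord is commented
# out) and refers to module-level `api` and `config` objects that are not
# defined in this script, so it would need those wired up before being enabled.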
def DMdef(word,defn,example):
#arr = ["You must be knowing","Just realized","Did you know","Dictionary says","Pata hai"," "]
#prefix = random.choice(arr)
message = '#'+word.upper()+": \n--------------------------------\n"
for definition in defn:
message = message+abbreviatePoS(definition.partOfSpeech)+' '+definition.text+"\n\n"
#tweet = prefix+" "+word+" means "+defn+" \n\n"+"EXAMPLE :- "+example+"\n"
message=message+"\nEXAMPLES: \n--------------------------\n"
for eg in example:
if len(eg.text)<=200:
message = message+eg.text+"\n\n"
#message=message+"----------------------"
api.send_direct_message(screen_name=config['twitter']['receiverTwitterUsername'],text=message)
logging.info("DMs Sent.")
def tweetANewWord():
# Twython authentication
# your twitter consumer and access information goes here
apiKey = os.environ['TWITTER_APIKEY']
apiSecret = os.environ['TWITTER_APISECRET']
accessToken = os.environ['TWITTER_ACCESSTOKEN']
accessTokenSecret = os.environ['TWITTER_ACCESSTOKENSECRET']
api = Twython(apiKey,apiSecret,accessToken,accessTokenSecret)
print("api for twython done bro")
# LOGIC!!!!
filesAvailable = ['words.txt','wordsPrinceton.txt']
fileToUse = random.choice(filesAvailable)
print("will be using {} file.".format(fileToUse))
wordToUse = getANewWord(fileToUse)
wordToUse = wordToUse.strip()
print("wordToUse is {}".format(wordToUse))
definition,example = findDefinition(wordToUse)
#DMdef(wordToUse,definition,example)
tweetDefn(wordToUse,definition,api)
print("Done.")
if __name__=="__main__":
tweetANewWord()
|
[] |
[] |
[
"TWITTER_ACCESSTOKEN",
"TWITTER_ACCESSTOKENSECRET",
"TWITTER_APIKEY",
"WORDNIK_APIKEY",
"TWITTER_APISECRET"
] |
[]
|
["TWITTER_ACCESSTOKEN", "TWITTER_ACCESSTOKENSECRET", "TWITTER_APIKEY", "WORDNIK_APIKEY", "TWITTER_APISECRET"]
|
python
| 5 | 0 | |
telegram/config.py
|
"""This module provides telegram notifier configs."""
import os
NOTIFIER_INTERVAL = 10
# SQS stuff
SQS_MAX_MESSAGES = 10
SQS_QUEUE_NAME = "telegram-notifications"
# Telegram stuff
TELEGRAM_BOT_TOKEN = os.environ["TELEGRAM_BOT_TOKEN"]
TELEGRAM_API = f"https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}"
# REDIS stuff
REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT = int(os.getenv("REDIS_PORT", "6379"))
|
[] |
[] |
[
"TELEGRAM_BOT_TOKEN",
"REDIS_HOST",
"REDIS_PORT"
] |
[]
|
["TELEGRAM_BOT_TOKEN", "REDIS_HOST", "REDIS_PORT"]
|
python
| 3 | 0 | |
context.go
|
package dbot
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/fatih/color"
"github.com/robertkrimen/otto"
"golang.org/x/term"
"gopkg.in/yaml.v2"
)
type Context struct {
parent *Context
runnerGroupMap map[string][]string
runnerMap map[string]Runner
job *Job
rawCmd *Command
runCmd *Command
runners []Runner
path string
file string
}
// NewContext create the root context
func NewContext(file string, jobName string) *Context {
vCtx := &Context{
runnerGroupMap: map[string][]string{
"local": {"local"},
},
runnerMap: map[string]Runner{
"local": &LocalRunner{},
},
path: jobName,
file: "",
runners: []Runner{&LocalRunner{}},
runCmd: &Command{Env: Env{}},
}
ret := vCtx.subContext(&Command{Tag: "job", Exec: jobName, File: file})
ret.parent = nil
return ret
}
// subContext create sub Context
func (p *Context) subContext(rawCmd *Command) *Context {
cmdEnv := p.runCmd.Env.Merge(p.runCmd.Env.ParseEnv(rawCmd.Env))
// Notice: if rawCmd tag is job, then runCmd.Env will change in init func
runCmd := &Command{
Tag: cmdEnv.ParseString(rawCmd.Tag, "cmd", true),
Exec: cmdEnv.ParseString(rawCmd.Exec, "", false),
On: cmdEnv.ParseString(rawCmd.On, "", true),
Stdin: cmdEnv.ParseStringArray(rawCmd.Stdin),
Env: cmdEnv,
Args: cmdEnv.ParseEnv(rawCmd.Args),
File: cmdEnv.ParseString(rawCmd.File, "", true),
}
file := p.file
runners := p.runners
path := p.path
job := p.job
switch runCmd.Tag {
case "cmd", "script":
if len(rawCmd.Args) > 0 {
p.Clone("%s.args", p.path).LogError(
"unsupported args on tag \"%s\"", runCmd.Tag,
)
}
if rawCmd.File != "" {
p.Clone("%s.file", p.path).LogError(
"unsupported file on tag \"%s\"", runCmd.Tag,
)
}
case "job":
if len(rawCmd.Stdin) > 0 {
p.Clone("%s.stdin", p.path).LogError(
"unsupported stdin on tag \"%s\"", runCmd.Tag,
)
}
// Load config
config := make(map[string]*Job)
if runCmd.File != "" {
file = runCmd.File
}
if v, ok := p.Clone("%s.file", file).loadConfig(file, &config); ok {
file = v
} else {
return nil
}
// Check if the job exists
if v, ok := config[runCmd.Exec]; ok {
job = v
} else {
p.Clone("%s.exec", p.path).LogError(
"could not find job \"%s\" in \"%s\"", runCmd.Exec, file,
)
return nil
}
path = runCmd.Exec
runCmd.Env = nil
default:
p.Clone("%s.tag", p.path).LogError("unsupported tag \"%s\"", runCmd.Tag)
return nil
}
// Set runners
if runCmd.On != "" {
if v := p.getRunners(runCmd.On); len(v) > 0 {
runners = v
} else {
return nil
}
}
// clone runnerGroupMap
runnerGroupMap := make(map[string][]string)
for key, value := range p.runnerGroupMap {
runnerGroupMap[key] = value
}
ret := &Context{
runnerGroupMap: runnerGroupMap,
runnerMap: p.runnerMap,
job: job,
parent: p,
rawCmd: rawCmd,
runCmd: runCmd,
runners: runners,
path: path,
file: file,
}
if runCmd.Tag == "job" {
if !ret.initJob() {
return nil
}
}
return ret
}
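// initJob builds the job environment from the root env, the job's env and any
// interactive inputs, then registers the runner groups declared by the job's
// imports and remotes.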
func (p *Context) initJob() bool {
// init jobEnv
rootEnv := p.getRootEnv()
jobEnv := rootEnv.
Merge(rootEnv.ParseEnv(p.job.Env)).
Merge(p.runCmd.Args)
tmpEnv := jobEnv.Merge(Env{})
for key, it := range p.job.Inputs {
itDesc := tmpEnv.ParseString(it.Desc, "input "+key+": ", false)
itType := tmpEnv.ParseString(it.Type, "text", true)
value, ok := p.Clone("%s.inputs.%s", p.path, key).
GetUserInput(itDesc, itType)
if !ok {
return false
}
jobEnv[key] = tmpEnv.ParseString(value, "", false)
}
p.runCmd.Env = jobEnv
// Load imports
for key, it := range p.job.Imports {
itName := jobEnv.ParseString(it.Name, "", true)
itFile := jobEnv.ParseString(it.File, "", true)
config := make(map[string][]*Remote)
if absFile, ok := p.Clone("%s.imports.%s", p.path, key).
loadConfig(itFile, &config); !ok {
return false
} else if item, ok := config[itName]; !ok {
return false
} else {
sshGroup := (&Context{
parent: p,
runnerGroupMap: p.runnerGroupMap,
runnerMap: p.runnerMap,
path: itName,
file: absFile,
runners: p.runners,
}).loadSSHGroup(item, Env{})
if sshGroup == nil {
return false
}
p.runnerGroupMap[key] = sshGroup
}
}
// Load remotes
for key, list := range p.job.Remotes {
sshGroup := p.Clone("%s.remotes.%s", p.path, key).
loadSSHGroup(list, jobEnv)
if sshGroup == nil {
return false
}
p.runnerGroupMap[key] = sshGroup
}
return true
}
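// loadSSHGroup creates (or reuses) an SSH runner for every remote in the list
// and returns the runner ids that make up the group.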
func (p *Context) loadSSHGroup(list []*Remote, env Env) []string {
if len(list) == 0 {
p.LogError("list is empty")
return nil
}
ret := make([]string, 0)
for idx, it := range list {
host := env.ParseString(it.Host, "", true)
user := env.ParseString(it.User, os.Getenv("USER"), true)
port := env.ParseString(it.Port, "22", true)
id := fmt.Sprintf("%s@%s:%s", user, host, port)
if _, ok := p.runnerMap[id]; !ok {
ssh := NewSSHRunner(
p.Clone("%s[%d]", p.path, idx), port, user, host,
)
if ssh == nil {
return nil
}
p.runnerMap[id] = ssh
}
ret = append(ret, id)
}
return ret
}
func (p *Context) getRootEnv() Env {
return Env{
"KeyESC": "\033",
"KeyEnter": "\n",
}.Merge(Env{
"ConfigDir": filepath.Dir(p.file),
})
}
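// getRunners resolves the comma-separated group list from a command's "on"
// field into the concrete runners registered for those groups.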
func (p *Context) getRunners(runOn string) []Runner {
ret := make([]Runner, 0)
for _, groupName := range strings.Split(runOn, ",") {
if groupName = strings.TrimSpace(groupName); groupName != "" {
runnersName, ok := p.runnerGroupMap[groupName]
if !ok {
p.LogError("could not find group \"%s\"", groupName)
return nil
}
for _, runnerName := range runnersName {
runner, ok := p.runnerMap[runnerName]
if !ok {
p.LogError("could not find runner \"%s\"", runnerName)
return nil
}
ret = append(ret, runner)
}
}
}
if len(ret) == 0 {
p.LogError("could not find any runners")
return nil
}
return ret
}
func (p *Context) Clone(format string, a ...interface{}) *Context {
return &Context{
runnerGroupMap: p.runnerGroupMap,
runnerMap: p.runnerMap,
job: p.job,
parent: p.parent,
rawCmd: p.rawCmd,
runCmd: p.runCmd,
runners: p.runners,
path: fmt.Sprintf(format, a...),
file: p.file,
}
}
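// Run executes the context: with a single runner it runs the job, cmd or script
// directly; with several runners it clones itself once per runner and runs each
// clone in turn.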
func (p *Context) Run() bool {
if len(p.runners) == 0 {
// Check
p.LogError("kernel error: runners must be checked in previous call")
return false
} else if len(p.runners) == 1 {
// If len(p.runners) == 1. Run it
switch p.runCmd.Tag {
case "job":
return p.runJob()
case "cmd":
return p.runCommand()
case "script":
return p.runScript()
default:
p.LogError("kernel error: type must be checked in previous call")
return false
}
} else {
// If len(p.runners) > 1, Split the context by runners.
for _, runner := range p.runners {
ctx := p.Clone(p.path)
ctx.runners = []Runner{runner}
if !ctx.Run() {
return false
}
}
return true
}
}
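// runJob runs the job's commands one by one, or concurrently when the job is
// marked async, and fails if any command fails.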
func (p *Context) runJob() bool {
// If the commands are run in sequence, run them one by one and return
if !p.job.Async {
for i := 0; i < len(p.job.Commands); i++ {
ctx := p.Clone("%s.commands[%d]", p.runCmd.Exec, i).
subContext(p.job.Commands[i])
if ctx == nil {
return false
}
if !ctx.Run() {
return false
}
}
return true
}
// The commands are run async
waitCH := make(chan bool, len(p.job.Commands))
for i := 0; i < len(p.job.Commands); i++ {
go func(idx int) {
ctx := p.Clone("jobs.%s.commands[%d]", p.runCmd.Exec, idx).
subContext(p.job.Commands[idx])
if ctx == nil {
waitCH <- false
} else {
waitCH <- ctx.Run()
}
}(i)
}
// Wait for all commands to complete
ret := true
for i := 0; i < len(p.job.Commands); i++ {
if !<-waitCH {
ret = false
}
}
return ret
}
func (p *Context) runScript() bool {
stdout := &bytes.Buffer{}
stderr := &bytes.Buffer{}
vm := otto.New()
_ = vm.Set("dbot", &DbotObject{
vm: vm,
stdout: stdout,
stderr: stderr,
ctx: p,
seed: 0,
})
_, e := vm.Run(p.runCmd.Exec)
p.Log(stdout.String(), stderr.String())
if e != nil {
p.LogError(e.Error())
}
return e == nil
}
func (p *Context) runCommand() bool {
return p.runners[0].Run(p)
}
func (p *Context) getRunnersName() string {
nameArray := make([]string, 0)
for _, runner := range p.runners {
nameArray = append(nameArray, runner.Name())
}
return strings.Join(nameArray, ",")
}
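// loadConfig resolves path relative to the current config file, falls back to
// main.yaml/main.yml/main.json when path is a directory, and unmarshals the
// file into v based on its extension.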
func (p *Context) loadConfig(path string, v interface{}) (string, bool) {
var fnUnmarshal (func(data []byte, v interface{}) error)
ret := ""
if filepath.IsAbs(path) {
ret = path
} else if p.file == "" {
v, e := filepath.Abs(path)
if e != nil {
p.LogError(e.Error())
}
ret = v
} else {
ret = filepath.Join(filepath.Dir(p.file), path)
}
// If config file is a directory, we try to find default config file
if IsDir(ret) {
yamlFile := filepath.Join(ret, "main.yaml")
ymlFile := filepath.Join(ret, "main.yml")
jsonFile := filepath.Join(ret, "main.json")
if IsFile(yamlFile) {
ret = yamlFile
} else if IsFile(ymlFile) {
ret = ymlFile
} else if IsFile(jsonFile) {
ret = jsonFile
} else {
p.LogError(
"could not find main.yaml or main.yml or main.json "+
"in directory \"%s\"\n",
ret,
)
return "", false
}
}
// Check the file extension, and set corresponding unmarshal func
ext := filepath.Ext(ret)
switch ext {
case ".json":
fnUnmarshal = json.Unmarshal
case ".yml":
fnUnmarshal = yaml.Unmarshal
case ".yaml":
fnUnmarshal = yaml.Unmarshal
default:
p.LogError("unsupported file extension \"%s\"", ret)
return "", false
}
// Read the config file, and unmarshal it to config structure
if b, e := ioutil.ReadFile(ret); e != nil {
p.LogError(e.Error())
return "", false
} else if e := fnUnmarshal(b, v); e != nil {
p.LogError(e.Error())
return "", false
} else {
return ret, true
}
}
func (p *Context) GetUserInput(desc string, mode string) (string, bool) {
switch mode {
case "password":
p.LogInfo("")
p.logRawInfo(desc)
b, e := term.ReadPassword(int(syscall.Stdin))
if e != nil {
p.logRawError(e.Error() + "\n")
return "", false
}
p.logRawInfo("\n")
return string(b), true
case "text":
p.LogInfo("")
p.logRawInfo(desc)
ret := ""
if _, e := fmt.Scanf("%s", &ret); e != nil {
p.logRawError(e.Error() + "\n")
return "", false
}
return ret, true
default:
p.LogError("unsupported mode %s", mode)
return "", false
}
}
func (p *Context) logRawInfo(format string, a ...interface{}) {
log(fmt.Sprintf(format, a...), color.FgBlue)
}
func (p *Context) logRawError(format string, a ...interface{}) {
log(fmt.Sprintf(format, a...), color.FgRed)
}
func (p *Context) LogInfo(format string, a ...interface{}) {
p.Log(fmt.Sprintf(format, a...), "")
}
func (p *Context) LogError(format string, a ...interface{}) {
p.Log("", fmt.Sprintf(format, a...))
}
func (p *Context) Log(outStr string, errStr string) {
logItems := []interface{}{}
logItems = append(logItems, p.getRunnersName())
logItems = append(logItems, color.FgYellow)
logItems = append(logItems, " > ")
logItems = append(logItems, color.FgGreen)
logItems = append(logItems, p.file)
logItems = append(logItems, color.FgYellow)
if p.path != "" {
logItems = append(logItems, " > ")
logItems = append(logItems, color.FgGreen)
logItems = append(logItems, p.path)
logItems = append(logItems, color.FgYellow)
}
logItems = append(logItems, "\n")
logItems = append(logItems, color.FgGreen)
if p.runCmd != nil {
if p.runCmd.Tag != "job" && p.runCmd.Exec != "" {
logItems = append(logItems, GetStandradOut(p.runCmd.Exec))
logItems = append(logItems, color.FgBlue)
}
}
if outStr != "" {
logItems = append(logItems, GetStandradOut(outStr))
logItems = append(logItems, color.FgGreen)
}
if errStr != "" {
logItems = append(logItems, GetStandradOut(errStr))
logItems = append(logItems, color.FgRed)
}
log(logItems...)
}
|
[
"\"USER\""
] |
[] |
[
"USER"
] |
[]
|
["USER"]
|
go
| 1 | 0 | |
tests/test_shells/postproc.py
|
#!/usr/bin/env python
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import socket
import sys
import codecs
import platform
import re
test_root = os.environ['TEST_ROOT']
test_type = sys.argv[1]
test_client = sys.argv[2]
shell = sys.argv[3]
fname = os.path.join(test_root, '.'.join((shell, test_type, test_client, 'full.log')))
new_fname = os.path.join(test_root, '.'.join((shell, test_type, test_client, 'log')))
pid_fname = os.path.join(test_root, '3rd', 'pid')
is_pypy = platform.python_implementation() == 'PyPy'
try:
with open(pid_fname, 'r') as P:
pid = P.read().strip()
except IOError:
pid = None
hostname = socket.gethostname()
user = os.environ['USER']
REFS_RE = re.compile(r'^\[\d+ refs\]\n')
IPYPY_DEANSI_RE = re.compile(r'\033(?:\[(?:\?\d+[lh]|[^a-zA-Z]+[a-ln-zA-Z])|[=>])')
ZSH_HL_RE = re.compile(r'\033\[\?\d+[hl]')
start_str = 'cd "$TEST_ROOT"/3rd'
if shell == 'pdb':
start_str = 'class Foo(object):'
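# Everything before the first occurrence of start_str and after the
# "true is the last line" marker is dropped; the remaining lines have hostname,
# user and pid normalised and shell-specific escape sequences stripped before
# being written to the trimmed log.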
with codecs.open(fname, 'r', encoding='utf-8') as R:
with codecs.open(new_fname, 'w', encoding='utf-8') as W:
found_cd = False
i = -1
for line in (R if shell != 'fish' else R.read().split('\n')):
i += 1
if not found_cd:
found_cd = (start_str in line)
continue
if 'true is the last line' in line:
break
line = line.translate({
ord('\r'): None
})
if REFS_RE.match(line):
continue
line = line.replace(hostname, 'HOSTNAME')
line = line.replace(user, 'USER')
if pid is not None:
line = line.replace(pid, 'PID')
if shell == 'zsh':
line = line.replace('\033[0m\033[23m\033[24m\033[J', '')
line = ZSH_HL_RE.subn('', line)[0]
elif shell == 'fish':
res = ''
try:
while line.index('\033[0;'):
start = line.index('\033[0;')
end = line.index('\033[0m', start)
res += line[start:end + 4] + '\n'
line = line[end + 4:]
except ValueError:
pass
line = res
elif shell == 'tcsh':
try:
start = line.index('\033[0;')
end = line.index(' ', start)
line = line[start:end] + '\n'
except ValueError:
line = ''
elif shell == 'mksh':
# Output is different in travis: on my machine I see full
# command, in travis it is truncated just after `true`.
if line.startswith('[1] + Terminated'):
line = '[1] + Terminated bash -c ...\n'
elif shell == 'dash':
# Position of this line is not stable: it may go both before and
# after the next line
if line.startswith('[1] + Terminated'):
continue
elif shell == 'ipython' and is_pypy:
try:
end_idx = line.rindex('\033[0m')
try:
idx = line[:end_idx].rindex('\033[1;1H')
except ValueError:
idx = line[:end_idx].rindex('\033[?25h')
line = line[idx + len('\033[1;1H'):]
except ValueError:
pass
try:
data_end_idx = line.rindex('\033[1;1H')
line = line[:data_end_idx] + '\n'
except ValueError:
pass
if line == '\033[1;1H\n':
continue
was_empty = line == '\n'
line = IPYPY_DEANSI_RE.subn('', line)[0]
if line == '\n' and not was_empty:
line = ''
elif shell == 'rc':
if line == 'read() failed: Connection reset by peer\n':
line = ''
elif shell == 'pdb':
if is_pypy:
if line == '\033[?1h\033=\033[?25l\033[1A\n':
line = ''
line = IPYPY_DEANSI_RE.subn('', line)[0]
if line == '\n':
line = ''
if line.startswith(('>',)):
line = ''
elif line == '-> self.quitting = 1\n':
line = '-> self.quitting = True\n'
elif line == '\n':
line = ''
if line == '-> self.quitting = True\n':
break
W.write(line)
|
[] |
[] |
[
"TEST_ROOT",
"USER"
] |
[]
|
["TEST_ROOT", "USER"]
|
python
| 2 | 0 | |
pkg/fix_test.go
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pkg
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
)
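// TestFix runs the fixer over testdata/input for every combination of module
// mode, vendoring and library version, then diffs the result against
// testdata/expect.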
func TestFix(t *testing.T) {
for _, module := range []bool{true, false} {
for _, vendor := range []bool{true, false} {
for _, version := range []string{"v0.17.4", "v0.18.0-rc.1"} {
name := ifelse(module, "mod", "gopath") + "_" + ifelse(vendor, "vendor", "novendor") + "_" + version
t.Run(name, func(t *testing.T) {
defer restoreEnv()()
tempDir, cleanupTempDir := tempDir(t, name)
defer cleanupTempDir()
copyDir(t, "testdata/input/", tempDir)
prepDeps(t, tempDir, module, vendor, version)
subdirs, err := ioutil.ReadDir(filepath.Join(tempDir, "src", "example.com"))
if err != nil {
t.Fatal(err)
}
for _, subdir := range subdirs {
if !subdir.IsDir() || subdir.Name() == "vendor" {
continue
}
t.Run(subdir.Name(), func(t *testing.T) {
runFix(t, filepath.Join(tempDir, "src", "example.com"), fmt.Sprintf("./%s/...", subdir.Name()))
diffOutput(t, filepath.Join(tempDir, "src", "example.com"), subdir.Name())
})
}
})
}
}
}
}
func ifelse(b bool, t, f string) string {
if b {
return t
}
return f
}
func restoreEnv() func() {
gopath := os.Getenv("GOPATH")
goflags := os.Getenv("GOFLAGS")
goproxy := os.Getenv("GOPROXY")
gomodule := os.Getenv("GO111MODULE")
return func() {
os.Setenv("GOPATH", gopath)
os.Setenv("GOFLAGS", goflags)
os.Setenv("GOPROXY", goproxy)
os.Setenv("GO111MODULE", gomodule)
}
}
func tempDir(t *testing.T, name string) (string, func()) {
tempDir, err := ioutil.TempDir("", name)
if err != nil {
t.Fatal(err)
}
return tempDir, func() { os.RemoveAll(tempDir) }
}
func copyDir(t *testing.T, in, out string) {
err := filepath.Walk(in, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if in == path || !strings.HasPrefix(path, in) {
return nil
}
rel := strings.TrimPrefix(path, in)
if info.IsDir() {
return os.MkdirAll(filepath.Join(out, rel), os.FileMode(0755))
}
if !strings.HasSuffix(path, ".go") {
return nil
}
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
return ioutil.WriteFile(filepath.Join(out, rel), data, os.FileMode(0644))
})
if err != nil {
t.Error(err)
}
}
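// prepDeps writes a go.mod pinned to the requested library version and arranges
// the dependencies for the module/vendor combination under test, falling back
// to a plain GOPATH layout when modules are disabled.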
func prepDeps(t *testing.T, tempDir string, module, vendor bool, version string) {
srcDir := filepath.Join(tempDir, "src")
exampleDir := filepath.Join(srcDir, "example.com")
vendorDir := filepath.Join(exampleDir, "vendor")
goModFile := filepath.Join(exampleDir, "go.mod")
writeGoMod(t, goModFile, version)
switch {
case module && vendor:
// enable modules and vendor and build vendor dir
os.Setenv("GO111MODULE", "on")
os.Setenv("GOFLAGS", "-mod=vendor")
runGoModVendor(t, exampleDir)
case module && !vendor:
// enable modules and disable vendor
os.Setenv("GO111MODULE", "on")
os.Setenv("GOFLAGS", "")
case !module && vendor:
// enable modules and build vendor dir to assemble required dependencies
os.Setenv("GO111MODULE", "on")
runGoModVendor(t, exampleDir)
// set $GOPATH to tempDir
os.Setenv("GOPATH", tempDir)
// disable modules and remove go.mod file
os.Setenv("GO111MODULE", "off")
if err := os.Remove(goModFile); err != nil {
t.Fatal(err)
}
case !module && !vendor:
// enable modules and build vendor dir to assemble required dependencies
os.Setenv("GO111MODULE", "on")
runGoModVendor(t, exampleDir)
// set $GOPATH to tempDir and move dependencies from vendor there
os.Setenv("GOPATH", tempDir)
dependencies, err := ioutil.ReadDir(vendorDir)
if err != nil {
t.Fatal(err)
}
for _, dependency := range dependencies {
err := os.Rename(
filepath.Join(vendorDir, dependency.Name()),
filepath.Join(srcDir, dependency.Name()),
)
if err != nil {
t.Fatal(err)
}
}
// disable modules and remove go.mod file and vendor dir
os.Setenv("GO111MODULE", "off")
if err := os.Remove(goModFile); err != nil {
t.Fatal(err)
}
if err := os.Remove(filepath.Join(exampleDir, "vendor")); err != nil {
t.Fatal(err)
}
}
}
func writeGoMod(t *testing.T, goMod string, version string) {
err := ioutil.WriteFile(goMod, []byte(fmt.Sprintf(`module example.com
go 1.13
require (
k8s.io/api %[1]s
k8s.io/apimachinery %[1]s
k8s.io/client-go %[1]s
)`, version)), os.FileMode(0644))
if err != nil {
t.Fatal(err)
}
}
func runGoModVendor(t *testing.T, dir string) {
cmd := exec.Command("go", "mod", "vendor")
cmd.Dir = dir
output, err := cmd.CombinedOutput()
if err != nil {
t.Log(string(output))
t.Fatal(err)
}
}
func runFix(t *testing.T, dir, pkg string) {
b := bytes.NewBuffer([]byte{})
o := DefaultFixOptions()
o.Dir = dir
o.Out = b
o.Packages = []string{pkg}
err := o.Run()
if err != nil {
t.Log(b.String())
t.Fatal(err)
}
}
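// diffOutput compares every fixed .go file under resultDir/subDir with the
// expected files in testdata/expect, regenerating the fixtures instead when
// UPDATE_FIXTURE_DATA=true is set.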
func diffOutput(t *testing.T, resultDir, subDir string) {
err := filepath.Walk(filepath.Join(resultDir, subDir), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
if info.Name() == "vendor" {
return filepath.SkipDir
}
return nil
}
if !strings.HasSuffix(info.Name(), ".go") {
return nil
}
rel := strings.TrimPrefix(path, resultDir)
name := filepath.Base(path)
data, err := ioutil.ReadFile(path)
if err != nil {
t.Errorf("%s: %v", name, err)
return nil
}
expectFile := filepath.Join("testdata", "expect", "src", "example.com", rel)
expectData, err := ioutil.ReadFile(expectFile)
ok := err == nil && bytes.Equal(data, expectData)
if !ok {
if os.Getenv("UPDATE_FIXTURE_DATA") == "true" {
os.MkdirAll(filepath.Dir(expectFile), os.FileMode(0755))
if err := ioutil.WriteFile(expectFile, data, os.FileMode(0644)); err != nil {
t.Errorf("%s: %v", name, err)
return nil
}
t.Errorf("%s: wrote testdata, rerun test", name)
return nil
} else if err != nil {
t.Log("set UPDATE_FIXTURE_DATA=true to write expected testdata")
t.Errorf("%s: %v", name, err)
return nil
} else {
t.Log("set UPDATE_FIXTURE_DATA=true to write expected testdata")
t.Log(cmp.Diff(string(data), string(expectData)))
t.Errorf("%s: diff", name)
return nil
}
}
return nil
})
if err != nil {
t.Error(err)
}
}
|
[
"\"GOPATH\"",
"\"GOFLAGS\"",
"\"GOPROXY\"",
"\"GO111MODULE\"",
"\"UPDATE_FIXTURE_DATA\""
] |
[] |
[
"GO111MODULE",
"GOPATH",
"GOFLAGS",
"UPDATE_FIXTURE_DATA",
"GOPROXY"
] |
[]
|
["GO111MODULE", "GOPATH", "GOFLAGS", "UPDATE_FIXTURE_DATA", "GOPROXY"]
|
go
| 5 | 0 | |
fuzzers/eclipser/fuzzer.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration code for Eclipser fuzzer."""
import os
import subprocess
import time
from multiprocessing import Process
from fuzzers import utils
def build():
"""Build benchmark."""
# QEMU does not work with sanitizers.
# See https://github.com/SoftSec-KAIST/Eclipser/issues/5
os.environ['CC'] = 'clang'
os.environ['CXX'] = 'clang++'
os.environ['FUZZER_LIB'] = '/libStandaloneFuzzTarget.a'
utils.build_benchmark()
def fuzz(input_corpus, output_corpus, target_binary):
"""Run fuzzer."""
# Create an encoded temp corpus directory.
encoded_temp_corpus = os.path.join(os.path.dirname(input_corpus),
'temp-corpus')
if not os.path.exists(encoded_temp_corpus):
os.mkdir(encoded_temp_corpus)
print('[run_fuzzer] Running target with Eclipser')
command = [
'dotnet',
'/Eclipser/build/Eclipser.dll',
'fuzz',
'-p',
target_binary,
'-t',
'1048576', # FIXME: Find the max value allowed here.
'-o',
encoded_temp_corpus,
'--src',
'file',
'--initarg',
'foo', # Specifies how command line argument is passed, just a file.
'-f',
'foo',
'--maxfilelen',
# Default is too low (8 bytes), match experiment config at:
# https://github.com/SoftSec-KAIST/Eclipser-Artifact/blob/6aadf02eeadb0416bd4c5edeafc8627bc24ebc82/docker-scripts/experiment-scripts/package-exp/run_eclipser.sh#L25
'1048576',
# Default is low (0.5 sec), recommended to use higher:
# https://github.com/google/fuzzbench/issues/70#issuecomment-596060572
'--exectimeout',
'2000',
]
if os.listdir(input_corpus): # Important, otherwise Eclipser crashes.
command += ['-i', input_corpus]
print('[run_fuzzer] Running command: ' + ' '.join(command))
subprocess.Popen(command)
process = Process(target=copy_corpus_directory,
args=(
encoded_temp_corpus,
output_corpus,
))
process.start()
def copy_corpus_directory(encoded_temp_corpus, output_corpus):
"""Copies corpus periodically from encoded corpus directory into output
directory."""
while True:
# Wait for initial fuzzer initialization, and after every copy.
time.sleep(120)
subprocess.check_call([
'dotnet',
'/Eclipser/build/Eclipser.dll',
'decode',
'-i',
os.path.join(encoded_temp_corpus, 'testcase'),
'-o',
output_corpus,
])
|
[] |
[] |
[
"CXX",
"CC",
"FUZZER_LIB"
] |
[]
|
["CXX", "CC", "FUZZER_LIB"]
|
python
| 3 | 0 | |
tests/test_sts.py
|
# -*- coding: utf-8 -*-
import requests
from common import *
if oss2.compat.is_py2:
from aliyunsdkcore import client
from aliyunsdksts.request.v20150401 import AssumeRoleRequest
import json
class StsToken(object):
def __init__(self):
self.access_key_id = ''
self.access_key_secret = ''
self.expiration = 0
self.security_token = ''
self.request_id = ''
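# Assume the given RAM role through the STS service and wrap the temporary
# credentials in a StsToken instance.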
def fetch_sts_token(access_key_id, access_key_secret, role_arn):
clt = client.AcsClient(access_key_id, access_key_secret, OSS_REGION)
req = AssumeRoleRequest.AssumeRoleRequest()
req.set_accept_format('json')
req.set_RoleArn(role_arn)
req.set_RoleSessionName('oss-python-sdk-test')
body = clt.do_action_with_exception(req)
j = json.loads(oss2.to_unicode(body))
token = StsToken()
token.access_key_id = j['Credentials']['AccessKeyId']
token.access_key_secret = j['Credentials']['AccessKeySecret']
token.security_token = j['Credentials']['SecurityToken']
token.request_id = j['RequestId']
token.expiration = oss2.utils.to_unixtime(j['Credentials']['Expiration'], '%Y-%m-%dT%H:%M:%SZ')
return token
class TestSTSAuth(oss2.StsAuth):
def __init__(self, access_key_id, access_key_secret, security_token):
super(TestSTSAuth, self).__init__(access_key_id,
access_key_secret,
security_token,
os.getenv('OSS_TEST_AUTH_VERSION'))
oss2.StsAuth = TestSTSAuth
class TestSts(unittest.TestCase):
def setUp(self):
self.bucket = None
self.key_list = []
self.prefix = 'sts-' + random_string(8) + '/'
def tearDown(self):
if self.bucket is not None:
delete_keys(self.bucket, self.key_list)
def random_key(self, suffix=''):
key = self.prefix + random_string(12) + suffix
self.key_list.append(key)
return key
def init_bucket(self):
self.token = fetch_sts_token(OSS_STS_ID, OSS_STS_KEY, OSS_STS_ARN)
auth = oss2.StsAuth(self.token.access_key_id, self.token.access_key_secret, self.token.security_token)
self.bucket = oss2.Bucket(auth, OSS_ENDPOINT, OSS_BUCKET)
def test_object(self):
self.init_bucket()
key = self.random_key()
content = b'hello world'
self.bucket.put_object(key, content)
self.assertEqual(self.bucket.get_object(key).read(), content)
self.bucket.delete_object(key)
def test_bucket(self):
self.init_bucket()
# just make sure no exception being thrown
self.bucket.get_bucket_referer()
def test_url(self):
self.init_bucket()
key = self.random_key()
content = b'Ali Baba'
self.bucket.put_object(key, content)
url = self.bucket.sign_url('GET', key, 60, params={'para1':'test'})
resp = requests.get(url)
self.assertEqual(content, resp.content)
def test_rtmp(self):
channel_name = 'test-sign-rtmp-url'
self.init_bucket()
self.bucket.list_live_channel()
url = self.bucket.sign_rtmp_url(channel_name, 'test.m3u8', 3600)
self.assertTrue('security-token=' in url)
class TestSign(TestSts):
"""
This class mainly exists to increase test coverage: when the environment variable is oss2.AUTH_VERSION_2 it is reset to oss2.AUTH_VERSION_1 before running TestSts again, and vice versa.
"""
def __init__(self, *args, **kwargs):
super(TestSign, self).__init__(*args, **kwargs)
def setUp(self):
if os.getenv('OSS_TEST_AUTH_VERSION') == oss2.AUTH_VERSION_2:
os.environ['OSS_TEST_AUTH_VERSION'] = oss2.AUTH_VERSION_1
else:
os.environ['OSS_TEST_AUTH_VERSION'] = oss2.AUTH_VERSION_2
super(TestSign, self).setUp()
def tearDown(self):
if os.getenv('OSS_TEST_AUTH_VERSION') == oss2.AUTH_VERSION_2:
os.environ['OSS_TEST_AUTH_VERSION'] = oss2.AUTH_VERSION_1
else:
os.environ['OSS_TEST_AUTH_VERSION'] = oss2.AUTH_VERSION_2
super(TestSign, self).tearDown()
|
[] |
[] |
[
"OSS_TEST_AUTH_VERSION"
] |
[]
|
["OSS_TEST_AUTH_VERSION"]
|
python
| 1 | 0 | |
node/node_test.go
|
package node_test
import (
"os"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api/test"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/lib/lotuslog"
builder "github.com/filecoin-project/lotus/node/test"
logging "github.com/ipfs/go-log/v2"
)
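// init enables INFO logging and lowers the consensus and proof policy
// parameters so the integration tests can run against 2KiB test sectors.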
func init() {
_ = logging.SetLogLevel("*", "INFO")
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
}
func TestAPI(t *testing.T) {
test.TestApis(t, builder.Builder)
}
func TestAPIRPC(t *testing.T) {
test.TestApis(t, builder.RPCBuilder)
}
func TestAPIDealFlow(t *testing.T) {
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
blockTime := 10 * time.Millisecond
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
dealStartEpoch := abi.ChainEpoch(2 << 12)
t.Run("TestDealFlow", func(t *testing.T) {
test.TestDealFlow(t, builder.MockSbBuilder, blockTime, false, false, dealStartEpoch)
})
t.Run("WithExportedCAR", func(t *testing.T) {
test.TestDealFlow(t, builder.MockSbBuilder, blockTime, true, false, dealStartEpoch)
})
t.Run("TestDoubleDealFlow", func(t *testing.T) {
test.TestDoubleDealFlow(t, builder.MockSbBuilder, blockTime, dealStartEpoch)
})
t.Run("TestFastRetrievalDealFlow", func(t *testing.T) {
test.TestFastRetrievalDealFlow(t, builder.MockSbBuilder, blockTime, dealStartEpoch)
})
t.Run("TestPublishDealsBatching", func(t *testing.T) {
test.TestPublishDealsBatching(t, builder.MockSbBuilder, blockTime, dealStartEpoch)
})
t.Run("TestBatchDealInput", func(t *testing.T) {
test.TestBatchDealInput(t, builder.MockSbBuilder, blockTime, dealStartEpoch)
})
}
func TestAPIDealFlowReal(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
lotuslog.SetupLogLevels()
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
// TODO: just set this globally?
oldDelay := policy.GetPreCommitChallengeDelay()
policy.SetPreCommitChallengeDelay(5)
t.Cleanup(func() {
policy.SetPreCommitChallengeDelay(oldDelay)
})
t.Run("basic", func(t *testing.T) {
test.TestDealFlow(t, builder.Builder, time.Second, false, false, 0)
})
t.Run("fast-retrieval", func(t *testing.T) {
test.TestDealFlow(t, builder.Builder, time.Second, false, true, 0)
})
t.Run("retrieval-second", func(t *testing.T) {
test.TestSecondDealRetrieval(t, builder.Builder, time.Second)
})
}
func TestDealMining(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestDealMining(t, builder.MockSbBuilder, 50*time.Millisecond, false)
}
func TestSDRUpgrade(t *testing.T) {
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
oldDelay := policy.GetPreCommitChallengeDelay()
policy.SetPreCommitChallengeDelay(5)
t.Cleanup(func() {
policy.SetPreCommitChallengeDelay(oldDelay)
})
test.TestSDRUpgrade(t, builder.MockSbBuilder, 50*time.Millisecond)
}
func TestPledgeSectors(t *testing.T) {
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
t.Run("1", func(t *testing.T) {
test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 1)
})
t.Run("100", func(t *testing.T) {
test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 100)
})
t.Run("1000", func(t *testing.T) {
if testing.Short() { // takes ~16s
t.Skip("skipping test in short mode")
}
test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 1000)
})
}
func TestTapeFix(t *testing.T) {
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestTapeFix(t, builder.MockSbBuilder, 2*time.Millisecond)
}
func TestWindowedPost(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestWindowPost(t, builder.MockSbBuilder, 2*time.Millisecond, 10)
}
func TestTerminate(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestTerminate(t, builder.MockSbBuilder, 2*time.Millisecond)
}
func TestCCUpgrade(t *testing.T) {
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestCCUpgrade(t, builder.MockSbBuilder, 5*time.Millisecond)
}
func TestPaymentChannels(t *testing.T) {
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("pubsub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestPaymentChannels(t, builder.MockSbBuilder, 5*time.Millisecond)
}
func TestWindowPostDispute(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestWindowPostDispute(t, builder.MockSbBuilder, 2*time.Millisecond)
}
func TestWindowPostDisputeFails(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestWindowPostDisputeFails(t, builder.MockSbBuilder, 2*time.Millisecond)
}
func TestDeadlineToggling(t *testing.T) {
if os.Getenv("LOTUS_TEST_DEADLINE_TOGGLING") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_DEADLINE_TOGGLING=1 to run")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "FATAL")
test.TestDeadlineToggling(t, builder.MockSbBuilder, 2*time.Millisecond)
}
|
[
"\"LOTUS_TEST_WINDOW_POST\"",
"\"LOTUS_TEST_WINDOW_POST\"",
"\"LOTUS_TEST_WINDOW_POST\"",
"\"LOTUS_TEST_WINDOW_POST\"",
"\"LOTUS_TEST_DEADLINE_TOGGLING\""
] |
[] |
[
"LOTUS_TEST_DEADLINE_TOGGLING",
"LOTUS_TEST_WINDOW_POST"
] |
[]
|
["LOTUS_TEST_DEADLINE_TOGGLING", "LOTUS_TEST_WINDOW_POST"]
|
go
| 2 | 0 | |
dotinstall/util/format.py
|
import subprocess
def get_output(cmd):
ret = ""
try:
ret = subprocess.check_output(cmd.split()).decode("utf-8")
except subprocess.CalledProcessError: # pragma: no cover
pass
return ret
class Format(object):
BOLD = get_output("tput bold")
RESET = get_output("tput sgr0")
BLUE = get_output("tput setaf 4")
GREEN = get_output("tput setaf 2")
YELLOW = get_output("tput setaf 3")
RED = get_output("tput setaf 1")
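# Hypothetical usage sketch (assumes a terminal where the tput calls above
# produced non-empty escape sequences):
#
#     print(Format.BOLD + Format.GREEN + "install ok" + Format.RESET)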
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
examples/pwr_run/checkpointing/final/no_safeguard/job68.py
|
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.007
args_model = 'densenet121'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_no_safeguard/' + job_name + '*'
total_epochs = 19
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '121' in args_model:
base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '169' in args_model:
base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '201' in args_model:
base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
model.add(base_model)
#model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_no_safeguard/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
internal/grpctest/tlogger.go
|
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpctest
import (
"errors"
"fmt"
"os"
"regexp"
"runtime/debug"
"strconv"
"strings"
"sync"
"testing"
"github.com/seacomandor/grpc-go/grpclog"
)
// TLogger serves as the grpclog logger and is the interface through which
// expected errors are declared in tests.
var TLogger *tLogger
const callingFrame = 4
type logType int
const (
logLog logType = iota
errorLog
fatalLog
)
type tLogger struct {
v int
t *testing.T
initialized bool
m sync.Mutex // protects errors
errors map[*regexp.Regexp]int
}
func init() {
TLogger = &tLogger{errors: map[*regexp.Regexp]int{}}
vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
if vl, err := strconv.Atoi(vLevel); err == nil {
TLogger.v = vl
}
}
// getStackFrame gets, from the stack byte string, the appropriate stack frame.
func getStackFrame(stack []byte, frame int) (string, error) {
s := strings.Split(string(stack), "\n")
if frame >= (len(s)-1)/2 {
return "", errors.New("frame request out-of-bounds")
}
split := strings.Split(strings.Fields(s[(frame*2)+2][1:])[0], "/")
return fmt.Sprintf("%v:", split[len(split)-1]), nil
}
// log logs the message with the specified parameters to the tLogger.
func (g *tLogger) log(ltype logType, depth int, format string, args ...interface{}) {
s := debug.Stack()
prefix, err := getStackFrame(s, callingFrame+depth)
args = append([]interface{}{prefix}, args...)
if err != nil {
g.t.Error(err)
return
}
if format == "" {
switch ltype {
case errorLog:
// fmt.Sprintln is used rather than fmt.Sprint because t.Log uses fmt.Sprintln behavior.
if g.expected(fmt.Sprintln(args...)) {
g.t.Log(args...)
} else {
g.t.Error(args...)
}
case fatalLog:
panic(fmt.Sprint(args...))
default:
g.t.Log(args...)
}
} else {
format = "%v " + format
switch ltype {
case errorLog:
if g.expected(fmt.Sprintf(format, args...)) {
g.t.Logf(format, args...)
} else {
g.t.Errorf(format, args...)
}
case fatalLog:
panic(fmt.Sprintf(format, args...))
default:
g.t.Logf(format, args...)
}
}
}
// Update updates the testing.T that the testing logger logs to. Should be done
// before every test. It also initializes the tLogger if it has not already.
func (g *tLogger) Update(t *testing.T) {
if !g.initialized {
grpclog.SetLoggerV2(TLogger)
g.initialized = true
}
g.t = t
g.m.Lock()
defer g.m.Unlock()
g.errors = map[*regexp.Regexp]int{}
}
// ExpectError declares an error to be expected. For the next test, the first
// error log matching the expression (using FindString) will not cause the test
// to fail. "For the next test" includes all the time until the next call to
// Update(). Note that if an expected error is not encountered, this will cause
// the test to fail.
func (g *tLogger) ExpectError(expr string) {
g.ExpectErrorN(expr, 1)
}
// ExpectErrorN declares an error to be expected n times.
func (g *tLogger) ExpectErrorN(expr string, n int) {
re, err := regexp.Compile(expr)
if err != nil {
g.t.Error(err)
return
}
g.m.Lock()
defer g.m.Unlock()
g.errors[re] += n
}
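// A minimal, hypothetical usage sketch of the expected-error flow described
// above, as seen from a test in another package (the test body and the error
// text are assumptions for illustration only):
//
//	func TestSomething(t *testing.T) {
//		grpctest.TLogger.Update(t)
//		grpctest.TLogger.ExpectError("connection error")
//		// ... exercise code that logs a matching error exactly once ...
//		grpctest.TLogger.EndTest(t)
//	}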
// EndTest checks if expected errors were not encountered.
func (g *tLogger) EndTest(t *testing.T) {
g.m.Lock()
defer g.m.Unlock()
for re, count := range g.errors {
if count > 0 {
t.Errorf("Expected error '%v' not encountered", re.String())
}
}
g.errors = map[*regexp.Regexp]int{}
}
// expected determines if the error string is protected or not.
func (g *tLogger) expected(s string) bool {
g.m.Lock()
defer g.m.Unlock()
for re, count := range g.errors {
if re.FindStringIndex(s) != nil {
g.errors[re]--
if count <= 1 {
delete(g.errors, re)
}
return true
}
}
return false
}
func (g *tLogger) Info(args ...interface{}) {
g.log(logLog, 0, "", args...)
}
func (g *tLogger) Infoln(args ...interface{}) {
g.log(logLog, 0, "", args...)
}
func (g *tLogger) Infof(format string, args ...interface{}) {
g.log(logLog, 0, format, args...)
}
func (g *tLogger) InfoDepth(depth int, args ...interface{}) {
g.log(logLog, depth, "", args...)
}
func (g *tLogger) Warning(args ...interface{}) {
g.log(logLog, 0, "", args...)
}
func (g *tLogger) Warningln(args ...interface{}) {
g.log(logLog, 0, "", args...)
}
func (g *tLogger) Warningf(format string, args ...interface{}) {
g.log(logLog, 0, format, args...)
}
func (g *tLogger) WarningDepth(depth int, args ...interface{}) {
g.log(logLog, depth, "", args...)
}
func (g *tLogger) Error(args ...interface{}) {
g.log(errorLog, 0, "", args...)
}
func (g *tLogger) Errorln(args ...interface{}) {
g.log(errorLog, 0, "", args...)
}
func (g *tLogger) Errorf(format string, args ...interface{}) {
g.log(errorLog, 0, format, args...)
}
func (g *tLogger) ErrorDepth(depth int, args ...interface{}) {
g.log(errorLog, depth, "", args...)
}
func (g *tLogger) Fatal(args ...interface{}) {
g.log(fatalLog, 0, "", args...)
}
func (g *tLogger) Fatalln(args ...interface{}) {
g.log(fatalLog, 0, "", args...)
}
func (g *tLogger) Fatalf(format string, args ...interface{}) {
g.log(fatalLog, 0, format, args...)
}
func (g *tLogger) FatalDepth(depth int, args ...interface{}) {
g.log(fatalLog, depth, "", args...)
}
func (g *tLogger) V(l int) bool {
return l <= g.v
}
|
[
"\"GRPC_GO_LOG_VERBOSITY_LEVEL\""
] |
[] |
[
"GRPC_GO_LOG_VERBOSITY_LEVEL"
] |
[]
|
["GRPC_GO_LOG_VERBOSITY_LEVEL"]
|
go
| 1 | 0 | |
tests/pkg/config/config.go
|
package config
import (
"errors"
"io/ioutil"
"os"
"gopkg.in/yaml.v2"
)
// Config holds the basic structure of test's YAML file
type Config struct {
ClusterName string `yaml:"clusterName"`
Namespaces map[string]K8SObjects `yaml:"namespaces"`
ExternalDNS ExternalDNS `yaml:"externalDNS"`
NginxIngressController NginxIngressController `yaml:"nginxIngressController"`
}
// K8SObjects are kubernetes objects nested under namespaces; these resources
// are checked for existence
type K8SObjects struct {
Daemonsets []string `yaml:"daemonsets"`
Services []string `yaml:"services"`
Secrets []string `yaml:"secrets"`
}
// NginxIngressController holds the config for nginx ingress controller component
type NginxIngressController struct {
NamespacePrefix string `yaml:"namespacePrefix"`
}
// ParseConfigFile loads the test file supplied
func ParseConfigFile(f string) (*Config, error) {
testsFilePath, err := ioutil.ReadFile(f)
if err != nil {
return nil, err
}
t := Config{}
err = yaml.Unmarshal(testsFilePath, &t)
if err != nil {
return nil, err
}
err = t.defaultsFromEnvs()
if err != nil {
return nil, err
}
return &t, nil
}
// defaultsFromEnvs processes the mandatory fields in the config. If they are not set,
// it tries to load them from environment variables
func (c *Config) defaultsFromEnvs() error {
if c.ClusterName == "" {
c.ClusterName = os.Getenv("CP_CLUSTER_NAME")
if c.ClusterName == "" {
return errors.New("Cluster Name is mandatory - not found it neither in config file nor environment variable")
}
}
return nil
}
// GetDaemonSets returns the daemonsets declared under each namespace of the
// config, keyed by namespace
func (c *Config) GetDaemonSets() map[string][]string {
r := make(map[string][]string)
for ns, val := range c.Namespaces {
var daemonSets []string
for _, ds := range val.Daemonsets {
daemonSets = append(daemonSets, ds)
}
if len(daemonSets) > 0 {
r[ns] = daemonSets
}
}
return r
}
|
[
"\"CP_CLUSTER_NAME\""
] |
[] |
[
"CP_CLUSTER_NAME"
] |
[]
|
["CP_CLUSTER_NAME"]
|
go
| 1 | 0 | |
vendor/github.com/ProtonMail/go-autostart/autostart_xdg.go
|
// +build !windows,!darwin
package autostart
import (
"os"
"path/filepath"
"strings"
"text/template"
)
const desktopTemplate = `[Desktop Entry]
Type=Application
Name={{.DisplayName}}
Exec={{.Exec}}
{{- if .Icon}}Icon={{.Icon}}{{end}}
X-GNOME-Autostart-enabled=true
`
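// For illustration, a hypothetical App{DisplayName: "My App",
// Exec: []string{"/usr/bin/myapp", "--minimized"}} with no Icon would render
// the template above roughly as:
//
//	[Desktop Entry]
//	Type=Application
//	Name=My App
//	Exec=/usr/bin/myapp --minimized
//	X-GNOME-Autostart-enabled=true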
var autostartDir string
func init() {
if os.Getenv("XDG_CONFIG_HOME") != "" {
autostartDir = os.Getenv("XDG_CONFIG_HOME")
} else {
autostartDir = filepath.Join(os.Getenv("HOME"), ".config")
}
autostartDir = filepath.Join(autostartDir, "autostart")
}
func (a *App) path() string {
return filepath.Join(autostartDir, a.Name+".desktop")
}
// Check if the app is enabled on startup.
func (a *App) IsEnabled() bool {
_, err := os.Stat(a.path())
return err == nil
}
type app struct {
*App
}
// Override App.Exec to return a string.
func (a *app) Exec() string {
return strings.Join(a.App.Exec, " ")
}
// Enable this app on startup.
func (a *App) Enable() error {
t := template.Must(template.New("desktop").Parse(desktopTemplate))
if err := os.MkdirAll(autostartDir, 0777); err != nil {
return err
}
f, err := os.Create(a.path())
if err != nil {
return err
}
defer f.Close()
return t.Execute(f, &app{a})
}
// Disable this app on startup.
func (a *App) Disable() error {
return os.Remove(a.path())
}
|
[
"\"XDG_CONFIG_HOME\"",
"\"XDG_CONFIG_HOME\"",
"\"HOME\""
] |
[] |
[
"HOME",
"XDG_CONFIG_HOME"
] |
[]
|
["HOME", "XDG_CONFIG_HOME"]
|
go
| 2 | 0 | |
python/ray/tune/suggest/sigopt.py
|
import copy
import os
import logging
import pickle
from typing import Dict, List, Optional, Union
try:
import sigopt as sgo
Connection = sgo.Connection
except ImportError:
sgo = None
Connection = None
from ray.tune.suggest import Searcher
logger = logging.getLogger(__name__)
class SigOptSearch(Searcher):
"""A wrapper around SigOpt to provide trial suggestions.
You must install SigOpt and have a SigOpt API key to use this module.
Store the API token as an environment variable ``SIGOPT_KEY`` as follows:
.. code-block:: bash
pip install -U sigopt
export SIGOPT_KEY= ...
You will need to use the `SigOpt experiment and space specification
<https://app.sigopt.com/docs/overview/create>`_.
This module manages its own concurrency.
Parameters:
space (list of dict): SigOpt configuration. Parameters will be sampled
from this configuration and will be used to override
parameters generated in the variant generation process.
Not used if existing experiment_id is given
name (str): Name of experiment. Required by SigOpt.
max_concurrent (int): Number of maximum concurrent trials supported
based on the user's SigOpt plan. Defaults to 1.
connection (Connection): An existing connection to SigOpt.
experiment_id (str): Optional, if given will connect to an existing
experiment. This allows for a more interactive experience with
SigOpt, such as prior beliefs and constraints.
observation_budget (int): Optional, can improve SigOpt performance.
project (str): Optional, Project name to assign this experiment to.
SigOpt can group experiments by project
metric (str or list(str)): If str then the training result
objective value attribute. If list(str) then a list of
metrics that can be optimized together. SigOpt currently
supports up to 2 metrics.
mode (str or list(str)): If experiment_id is given then this
field is ignored. If str, it must be one of {min, max}.
If list then must be comprised of {min, max, obs}. Determines
whether objective is minimizing or maximizing the metric
attribute. If metrics is a list then mode must be a list
of the same length as metric.
Example:
.. code-block:: python
space = [
{
'name': 'width',
'type': 'int',
'bounds': {
'min': 0,
'max': 20
},
},
{
'name': 'height',
'type': 'int',
'bounds': {
'min': -100,
'max': 100
},
},
]
algo = SigOptSearch(
space, name="SigOpt Example Experiment",
max_concurrent=1, metric="mean_loss", mode="min")
Example:
.. code-block:: python
space = [
{
'name': 'width',
'type': 'int',
'bounds': {
'min': 0,
'max': 20
},
},
{
'name': 'height',
'type': 'int',
'bounds': {
'min': -100,
'max': 100
},
},
]
algo = SigOptSearch(
space, name="SigOpt Multi Objective Example Experiment",
max_concurrent=1, metric=["average", "std"], mode=["max", "min"])
"""
OBJECTIVE_MAP = {
"max": {
"objective": "maximize",
"strategy": "optimize"
},
"min": {
"objective": "minimize",
"strategy": "optimize"
},
"obs": {
"strategy": "store"
}
}
def __init__(self,
space: List[Dict] = None,
name: str = "Default Tune Experiment",
max_concurrent: int = 1,
connection: Optional[Connection] = None,
experiment_id: Optional[str] = None,
observation_budget: Optional[int] = None,
project: Optional[str] = None,
metric: Union[None, str, List[str]] = "episode_reward_mean",
mode: Union[None, str, List[str]] = "max",
points_to_evaluate: Optional[List[Dict]] = None,
**kwargs):
assert (experiment_id is
None) ^ (space is None), "space xor experiment_id must be set"
assert type(max_concurrent) is int and max_concurrent > 0
if connection is not None:
self.conn = connection
else:
assert sgo is not None, """SigOpt must be installed!
You can install SigOpt with the command:
`pip install -U sigopt`."""
assert "SIGOPT_KEY" in os.environ, \
"SigOpt API key must be stored as " \
"environ variable at SIGOPT_KEY"
# Create a connection with SigOpt API, requires API key
self.conn = sgo.Connection(client_token=os.environ["SIGOPT_KEY"])
self._max_concurrent = max_concurrent
if isinstance(metric, str):
metric = [metric]
mode = [mode]
self._metric = metric
self._live_trial_mapping = {}
if experiment_id is None:
sigopt_params = dict(
name=name,
parameters=space,
parallel_bandwidth=self._max_concurrent)
if observation_budget is not None:
sigopt_params["observation_budget"] = observation_budget
if project is not None:
sigopt_params["project"] = project
if len(metric) > 1 and observation_budget is None:
raise ValueError(
"observation_budget is required for an"
"experiment with more than one optimized metric")
sigopt_params["metrics"] = self.serialize_metric(metric, mode)
self.experiment = self.conn.experiments().create(**sigopt_params)
else:
self.experiment = self.conn.experiments(experiment_id).fetch()
self._points_to_evaluate = points_to_evaluate
super(SigOptSearch, self).__init__(metric=metric, mode=mode, **kwargs)
def suggest(self, trial_id: str):
if self._max_concurrent:
if len(self._live_trial_mapping) >= self._max_concurrent:
return None
suggestion_kwargs = {}
if self._points_to_evaluate:
config = self._points_to_evaluate.pop(0)
suggestion_kwargs = {"assignments": config}
# Get new suggestion from SigOpt
suggestion = self.conn.experiments(
self.experiment.id).suggestions().create(**suggestion_kwargs)
self._live_trial_mapping[trial_id] = suggestion.id
return copy.deepcopy(suggestion.assignments)
def on_trial_complete(self,
trial_id: str,
result: Optional[Dict] = None,
error: bool = False):
"""Notification for the completion of trial.
If a trial fails, it will be reported as a failed Observation, telling
the optimizer that the Suggestion led to a metric failure, which
updates the feasible region and improves parameter recommendation.
Creates SigOpt Observation object for trial.
"""
if result:
payload = dict(
suggestion=self._live_trial_mapping[trial_id],
values=self.serialize_result(result))
self.conn.experiments(
self.experiment.id).observations().create(**payload)
# Update the experiment object
self.experiment = self.conn.experiments(self.experiment.id).fetch()
elif error:
# Reports a failed Observation
self.conn.experiments(self.experiment.id).observations().create(
failed=True, suggestion=self._live_trial_mapping[trial_id])
del self._live_trial_mapping[trial_id]
@staticmethod
def serialize_metric(metrics: List[str], modes: List[str]):
"""
Converts metrics to https://app.sigopt.com/docs/objects/metric
"""
serialized_metric = []
for metric, mode in zip(metrics, modes):
serialized_metric.append(
dict(name=metric, **SigOptSearch.OBJECTIVE_MAP[mode].copy()))
return serialized_metric
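# Worked example based on OBJECTIVE_MAP above (metric names are placeholders):
# serialize_metric(["average", "std"], ["max", "min"]) ->
# [{"name": "average", "objective": "maximize", "strategy": "optimize"},
#  {"name": "std", "objective": "minimize", "strategy": "optimize"}]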
def serialize_result(self, result: Dict):
"""
Converts experiments results to
https://app.sigopt.com/docs/objects/metric_evaluation
"""
missing_scores = [
metric for metric in self._metric if metric not in result
]
if missing_scores:
raise ValueError(
f"Some metrics specified during initialization are missing. "
f"Missing metrics: {missing_scores}, provided result {result}")
values = []
for metric in self._metric:
value = dict(name=metric, value=result[metric])
values.append(value)
return values
def save(self, checkpoint_path: str):
trials_object = (self.experiment.id, self._live_trial_mapping,
self._points_to_evaluate)
with open(checkpoint_path, "wb") as outputFile:
pickle.dump(trials_object, outputFile)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "rb") as inputFile:
trials_object = pickle.load(inputFile)
experiment_id, self._live_trial_mapping, self._points_to_evaluate = \
trials_object
self.experiment = self.conn.experiments(experiment_id).fetch()
|
[] |
[] |
[
"SIGOPT_KEY"
] |
[]
|
["SIGOPT_KEY"]
|
python
| 1 | 0 | |
metaflow/plugins/conda/conda_step_decorator.py
|
import os
import sys
from hashlib import sha1
from multiprocessing.dummy import Pool
import platform
import requests
import shutil
import tempfile
try:
from urlparse import urlparse
except:
from urllib.parse import urlparse
from metaflow.datastore.local import LocalDataStore
from metaflow.decorators import StepDecorator
from metaflow.environment import InvalidEnvironmentException
from metaflow.metadata import MetaDatum
from metaflow.metaflow_config import get_pinned_conda_libs, CONDA_PACKAGE_S3ROOT
from metaflow.util import get_metaflow_root
from metaflow.datatools import S3
from . import read_conda_manifest, write_to_conda_manifest
from .conda import Conda
try:
unicode
except NameError:
unicode = str
basestring = str
class CondaStepDecorator(StepDecorator):
"""
Conda decorator that sets the Conda environment for your step
To use, add this decorator to your step:
```
@conda
@step
def MyStep(self):
...
```
Information in this decorator overrides settings from any @conda_base flow-level decorator.
Parameters
----------
libraries : Dict
Libraries to use for this flow. The key is the name of the package and the value
is the version to use. Defaults to {}
python : string
Version of Python to use (for example: '3.7.4'). Defaults to None
(will use the current python version)
disabled : bool
If set to True, disables Conda. Defaults to False
"""
name = 'conda'
defaults = {'libraries': {},
'python': None,
'disabled': None,
'env_name': None}
conda = None
environments = None
def _get_base_attributes(self):
if 'conda_base' in self.flow._flow_decorators:
return self.flow._flow_decorators['conda_base'].attributes
return self.defaults
def _python_version(self):
return next(x for x in [
self.attributes['python'],
self.base_attributes['python'],
platform.python_version()] if x is not None)
def is_enabled(self):
return not next(x for x in [
self.attributes['disabled'],
self.base_attributes['disabled'],
False] if x is not None)
def _lib_deps(self):
deps = get_pinned_conda_libs()
base_deps = self.base_attributes['libraries']
deps.update(base_deps)
step_deps = self.attributes['libraries']
if isinstance(step_deps, (unicode, basestring)):
step_deps = step_deps.strip('"{}\'')
if step_deps:
step_deps = dict(map(lambda x: x.strip().strip('"\''), a.split(':')) for a in step_deps.split(','))
deps.update(step_deps)
return deps
def _step_deps(self):
deps = [b'python==%s' % self._python_version().encode()]
deps.extend(b'%s==%s' % (name.encode('ascii'), ver.encode('ascii'))
for name, ver in self._lib_deps().items())
return deps
def _env_id(self):
deps = self._step_deps()
return 'metaflow_%s_%s_%s' % (self.flow.name,
self.architecture,
sha1(b' '.join(sorted(deps))).hexdigest())
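# For example (hypothetical flow name and platform), _env_id() yields a name like
# 'metaflow_MyFlow_linux-64_<40-hex-char sha1 of the sorted dependency list>'.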
def _resolve_step_environment(self, ds_root, force=False):
env_id = self._env_id()
cached_deps = read_conda_manifest(ds_root, self.flow.name)
if CondaStepDecorator.conda is None:
CondaStepDecorator.conda = Conda()
CondaStepDecorator.environments = CondaStepDecorator.conda.environments(self.flow.name)
if force or env_id not in cached_deps or 'cache_urls' not in cached_deps[env_id]:
if force or env_id not in cached_deps:
deps = self._step_deps()
(exact_deps, urls, order) = \
self.conda.create(self.step, env_id, deps, architecture=self.architecture)
payload = {
'explicit': exact_deps,
'deps': [d.decode('ascii') for d in deps],
'urls': urls,
'order': order
}
else:
payload = cached_deps[env_id]
if self.datastore.TYPE == 's3' and 'cache_urls' not in payload:
payload['cache_urls'] = self._cache_env()
write_to_conda_manifest(ds_root, self.flow.name, env_id, payload)
CondaStepDecorator.environments = CondaStepDecorator.conda.environments(self.flow.name)
return env_id
def _cache_env(self):
def _download(entry):
url, local_path = entry
with requests.get(url, stream=True) as r:
with open(local_path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
env_id = self._env_id()
files = []
to_download = []
for package_info in self.conda.package_info(env_id):
url = urlparse(package_info['url'])
path = os.path.join(CONDA_PACKAGE_S3ROOT,
url.netloc,
url.path.lstrip('/'),
package_info['md5'],
package_info['fn'])
tarball_path = package_info['package_tarball_full_path']
if tarball_path.endswith('.conda'):
#Conda doesn't set the metadata correctly for certain fields
# when the underlying OS is spoofed.
tarball_path = tarball_path[:-6]
if not tarball_path.endswith('.tar.bz2'):
tarball_path = '%s.tar.bz2' % tarball_path
if not os.path.isfile(tarball_path):
# The tarball may be missing when the user invokes `conda clean`!
to_download.append((package_info['url'], tarball_path))
files.append((path, tarball_path))
if to_download:
Pool(8).map(_download, to_download)
with S3() as s3:
s3.put_files(files, overwrite=False)
return [files[0] for files in files]
def _prepare_step_environment(self, step_name, ds_root):
env_id = self._resolve_step_environment(ds_root)
if env_id not in CondaStepDecorator.environments:
cached_deps = read_conda_manifest(ds_root, self.flow.name)
self.conda.create(self.step,
env_id,
cached_deps[env_id]['urls'],
architecture=self.architecture,
explicit=True)
CondaStepDecorator.environments = CondaStepDecorator.conda.environments(self.flow.name)
return env_id
def _architecture(self, decos):
for deco in decos:
if deco.name == 'batch':
# force conda resolution for linux-64 architectures
return 'linux-64'
bit = '32'
if platform.machine().endswith('64'):
bit = '64'
if platform.system() == 'Linux':
return 'linux-%s' % bit
elif platform.system() == 'Darwin':
return 'osx-%s' % bit
else:
raise InvalidEnvironmentException('The *@conda* decorator is not supported '
'outside of Linux and Darwin platforms')
def runtime_init(self, flow, graph, package, run_id):
# Create a symlink to installed version of metaflow to execute user code against
path_to_metaflow = os.path.join(get_metaflow_root(), 'metaflow')
self.metaflow_home = tempfile.mkdtemp(dir='/tmp')
os.symlink(path_to_metaflow, os.path.join(self.metaflow_home, 'metaflow'))
def step_init(self, flow, graph, step, decos, environment, datastore, logger):
if environment.TYPE != 'conda':
raise InvalidEnvironmentException('The *@conda* decorator requires '
'--environment=conda')
def _logger(line, **kwargs):
logger(line)
self.local_root = LocalDataStore.get_datastore_root_from_config(_logger)
environment.set_local_root(self.local_root)
self.architecture = self._architecture(decos)
self.step = step
self.flow = flow
self.datastore = datastore
self.base_attributes = self._get_base_attributes()
os.environ['PYTHONNOUSERSITE'] = '1'
def package_init(self, flow, step, environment):
if self.is_enabled():
self._prepare_step_environment(step, self.local_root)
def runtime_task_created(self, datastore, task_id, split_index, input_paths, is_cloned):
if self.is_enabled():
self.env_id = self._prepare_step_environment(self.step, self.local_root)
def task_pre_step(
self, step_name, ds, meta, run_id, task_id, flow, graph, retry_count, max_retries):
meta.register_metadata(run_id, step_name, task_id,
[MetaDatum(field='conda_env_id',
value=self._env_id(),
type='conda_env_id')])
def runtime_step_cli(self, cli_args, retry_count, max_user_code_retries):
if self.is_enabled() and 'batch' not in cli_args.commands:
python_path = self.metaflow_home
if os.environ.get('PYTHONPATH') is not None:
python_path = os.pathsep.join([os.environ['PYTHONPATH'], python_path])
if self.attributes['env_name']:
self.env_id = self.attributes['env_name']
cli_args.env['PYTHONPATH'] = python_path
cli_args.env['_METAFLOW_CONDA_ENV'] = self.env_id
cli_args.entrypoint[0] = self.conda.python(self.env_id)
def runtime_finished(self, exception):
shutil.rmtree(self.metaflow_home)
|
[] |
[] |
[
"PYTHONNOUSERSITE",
"PYTHONPATH"
] |
[]
|
["PYTHONNOUSERSITE", "PYTHONPATH"]
|
python
| 2 | 0 | |
external/src/github.com/se7enkings/quic-go/integrationtests/self/handshake_drop_test.go
|
package self_test
import (
"fmt"
mrand "math/rand"
"net"
"time"
quic "github.com/se7enkings/quic-go"
quicproxy "github.com/se7enkings/quic-go/integrationtests/tools/proxy"
"github.com/se7enkings/quic-go/internal/protocol"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
)
var directions = []quicproxy.Direction{quicproxy.DirectionIncoming, quicproxy.DirectionOutgoing, quicproxy.DirectionBoth}
type applicationProtocol struct {
name string
run func(protocol.VersionNumber)
}
var _ = Describe("Handshake drop tests", func() {
var (
proxy *quicproxy.QuicProxy
ln quic.Listener
)
startListenerAndProxy := func(dropCallback quicproxy.DropCallback, version protocol.VersionNumber) {
var err error
ln, err = quic.ListenAddr(
"localhost:0",
getTLSConfig(),
&quic.Config{
Versions: []protocol.VersionNumber{version},
},
)
Expect(err).ToNot(HaveOccurred())
serverPort := ln.Addr().(*net.UDPAddr).Port
proxy, err = quicproxy.NewQuicProxy("localhost:0", &quicproxy.Opts{
RemoteAddr: fmt.Sprintf("localhost:%d", serverPort),
DropPacket: dropCallback,
},
)
Expect(err).ToNot(HaveOccurred())
}
stochasticDropper := func(freq int) bool {
return mrand.Int63n(int64(freq)) == 0
}
clientSpeaksFirst := &applicationProtocol{
name: "client speaks first",
run: func(version protocol.VersionNumber) {
serverSessionChan := make(chan quic.Session)
go func() {
defer GinkgoRecover()
sess, err := ln.Accept()
Expect(err).ToNot(HaveOccurred())
defer sess.Close()
str, err := sess.AcceptStream()
Expect(err).ToNot(HaveOccurred())
b := make([]byte, 6)
_, err = gbytes.TimeoutReader(str, 10*time.Second).Read(b)
Expect(err).ToNot(HaveOccurred())
Expect(string(b)).To(Equal("foobar"))
serverSessionChan <- sess
}()
sess, err := quic.DialAddr(
fmt.Sprintf("localhost:%d", proxy.LocalPort()),
getTLSClientConfig(),
&quic.Config{Versions: []protocol.VersionNumber{version}},
)
Expect(err).ToNot(HaveOccurred())
str, err := sess.OpenStream()
Expect(err).ToNot(HaveOccurred())
_, err = str.Write([]byte("foobar"))
Expect(err).ToNot(HaveOccurred())
var serverSession quic.Session
Eventually(serverSessionChan, 10*time.Second).Should(Receive(&serverSession))
sess.Close()
serverSession.Close()
},
}
serverSpeaksFirst := &applicationProtocol{
name: "server speaks first",
run: func(version protocol.VersionNumber) {
serverSessionChan := make(chan quic.Session)
go func() {
defer GinkgoRecover()
sess, err := ln.Accept()
Expect(err).ToNot(HaveOccurred())
str, err := sess.OpenStream()
Expect(err).ToNot(HaveOccurred())
_, err = str.Write([]byte("foobar"))
Expect(err).ToNot(HaveOccurred())
serverSessionChan <- sess
}()
sess, err := quic.DialAddr(
fmt.Sprintf("localhost:%d", proxy.LocalPort()),
getTLSClientConfig(),
&quic.Config{Versions: []protocol.VersionNumber{version}},
)
Expect(err).ToNot(HaveOccurred())
str, err := sess.AcceptStream()
Expect(err).ToNot(HaveOccurred())
b := make([]byte, 6)
_, err = gbytes.TimeoutReader(str, 10*time.Second).Read(b)
Expect(err).ToNot(HaveOccurred())
Expect(string(b)).To(Equal("foobar"))
var serverSession quic.Session
Eventually(serverSessionChan, 10*time.Second).Should(Receive(&serverSession))
sess.Close()
serverSession.Close()
},
}
nobodySpeaks := &applicationProtocol{
name: "nobody speaks",
run: func(version protocol.VersionNumber) {
serverSessionChan := make(chan quic.Session)
go func() {
defer GinkgoRecover()
sess, err := ln.Accept()
Expect(err).ToNot(HaveOccurred())
serverSessionChan <- sess
}()
sess, err := quic.DialAddr(
fmt.Sprintf("localhost:%d", proxy.LocalPort()),
getTLSClientConfig(),
&quic.Config{Versions: []protocol.VersionNumber{version}},
)
Expect(err).ToNot(HaveOccurred())
var serverSession quic.Session
Eventually(serverSessionChan, 10*time.Second).Should(Receive(&serverSession))
// both server and client accepted a session. Close now.
sess.Close()
serverSession.Close()
},
}
AfterEach(func() {
Expect(proxy.Close()).To(Succeed())
})
for _, v := range protocol.SupportedVersions {
version := v
Context(fmt.Sprintf("with QUIC version %s", version), func() {
for _, d := range directions {
direction := d
for _, a := range []*applicationProtocol{clientSpeaksFirst, serverSpeaksFirst, nobodySpeaks} {
app := a
Context(app.name, func() {
It(fmt.Sprintf("establishes a connection when the first packet is lost in %s direction", d), func() {
startListenerAndProxy(func(d quicproxy.Direction, p uint64) bool {
return p == 1 && d.Is(direction)
}, version)
app.run(version)
})
It(fmt.Sprintf("establishes a connection when the second packet is lost in %s direction", d), func() {
startListenerAndProxy(func(d quicproxy.Direction, p uint64) bool {
return p == 2 && d.Is(direction)
}, version)
app.run(version)
})
It(fmt.Sprintf("establishes a connection when 1/5 of the packets are lost in %s direction", d), func() {
startListenerAndProxy(func(d quicproxy.Direction, p uint64) bool {
return d.Is(direction) && stochasticDropper(5)
}, version)
app.run(version)
})
})
}
}
})
}
})
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
able_app/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'able_app.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
test/src/eks_blueprints_e2e_test.go
|
//go:build e2e
// +build e2e
package src
import (
"context"
internal "github.com/aws-ia/terraform-aws-eks-blueprints/aws"
"github.com/aws/aws-sdk-go/aws"
"github.com/gruntwork-io/terratest/modules/terraform"
test_structure "github.com/gruntwork-io/terratest/modules/test-structure"
"github.com/stretchr/testify/assert"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"strings"
"testing"
"time"
)
var (
//Test Driven tests Inputs https://github.com/golang/go/wiki/TableDrivenTests
testCases = []struct {
name string
region string
eks_cluster string
values map[string]string
}{
{
"eks-cluster-with-new-vpc",
"us-west-2",
"aws-terra-test-eks",
map[string]string{
"rootFolder": "../..",
"exampleFolderPath": "examples/eks-cluster-with-new-vpc"},
},
}
/* Commented for future use
S3BackendConfig = map[string]string{
"bucketName": "terraform-ssp-github-actions-state",
"s3Prefix": "terratest/examples/",
"awsRegion" : "us-west-2"}*/
destroyModules = []string{
"module.kubernetes_addons",
"module.eks_blueprints",
"module.aws_vpc",
"full_destroy",
}
/*Update the expected Output variables and values*/
outputParameters = [...]Outputs{
{"vpc_cidr", "10.0.0.0/16", "equal"},
{"vpc_private_subnet_cidr", "[10.0.10.0/24 10.0.11.0/24 10.0.12.0/24]", "equal"},
{"vpc_public_subnet_cidr", "[10.0.0.0/24 10.0.1.0/24 10.0.2.0/24]", "equal"},
{"eks_cluster_id", "aws-terra-test-eks", "equal"},
{"eks_managed_nodegroup_status", "[ACTIVE]", "equal"},
}
/*EKS API Validation*/
expectedEKSWorkerNodes = 3
/*Update the expected Deployments names and the namespace*/
expectedDeployments = [...]Deployment{
{"aws-load-balancer-controller", "kube-system"},
{"cluster-autoscaler-aws-cluster-autoscaler", "kube-system"},
{"coredns", "kube-system"},
{"metrics-server", "kube-system"},
}
/*Update the expected DaemonSet names and the namespace*/
expectedDaemonSets = [...]DaemonSet{
{"aws-node", "kube-system"},
{"kube-proxy", "kube-system"},
{"aws-cloudwatch-metrics", "amazon-cloudwatch"},
}
/*Update the expected K8s Services names and the namespace*/
expectedServices = [...]Services{
{"cluster-autoscaler-aws-cluster-autoscaler", "kube-system", "ClusterIP"},
{"kube-dns", "kube-system", "ClusterIP"},
{"kubernetes", "default", "ClusterIP"},
{"metrics-server", "kube-system", "ClusterIP"},
}
)
type Outputs struct {
OutputVariable string
ExpectedOutputValue string
AssertType string
}
type Deployment struct {
Name string
Namespace string
}
type DaemonSet struct {
Name string
Namespace string
}
type Services struct {
Name string
Namespace string
Type core.ServiceType
}
func TestEksBlueprintsE2E(t *testing.T) {
t.Parallel()
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(subT *testing.T) {
subT.Parallel()
/*This allows running multiple tests in parallel against the same terraform module*/
tempExampleFolder := test_structure.CopyTerraformFolderToTemp(t, testCase.values["rootFolder"], testCase.values["exampleFolderPath"])
//Uncomment for debugging the test code
//os.Setenv("SKIP_destroy", "true")
inputTfOptions := &terraform.Options{
/*The path to where our Terraform code is located*/
TerraformDir: tempExampleFolder,
VarFiles: []string{testCase.name + ".tfvars"}, // The var file paths to pass to Terraform commands using -var-file option.
//BackendConfig: map[string]interface{}{
// "bucket": S3BackendConfig["bucketName"],
// "key": S3BackendConfig["s3Prefix"]+testCase.name,
// "region": S3BackendConfig["awsRegion"],
//},
NoColor: true,
}
terratestOptions := getTerraformOptions(t, inputTfOptions)
/* At the end of the test, run `terraform destroy` to clean up any resources that were created */
defer test_structure.RunTestStage(t, "destroy", func() {
for _, target := range destroyModules {
if target != "full_destroy" {
destroyTFOptions := &terraform.Options{
/*The path to where our Terraform code is located*/
TerraformDir: tempExampleFolder,
VarFiles: []string{testCase.name + ".tfvars"}, // The var file paths to pass to Terraform commands using -var-file option.
//BackendConfig: map[string]interface{}{
// "bucket": S3BackendConfig["bucketName"],
// "key": S3BackendConfig["s3Prefix"]+testCase.name,
// "region": S3BackendConfig["awsRegion"],
//},
Targets: []string{target},
NoColor: true,
}
terraformOptions := getTerraformOptions(t, destroyTFOptions)
terraform.Destroy(t, terraformOptions)
time.Sleep(2 * time.Minute) // Workaround for cleaning up dangling ENIs
} else {
terraformOptions := getTerraformOptions(t, inputTfOptions)
terraform.Destroy(t, terraformOptions)
}
}
})
// Run Init and Apply
test_structure.RunTestStage(t, "apply", func() {
test_structure.SaveTerraformOptions(t, tempExampleFolder, terratestOptions)
/* This will run `terraform init` and `terraform apply` and fail the test if there are any errors */
terraform.InitAndApply(t, terratestOptions)
})
t.Run("TF_PLAN_VALIDATION", func(t *testing.T) {
// Run Plan diff
test_structure.RunTestStage(t, "plan", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, tempExampleFolder)
planResult := terraform.Plan(t, terraformOptions)
// Make sure the plan shows zero changes
assert.Contains(t, planResult, "No changes.")
})
})
t.Run("TF_OUTPUTS_VALIDATION", func(t *testing.T) {
/*Outputs Validation*/
test_structure.RunTestStage(t, "outputs_validation", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, tempExampleFolder)
for _, tc := range outputParameters {
t.Run(tc.OutputVariable, func(t *testing.T) {
ActualOutputValue := terraform.Output(t, terraformOptions, tc.OutputVariable)
switch strings.ToLower(tc.AssertType) {
case "equal":
assert.Equal(t, tc.ExpectedOutputValue, ActualOutputValue)
case "notempty":
assert.NotEmpty(t, ActualOutputValue)
case "contains":
assert.Contains(t, ActualOutputValue, tc.ExpectedOutputValue)
}
})
}
})
})
t.Run("EKS_ADDON_VALIDATION", func(t *testing.T) {
/*EKS and Addon Validation*/
test_structure.RunTestStage(t, "eks_addon_validation", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, tempExampleFolder)
eksClusterName := terraform.Output(t, terraformOptions, "eks_cluster_id")
awsRegion := terraform.Output(t, terraformOptions, "region")
eksAddonValidation(t, eksClusterName, awsRegion)
})
})
})
}
}
func getTerraformOptions(t *testing.T, inputTFOptions *terraform.Options) *terraform.Options {
return terraform.WithDefaultRetryableErrors(t, inputTFOptions)
}
func eksAddonValidation(t *testing.T, eksClusterName string, awsRegion string) {
/****************************************************************************/
/*EKS Cluster Result
/****************************************************************************/
result, err := internal.EksDescribeCluster(awsRegion, eksClusterName)
if err != nil {
t.Errorf("Error describing EKS Cluster: %v", err)
}
/****************************************************************************/
/*K8s ClientSet
/****************************************************************************/
k8sclient, err := internal.GetKubernetesClient(result.Cluster)
if err != nil {
t.Errorf("Error creating Kubernees clientset: %v", err)
}
/****************************************************************************/
/*TEST: Match Cluster Name
/****************************************************************************/
t.Run("MATCH_EKS_CLUSTER_NAME", func(t *testing.T) {
assert.Equal(t, eksClusterName, aws.StringValue(result.Cluster.Name))
})
/****************************************************************************/
/*TEST: Verify the total number of nodes running
/****************************************************************************/
nodes, err := k8sclient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Errorf("Error getting EKS nodes: %v", err)
}
t.Run("MATCH_TOTAL_EKS_WORKER_NODES", func(t *testing.T) {
assert.Equal(t, expectedEKSWorkerNodes, len(nodes.Items))
})
/****************************************************************************/
/*Test: Validate Kubernetes Deployments
/****************************************************************************/
t.Run("EKS_DEPLOYMENTS_VALIDATION", func(t *testing.T) {
for _, dep := range expectedDeployments {
deployment, err := internal.GetDeployment(k8sclient, dep.Name, dep.Namespace)
if err != nil {
assert.Fail(t, "DEPLOYMENT: %s | NAMESPACE: %s | Error: %s", dep.Name, dep.Namespace, err)
} else {
t.Log("|-----------------------------------------------------------------------------------------------------------------------|")
t.Logf("DEPLOYMENT: %s | NAMESPACE: %s | READY: %d | AVAILABLE: %d | REPLICAS: %d | UNAVAILABLE: %d",
dep.Name, dep.Namespace,
deployment.Status.ReadyReplicas,
deployment.Status.AvailableReplicas,
deployment.Status.Replicas,
deployment.Status.UnavailableReplicas)
t.Logf("|-----------------------------------------------------------------------------------------------------------------------|")
t.Run("MATCH_REPLICAS_VS_READY-REPLICAS/"+dep.Name, func(t *testing.T) {
assert.Equal(t, aws.Int32Value(deployment.Spec.Replicas), deployment.Status.ReadyReplicas)
})
t.Run("UNAVAILABLE_REPLICAS/"+dep.Name, func(t *testing.T) {
assert.Equal(t, int32(0), deployment.Status.UnavailableReplicas)
})
}
}
})
/****************************************************************************/
/*Test: Validate Kubernetes DaemonSets
/****************************************************************************/
t.Run("EKS_DAEMONSETS_VALIDATION", func(t *testing.T) {
for _, daemon := range expectedDaemonSets {
daemonset, err := internal.GetDaemonSet(k8sclient, daemon.Name, daemon.Namespace)
if err != nil {
assert.Fail(t, "DaemonSet: %s | NAMESPACE: %s| Error: %s", daemon.Name, daemon.Namespace, err)
} else {
t.Log("|-----------------------------------------------------------------------------------------------------------------------|")
t.Logf("DaemonSet: %s | NAMESPACE: %s | DESIRED: %d | CURRENT: %d | READY: %d AVAILABLE: %d | UNAVAILABLE: %d",
daemon.Name,
daemon.Namespace,
daemonset.Status.DesiredNumberScheduled,
daemonset.Status.CurrentNumberScheduled,
daemonset.Status.NumberReady,
daemonset.Status.NumberAvailable,
daemonset.Status.NumberUnavailable)
t.Logf("|-----------------------------------------------------------------------------------------------------------------------|")
t.Run("MATCH_DESIRED_VS_CURRENT_PODS/"+daemon.Name, func(t *testing.T) {
assert.Equal(t, daemonset.Status.DesiredNumberScheduled, daemonset.Status.CurrentNumberScheduled)
})
t.Run("UNAVAILABLE_REPLICAS/"+daemon.Name, func(t *testing.T) {
assert.Equal(t, int32(0), daemonset.Status.NumberUnavailable)
})
}
}
})
/****************************************************************************/
/*Test: Validate Kubernetes Services
/****************************************************************************/
t.Run("EKS_SERVICES_VALIDATION", func(t *testing.T) {
for _, service := range expectedServices {
services, err := internal.GetServices(k8sclient, service.Name, service.Namespace)
if err != nil {
assert.Fail(t, "SERVICE NAME: %s | NAMESPACE: %s| Error: %s", service.Name, service.Namespace, err)
} else {
t.Log("|-----------------------------------------------------------------------------------------------------------------------|")
t.Logf("SERVICE NAME: %s | NAMESPACE: %s | STATUS: %s",
service.Name,
service.Namespace,
services.Spec.Type)
t.Logf("|-----------------------------------------------------------------------------------------------------------------------|")
t.Run("SERVICE_STATUS/"+service.Name, func(t *testing.T) {
assert.Equal(t, services.Spec.Type, service.Type)
})
}
}
})
}
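// Illustrative note (not from this file): the expected* slices iterated above are presumably
// defined earlier in this test file and carry at least Name and Namespace fields (plus Type for
// services), so an entry such as {Name: "coredns", Namespace: "kube-system"} would be looked up
// and validated by the subtests above. The concrete values are not shown here.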
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
_vendor/src/camlistore.org/pkg/images/fastjpeg/fastjpeg.go
|
/*
Copyright 2014 The Camlistore Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package fastjpeg uses djpeg(1), from the Independent JPEG Group's
// (www.ijg.org) jpeg package, to quickly down-sample images on load. It can
// sample images by a factor of 1, 2, 4 or 8.
// This reduces the amount of data that must be decompressed into memory when
// the full resolution image isn't required, i.e. in the case of generating
// thumbnails.
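//
// A minimal usage sketch (illustrative, not taken from this package's tests): decode the JPEG
// config first to learn the source dimensions, then pick a sample factor and down-sample:
//
//	f, _ := os.Open("photo.jpg") // filename illustrative
//	defer f.Close()
//	cfg, _, _ := image.DecodeConfig(f)
//	f.Seek(0, io.SeekStart)
//	factor := fastjpeg.Factor(cfg.Width, cfg.Height, 256, 256)
//	thumb, err := fastjpeg.DecodeDownsample(f, factor)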
package fastjpeg
import (
"bytes"
"errors"
"expvar"
"fmt"
"image"
"image/color"
_ "image/jpeg"
"io"
"log"
"os"
"os/exec"
"strconv"
"sync"
"camlistore.org/pkg/buildinfo"
"go4.org/readerutil"
)
var (
ErrDjpegNotFound = errors.New("fastjpeg: djpeg not found in path")
)
// DjpegFailedError wraps errors returned when calling djpeg and handling its
// response. Used for type asserting and retrying with other jpeg decoders,
// i.e. the standard library's jpeg.Decode.
type DjpegFailedError struct {
Err error
}
func (dfe DjpegFailedError) Error() string {
return dfe.Err.Error()
}
// TODO(wathiede): do we need to conditionally add ".exe" on Windows? I have
// no access to test on Windows.
const djpegBin = "djpeg"
var (
checkAvailability sync.Once
available bool
)
var (
djpegSuccessVar = expvar.NewInt("fastjpeg-djpeg-success")
djpegFailureVar = expvar.NewInt("fastjpeg-djpeg-failure")
// Bytes read from djpeg subprocess
djpegBytesReadVar = expvar.NewInt("fastjpeg-djpeg-bytes-read")
// Bytes written to djpeg subprocess
djpegBytesWrittenVar = expvar.NewInt("fastjpeg-djpeg-bytes-written")
)
func Available() bool {
checkAvailability.Do(func() {
if ok, _ := strconv.ParseBool(os.Getenv("CAMLI_DISABLE_DJPEG")); ok {
log.Println("CAMLI_DISABLE_DJPEG set in environment. Disabling fastjpeg.")
return
}
if p, err := exec.LookPath(djpegBin); p != "" && err == nil {
available = true
log.Printf("fastjpeg enabled with %s.", p)
}
if !available {
log.Printf("%s not found in PATH, disabling fastjpeg.", djpegBin)
}
})
return available
}
func init() {
buildinfo.RegisterDjpegStatusFunc(djpegStatus)
}
func djpegStatus() string {
// TODO: more info: its path, whether it works, its version, etc.
if Available() {
return "djpeg available"
}
return "djpeg optimizaton unavailable"
}
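// readPNM parses the raw PGM (P5) or PPM (P6) data that djpeg writes to stdout. For reference,
// a 2x2 RGB image arrives as the header "P6\n2 2\n255\n" followed by 12 bytes of packed RGB
// samples (illustrative example, not test data from this package).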
func readPNM(buf *bytes.Buffer) (image.Image, error) {
var imgType, w, h int
nTokens, err := fmt.Fscanf(buf, "P%d\n%d %d\n255\n", &imgType, &w, &h)
if err != nil {
return nil, err
}
if nTokens != 3 {
hdr := buf.Bytes()
if len(hdr) > 100 {
hdr = hdr[:100]
}
return nil, fmt.Errorf("fastjpeg: Invalid PNM header: %q", hdr)
}
switch imgType {
case 5: // Gray
src := buf.Bytes()
if len(src) != w*h {
return nil, fmt.Errorf("fastjpeg: grayscale source buffer not sized w*h")
}
im := &image.Gray{
Pix: src,
Stride: w,
Rect: image.Rect(0, 0, w, h),
}
return im, nil
case 6: // RGB
src := buf.Bytes()
if len(src) != w*h*3 {
return nil, fmt.Errorf("fastjpeg: RGB source buffer not sized w*h*3")
}
im := image.NewRGBA(image.Rect(0, 0, w, h))
dst := im.Pix
for i := 0; i < len(src)/3; i++ {
dst[4*i+0] = src[3*i+0] // R
dst[4*i+1] = src[3*i+1] // G
dst[4*i+2] = src[3*i+2] // B
dst[4*i+3] = 255 // Alpha
}
return im, nil
default:
return nil, fmt.Errorf("fastjpeg: Unsupported PNM type P%d", imgType)
}
}
// Factor returns the sample factor DecodeSample should use to generate a
// sampled image greater than or equal to sw x sh pixels given a source image
// of w x h pixels.
func Factor(w, h, sw, sh int) int {
switch {
case w>>3 >= sw && h>>3 >= sh:
return 8
case w>>2 >= sw && h>>2 >= sh:
return 4
case w>>1 >= sw && h>>1 >= sh:
return 2
}
return 1
}
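// Worked example (illustrative): Factor(4000, 3000, 256, 256) returns 8, because 4000>>3 = 500
// and 3000>>3 = 375 are both still at least 256, while Factor(1000, 1000, 600, 600) returns 1,
// since even halving the source (500x500) would drop below the requested 600x600.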
// DecodeDownsample decodes JPEG data in r, down-sampling it by factor.
// If djpeg is not found, err is ErrDjpegNotFound and r is not read from.
// If the execution of djpeg, or decoding the resulting PNM fails, error will
// be of type DjpegFailedError.
func DecodeDownsample(r io.Reader, factor int) (image.Image, error) {
if !Available() {
return nil, ErrDjpegNotFound
}
switch factor {
case 1, 2, 4, 8:
default:
return nil, fmt.Errorf("fastjpeg: unsupported sample factor %d", factor)
}
buf := new(bytes.Buffer)
tr := io.TeeReader(r, buf)
ic, format, err := image.DecodeConfig(tr)
if err != nil {
return nil, err
}
if format != "jpeg" {
return nil, fmt.Errorf("fastjpeg: Unsupported format %q", format)
}
var bpp int
switch ic.ColorModel {
case color.YCbCrModel:
bpp = 4 // JPEG will decode to RGB, and we'll expand inplace to RGBA.
case color.GrayModel:
bpp = 1
default:
return nil, fmt.Errorf("fastjpeg: Unsupported thumnbnail color model %T", ic.ColorModel)
}
args := []string{djpegBin, "-scale", fmt.Sprintf("1/%d", factor)}
cmd := exec.Command(args[0], args[1:]...)
cmd.Stdin = readerutil.NewStatsReader(djpegBytesWrittenVar, io.MultiReader(buf, r))
// Allocate space for the RGBA / Gray pixel data plus some extra for PNM
// header info. Explicitly allocate all the memory upfront to prevent
// many smaller allocations.
pixSize := ic.Width*ic.Height*bpp/factor/factor + 128
w := bytes.NewBuffer(make([]byte, 0, pixSize))
cmd.Stdout = w
stderrW := new(bytes.Buffer)
cmd.Stderr = stderrW
if err := cmd.Run(); err != nil {
djpegFailureVar.Add(1)
return nil, DjpegFailedError{Err: fmt.Errorf("%v: %s", err, stderrW)}
}
djpegSuccessVar.Add(1)
djpegBytesReadVar.Add(int64(w.Len()))
m, err := readPNM(w)
if err != nil {
return m, DjpegFailedError{Err: err}
}
return m, nil
}
|
[
"\"CAMLI_DISABLE_DJPEG\""
] |
[] |
[
"CAMLI_DISABLE_DJPEG"
] |
[]
|
["CAMLI_DISABLE_DJPEG"]
|
go
| 1 | 0 | |
cmd/config.go
|
package cmd
import (
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"runtime"
"github.com/noborus/trdsql"
)
// ErrNoFile is returned when there is no file.
var ErrNoFile = errors.New("no file")
type database struct {
Driver string `json:"driver"`
Dsn string `json:"dsn"`
}
type config struct {
Db string `json:"db"`
Database map[string]database `json:"database"`
}
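// An example config.json matching the structs above (values are illustrative):
//
//	{
//	  "db": "pg1",
//	  "database": {
//	    "pg1": {"driver": "postgres", "dsn": "host=localhost dbname=sample"}
//	  }
//	}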
func configOpen(config string) io.Reader {
var fileName string
switch {
case config != "":
fileName = config
case runtime.GOOS == "windows":
fileName = filepath.Join(os.Getenv("APPDATA"), trdsql.AppName, "config.json")
default:
fileName = filepath.Join(os.Getenv("HOME"), ".config", trdsql.AppName, "config.json")
}
cfg, err := os.Open(fileName)
if err != nil {
if Debug {
log.Printf("configOpen: %s", err.Error())
}
return nil
}
if Debug {
log.Printf("config found: %s", fileName)
}
return cfg
}
func loadConfig(conf io.Reader) (*config, error) {
var cfg config
if conf == nil {
return &cfg, ErrNoFile
}
err := json.NewDecoder(conf).Decode(&cfg)
if err != nil {
return &cfg, fmt.Errorf("config error: %w", err)
}
return &cfg, nil
}
|
[
"\"APPDATA\"",
"\"HOME\""
] |
[] |
[
"APPDATA",
"HOME"
] |
[]
|
["APPDATA", "HOME"]
|
go
| 2 | 0 | |
idm_lp/rules.py
|
import re
from typing import List, Union
from vkbottle.rule import AbstractMessageRule, Message
from idm_lp.database import Database
class DeleteNotifyRule(AbstractMessageRule):
notify_all_words = [
'all',
'online',
'here',
'everyone',
'все',
'онлайн',
'здесь',
'тут',
]
async def check(self, message: Message) -> bool:
db = Database.get_current()
if not db.delete_all_notify:
return False
if any([f"@{i}" in message.text.lower() for i in self.notify_all_words]):
return True
return False
class ChatEnterRule(AbstractMessageRule):
async def check(self, message: Message) -> bool:
db = Database.get_current()
for chat_enter_model in db.add_to_friends_on_chat_enter:
if chat_enter_model.peer_id == message.peer_id:
return True
return False
class IgnoredMembersRule(AbstractMessageRule):
async def check(self, message: Message) -> bool:
db = Database.get_current()
for ignore_member in db.ignored_members:
if ignore_member.chat_id == message.peer_id and ignore_member.member_id == message.from_id:
return True
return False
class IgnoredGlobalMembersRule(AbstractMessageRule):
async def check(self, message: Message) -> bool:
db = Database.get_current()
for ignore_member in db.ignored_global_members:
if ignore_member.member_id == message.from_id:
return True
return False
class MutedMembersRule(AbstractMessageRule):
async def check(self, message: Message):
db = Database.get_current()
for muted_member in db.muted_members:
if muted_member.chat_id == message.peer_id and muted_member.member_id == message.from_id:
return dict(member=muted_member)
return False
class SlouMoRule(AbstractMessageRule):
async def check(self, message: Message) -> bool:
db = Database.get_current()
for slou in db.sloumo:
if slou.chat_id == message.chat_id:
return True
return False
class TrustedRule(AbstractMessageRule):
async def check(self, message: Message) -> bool:
db = Database.get_current()
for trusted in db.trusted:
if trusted.user_id == message.from_id:
return True
return False
class RegexDeleter(AbstractMessageRule):
async def check(self, message: Message) -> bool:
db = Database.get_current()
for regex_del in db.regex_deleter:
if regex_del.chat_id == message.peer_id:
if re.findall(regex_del.regex, message.text):
return True
return False
class ContainsRule(AbstractMessageRule):
not_include: List[str]
def __init__(self, words: Union[str, List[str]], not_include: List[str] = None, upper: bool = True):
if not_include is None:
not_include = []
self.words = words if isinstance(words, list) else [words]
self.not_include = not_include if isinstance(not_include, list) else [not_include]
self.upper = upper
async def check(self, message: Message) -> bool:
checked = False
for word in self.words:
if word.upper() in message.text.upper():
checked = True
for ni_word in self.not_include:
if ni_word.upper() in message.text.upper():
checked = False
return checked
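# Worked example of ContainsRule (illustrative values): ContainsRule(["ping"], not_include=["pingback"])
# matches "Ping me later" but not "pingback received", because any not_include hit resets the
# match regardless of earlier word hits.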
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
pycocosn/defaults.py
|
"""
"""
import os
import astropy.units as u
import matplotlib.pyplot as plt
__all__ = ["_default_data_dir_path",
"_default_filter_dir_path",
"_default_coco_dir_path",
"_default_recon_dir_path",
"_default_specphase_dir_path",
"_default_sn_dist_path",
"_default_lcsim_path",
"_default_list_dir_path",
"_colourmap_name",
"_spec_colourmap_name",
"spec_colourmap",
"_colour_upper_lambda_limit",
"_colour_lower_lambda_limit",
"_default_info_path",
"_default_kcorr_data_path",
"_default_lsst_throughputs_path"]
## Important variables
COCO_ROOT_DIR = os.environ["COCO_ROOT_DIR"]
LSST_THROUGHPUTS_ROOT = os.environ["LSST_THROUGHPUTS"]
SFD_DIR = os.environ["SFD_DIR"]
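# Example shell setup before importing this module (paths are illustrative):
#   export COCO_ROOT_DIR=/data/coco
#   export LSST_THROUGHPUTS=/data/lsst/throughputs
#   export SFD_DIR=/data/sfd_dust_maps
# The three lookups above raise KeyError if these variables are unset.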
_default_list_dir_path = os.path.join(COCO_ROOT_DIR, "lists/")
_default_coco_dir_path = os.path.join(COCO_ROOT_DIR)
if "PYCOCO_DATA_DIR" in os.environ:
_default_data_dir_path = os.environ["PYCOCO_DATA_DIR"]
else:
_default_data_dir_path = os.path.join(COCO_ROOT_DIR, "data/")
_default_filter_dir_path = os.path.join(COCO_ROOT_DIR, "data/filters/")
_default_recon_dir_path = os.path.join(COCO_ROOT_DIR, "recon/")
_default_specphase_dir_path = os.path.join(COCO_ROOT_DIR, "spectra/")
_default_sn_dist_path = os.path.join(COCO_ROOT_DIR, "sndist.list")
_default_lcsim_path = os.path.join(COCO_ROOT_DIR, "sim/")
_default_info_path = os.path.join(_default_data_dir_path, "info/info.dat")
_default_kcorr_data_path = os.path.join(os.path.abspath(os.path.join(__file__, os.pardir)), 'kcorr_data/')
_default_lsst_throughputs_path = os.path.abspath(LSST_THROUGHPUTS_ROOT)
if "LSST_THROUGHPUTS_BASELINE" in os.environ:
_default_lsst_throughputs_baseline_path = os.environ["LSST_THROUGHPUTS_BASELINE"]
else:
    _default_lsst_throughputs_baseline_path = os.path.join(LSST_THROUGHPUTS_ROOT, "baseline/")
_default_dust_dir = os.path.abspath(SFD_DIR)
# _colormap_name = 'jet'
# _colourmap_name = 'rainbow'
_spec_colourmap_name = 'viridis'
# _spec_colourmap_name = 'plasma'
# _spec_colourmap_name = 'jet'
_colourmap_name = 'plasma'
colourmap = plt.get_cmap(_colourmap_name)
spec_colourmap = plt.get_cmap(_spec_colourmap_name)
_colour_upper_lambda_limit = 11000 * u.angstrom
_colour_lower_lambda_limit = 3500 * u.angstrom
|
[] |
[] |
[
"COCO_ROOT_DIR",
"PYCOCO_DATA_DIR",
"SFD_DIR",
"LSST_THROUGHPUTS_BASELINE",
"LSST_THROUGHPUTS"
] |
[]
|
["COCO_ROOT_DIR", "PYCOCO_DATA_DIR", "SFD_DIR", "LSST_THROUGHPUTS_BASELINE", "LSST_THROUGHPUTS"]
|
python
| 5 | 0 | |
pkg/query-service/app/server.go
|
package app
import (
"context"
"fmt"
"net"
"net/http"
"os"
"time"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
"go.signoz.io/query-service/app/clickhouseReader"
"go.signoz.io/query-service/app/dashboards"
"go.signoz.io/query-service/constants"
"go.signoz.io/query-service/dao"
"go.signoz.io/query-service/healthcheck"
"go.signoz.io/query-service/telemetry"
"go.signoz.io/query-service/utils"
"go.uber.org/zap"
)
type ServerOptions struct {
HTTPHostPort string
// DruidClientUrl string
}
// Server runs HTTP, Mux and a grpc server
type Server struct {
// logger *zap.Logger
// querySvc *querysvc.QueryService
// queryOptions *QueryOptions
// tracer opentracing.Tracer // TODO make part of flags.Service
serverOptions *ServerOptions
conn net.Listener
// grpcConn net.Listener
httpConn net.Listener
// grpcServer *grpc.Server
httpServer *http.Server
separatePorts bool
unavailableChannel chan healthcheck.Status
}
// HealthCheckStatus returns health check status channel a client can subscribe to
func (s Server) HealthCheckStatus() chan healthcheck.Status {
return s.unavailableChannel
}
// NewServer creates and initializes Server
// func NewServer(logger *zap.Logger, querySvc *querysvc.QueryService, options *QueryOptions, tracer opentracing.Tracer) (*Server, error) {
func NewServer(serverOptions *ServerOptions) (*Server, error) {
// _, httpPort, err := net.SplitHostPort(serverOptions.HTTPHostPort)
// if err != nil {
// return nil, err
// }
// _, grpcPort, err := net.SplitHostPort(options.GRPCHostPort)
// if err != nil {
// return nil, err
// }
// grpcServer, err := createGRPCServer(querySvc, options, logger, tracer)
// if err != nil {
// return nil, err
// }
if err := dao.InitDao("sqlite", constants.RELATIONAL_DATASOURCE_PATH); err != nil {
return nil, err
}
s := &Server{
// logger: logger,
// querySvc: querySvc,
// queryOptions: options,
// tracer: tracer,
// grpcServer: grpcServer,
serverOptions: serverOptions,
separatePorts: true,
// separatePorts: grpcPort != httpPort,
unavailableChannel: make(chan healthcheck.Status),
}
httpServer, err := s.createHTTPServer()
if err != nil {
return nil, err
}
s.httpServer = httpServer
return s, nil
}
func (s *Server) createHTTPServer() (*http.Server, error) {
localDB, err := dashboards.InitDB(constants.RELATIONAL_DATASOURCE_PATH)
if err != nil {
return nil, err
}
localDB.SetMaxOpenConns(10)
var reader Reader
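// The datastore backend is selected via the STORAGE environment variable ("clickhouse" or
// "druid"); an illustrative invocation would be STORAGE=clickhouse ./query-service (binary
// name assumed, not taken from this file).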
storage := os.Getenv("STORAGE")
if storage == "druid" {
zap.S().Info("Using Apache Druid as datastore ...")
// reader = druidReader.NewReader(localDB)
} else if storage == "clickhouse" {
zap.S().Info("Using ClickHouse as datastore ...")
clickhouseReader := clickhouseReader.NewReader(localDB)
go clickhouseReader.Start()
reader = clickhouseReader
} else {
return nil, fmt.Errorf("Storage type: %s is not supported in query service", storage)
}
apiHandler, err := NewAPIHandler(&reader, dao.DB())
if err != nil {
return nil, err
}
r := NewRouter()
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddleware)
apiHandler.RegisterRoutes(r)
apiHandler.RegisterMetricsRoutes(r)
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
// AllowCredentials: true,
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
})
handler := c.Handler(r)
// var handler http.Handler = r
handler = handlers.CompressHandler(handler)
return &http.Server{
Handler: handler,
}, nil
}
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.S().Info(path, "\ttimeTaken: ", time.Since(startTime))
})
}
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
data := map[string]interface{}{"path": path}
if _, ok := telemetry.IgnoredPaths()[path]; !ok {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
}
next.ServeHTTP(w, r)
})
}
func setTimeoutMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), constants.ContextTimeout*time.Second)
defer cancel()
r = r.WithContext(ctx)
next.ServeHTTP(w, r)
})
}
// initListener initialises listeners of the server
func (s *Server) initListener() (cmux.CMux, error) {
if s.separatePorts { // use separate ports and listeners each for gRPC and HTTP requests
var err error
// s.grpcConn, err = net.Listen("tcp", s.queryOptions.GRPCHostPort)
// if err != nil {
// return nil, err
// }
s.httpConn, err = net.Listen("tcp", s.serverOptions.HTTPHostPort)
if err != nil {
return nil, err
}
zap.S().Info("Query server started ...")
return nil, nil
}
// // old behavior using cmux
// conn, err := net.Listen("tcp", s.queryOptions.HostPort)
// if err != nil {
// return nil, err
// }
// s.conn = conn
// var tcpPort int
// if port, err := netutils
// utils.GetPort(s.conn.Addr()); err == nil {
// tcpPort = port
// }
// zap.S().Info(
// "Query server started",
// zap.Int("port", tcpPort),
// zap.String("addr", s.queryOptions.HostPort))
// // cmux server acts as a reverse-proxy between HTTP and GRPC backends.
// cmuxServer := cmux.New(s.conn)
// s.grpcConn = cmuxServer.MatchWithWriters(
// cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"),
// cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc+proto"),
// )
// s.httpConn = cmuxServer.Match(cmux.Any())
// s.queryOptions.HTTPHostPort = s.queryOptions.HostPort
// s.queryOptions.GRPCHostPort = s.queryOptions.HostPort
return nil, nil
}
// Start http, GRPC and cmux servers concurrently
func (s *Server) Start() error {
_, err := s.initListener()
if err != nil {
return err
}
var httpPort int
if port, err := utils.GetPort(s.httpConn.Addr()); err == nil {
httpPort = port
}
go func() {
zap.S().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
switch err := s.httpServer.Serve(s.httpConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
default:
zap.S().Error("Could not start HTTP server", zap.Error(err))
}
s.unavailableChannel <- healthcheck.Unavailable
}()
return nil
}
|
[
"\"STORAGE\""
] |
[] |
[
"STORAGE"
] |
[]
|
["STORAGE"]
|
go
| 1 | 0 | |
src/main/java/walaniam/hackerrank/TowerBreakers.java
|
package walaniam.hackerrank;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;
import java.util.stream.IntStream;
public class TowerBreakers {
public static int towerBreakers(int n, int m) {
if (m == 1) {
return 2;
}
return n % 2 == 0 ? 2 : 1;
}
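// Reasoning sketch: with m == 1 no reduction move exists, so player 2 wins (returns 2).
// Otherwise, when n is even the second player can mirror every move on a paired tower, so
// even n also yields 2; odd n leaves player 1 with the winning strategy (returns 1).
// E.g. towerBreakers(2, 6) == 2 and towerBreakers(1, 4) == 1 (worked examples, not asserted here).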
public static void main(String[] args) throws IOException {
// assertThat(towerBreakers(2, 6)).isEqualTo(2);
// assertThat(towerBreakers(601251, 142899)).isEqualTo(1);
// assertThat(towerBreakers(926797, 842282)).isEqualTo(1);
String testcases = """
90
100000 1
100001 1
56389 75377
864918 691871
813085 534467
999943 750080
996862 913372
555557 585282
769161 726285
944979 757252
849839 377936
247404 241140
450646 205092
129189 251500
954266 274794
606312 207275
228695 878419
671852 757170
618268 46908
358244 268734
113584 22190
671725 498278
520425 476318
772493 831559
520281 307847
852374 816570
552032 968192
561065 88429
876852 791997
403574 590089
134046 480155
28790 420631
755308 784846
620450 639506
704239 805227
213013 903355
136403 617403
14548 980684
350667 608225
590051 636788
392333 554941
437574 91023
904363 726561
348334 547570
514106 451013
783830 910677
396633 298027
622227 523721
862558 697800
949735 796652
147107 459451
926797 842282
492228 769091
258303 66251
459240 45872
980254 620946
492730 347492
328826 209178
633544 579781
240200 341641
75881 537385
128909 460223
128075 584898
151937 400391
138859 697825
641020 180108
181922 696659
345746 411754
896991 874515
474069 515353
667709 973330
172359 602071
192333 223900
40878 821976
168974 345161
278654 347698
177051 31812
88723 548839
120664 534544
460883 356072
206381 894419
364352 128778
503531 330174
690551 321656
39321 92312
799591 481254
628042 687940
81778 511773
873776 157014
921080 377371
""";
BufferedReader bufferedReader = new BufferedReader(new StringReader(testcases));
//BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int t = Integer.parseInt(bufferedReader.readLine().trim());
IntStream.range(0, t).forEach(tItr -> {
try {
String[] firstMultipleInput = bufferedReader.readLine().replaceAll("\\s+$", "").split(" ");
int n = Integer.parseInt(firstMultipleInput[0]);
int m = Integer.parseInt(firstMultipleInput[1]);
int result = towerBreakers(n, m);
// bufferedWriter.write(String.valueOf(result));
// bufferedWriter.newLine();
} catch (IOException ex) {
throw new RuntimeException(ex);
}
});
bufferedReader.close();
// bufferedWriter.close();
}
}
|
[
"\"OUTPUT_PATH\""
] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
java
| 1 | 0 | |
config/wsgi.py
|
"""
WSGI config for Boats & Joy project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.production')
application = get_wsgi_application()
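# Illustrative deployment note (not part of this project's docs): a WSGI server such as gunicorn
# would typically be pointed at this module, e.g. `gunicorn config.wsgi:application`.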
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
storage/storage_test.go
|
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptest"
"net/url"
"os"
"reflect"
"regexp"
"sort"
"strings"
"testing"
"time"
"cloud.google.com/go/iam"
"cloud.google.com/go/internal/testutil"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
raw "google.golang.org/api/storage/v1"
)
func TestV2HeaderSanitization(t *testing.T) {
t.Parallel()
var tests = []struct {
desc string
in []string
want []string
}{
{
desc: "already sanitized headers should not be modified",
in: []string{"x-goog-header1:true", "x-goog-header2:0"},
want: []string{"x-goog-header1:true", "x-goog-header2:0"},
},
{
desc: "sanitized headers should be sorted",
in: []string{"x-goog-header2:0", "x-goog-header1:true"},
want: []string{"x-goog-header1:true", "x-goog-header2:0"},
},
{
desc: "non-canonical headers should be removed",
in: []string{"x-goog-header1:true", "x-goog-no-value", "non-canonical-header:not-of-use"},
want: []string{"x-goog-header1:true"},
},
{
desc: "excluded canonical headers should be removed",
in: []string{"x-goog-header1:true", "x-goog-encryption-key:my_key", "x-goog-encryption-key-sha256:my_sha256"},
want: []string{"x-goog-header1:true"},
},
{
desc: "dirty headers should be formatted correctly",
in: []string{" x-goog-header1 : \textra-spaces ", "X-Goog-Header2:CamelCaseValue"},
want: []string{"x-goog-header1:extra-spaces", "x-goog-header2:CamelCaseValue"},
},
{
desc: "duplicate headers should be merged",
in: []string{"x-goog-header1:value1", "X-Goog-Header1:value2"},
want: []string{"x-goog-header1:value1,value2"},
},
}
for _, test := range tests {
got := v2SanitizeHeaders(test.in)
if !testutil.Equal(got, test.want) {
t.Errorf("%s: got %v, want %v", test.desc, got, test.want)
}
}
}
func TestV4HeaderSanitization(t *testing.T) {
t.Parallel()
var tests = []struct {
desc string
in []string
want []string
}{
{
desc: "already sanitized headers should not be modified",
in: []string{"x-goog-header1:true", "x-goog-header2:0"},
want: []string{"x-goog-header1:true", "x-goog-header2:0"},
},
{
desc: "dirty headers should be formatted correctly",
in: []string{" x-goog-header1 : \textra-spaces ", "X-Goog-Header2:CamelCaseValue"},
want: []string{"x-goog-header1:extra-spaces", "x-goog-header2:CamelCaseValue"},
},
{
desc: "duplicate headers should be merged",
in: []string{"x-goog-header1:value1", "X-Goog-Header1:value2"},
want: []string{"x-goog-header1:value1,value2"},
},
{
desc: "multiple spaces in value are stripped down to one",
in: []string{"foo:bar gaz"},
want: []string{"foo:bar gaz"},
},
}
for _, test := range tests {
got := v4SanitizeHeaders(test.in)
sort.Strings(got)
sort.Strings(test.want)
if !testutil.Equal(got, test.want) {
t.Errorf("%s: got %v, want %v", test.desc, got, test.want)
}
}
}
func TestSignedURLV2(t *testing.T) {
expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
tests := []struct {
desc string
objectName string
opts *SignedURLOptions
want string
}{
{
desc: "SignedURLV2 works",
objectName: "object-name",
opts: &SignedURLOptions{
GoogleAccessID: "xxx@clientid",
PrivateKey: dummyKey("rsa"),
Method: "GET",
MD5: "ICy5YqxZB1uWSwcVLSNLcA==",
Expires: expires,
ContentType: "application/json",
Headers: []string{"x-goog-header1:true", "x-goog-header2:false"},
},
want: "https://storage.googleapis.com/bucket-name/object-name?" +
"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
"RfsHlPtbB2JUYjzCgNr2Mi%2BjggdEuL1V7E6N9o6aaqwVLBDuTv3I0%2B9" +
"x94E6rmmr%2FVgnmZigkIUxX%2Blfl7LgKf30uPGLt0mjKGH2p7r9ey1ONJ" +
"%2BhVec23FnTRcSgopglvHPuCMWU2oNJE%2F1y8EwWE27baHrG1RhRHbLVF" +
"bPpLZ9xTRFK20pluIkfHV00JGljB1imqQHXM%2B2XPWqBngLr%2FwqxLN7i" +
"FcUiqR8xQEOHF%2F2e7fbkTHPNq4TazaLZ8X0eZ3eFdJ55A5QmNi8atlN4W" +
"5q7Hvs0jcxElG3yqIbx439A995BkspLiAcA%2Fo4%2BxAwEMkGLICdbvakq" +
"3eEprNCojw%3D%3D",
},
{
desc: "With a PEM Private Key",
objectName: "object-name",
opts: &SignedURLOptions{
GoogleAccessID: "xxx@clientid",
PrivateKey: dummyKey("pem"),
Method: "GET",
MD5: "ICy5YqxZB1uWSwcVLSNLcA==",
Expires: expires,
ContentType: "application/json",
Headers: []string{"x-goog-header1:true", "x-goog-header2:false"},
},
want: "https://storage.googleapis.com/bucket-name/object-name?" +
"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
"TiyKD%2FgGb6Kh0kkb2iF%2FfF%2BnTx7L0J4YiZua8AcTmnidutePEGIU5" +
"NULYlrGl6l52gz4zqFb3VFfIRTcPXMdXnnFdMCDhz2QuJBUpsU1Ai9zlyTQ" +
"dkb6ShG03xz9%2BEXWAUQO4GBybJw%2FULASuv37xA00SwLdkqj8YdyS5II" +
"1lro%3D",
},
{
desc: "With custom SignBytes",
objectName: "object-name",
opts: &SignedURLOptions{
GoogleAccessID: "xxx@clientid",
SignBytes: func(b []byte) ([]byte, error) {
return []byte("signed"), nil
},
Method: "GET",
MD5: "ICy5YqxZB1uWSwcVLSNLcA==",
Expires: expires,
ContentType: "application/json",
Headers: []string{"x-goog-header1:true", "x-goog-header2:false"},
},
want: "https://storage.googleapis.com/bucket-name/object-name?" +
"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
"c2lnbmVk", // base64('signed') == 'c2lnbmVk'
},
{
desc: "With unsafe object name",
objectName: "object name界",
opts: &SignedURLOptions{
GoogleAccessID: "xxx@clientid",
PrivateKey: dummyKey("pem"),
Method: "GET",
MD5: "ICy5YqxZB1uWSwcVLSNLcA==",
Expires: expires,
ContentType: "application/json",
Headers: []string{"x-goog-header1:true", "x-goog-header2:false"},
},
want: "https://storage.googleapis.com/bucket-name/object%20name%E7%95%8C?" +
"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=bxVH1%2Bl%2" +
"BSxpnj3XuqKz6mOFk6M94Y%2B4w85J6FCmJan%2FNhGSpndP6fAw1uLHlOn%2F8xUaY%2F" +
"SfZ5GzcQ%2BbxOL1WA37yIwZ7xgLYlO%2ByAi3GuqMUmHZiNCai28emODXQ8RtWHvgv6dE" +
"SQ%2F0KpDMIWW7rYCaUa63UkUyeSQsKhrVqkIA%3D",
},
}
for _, test := range tests {
u, err := SignedURL("bucket-name", test.objectName, test.opts)
if err != nil {
t.Fatalf("[%s] %v", test.desc, err)
}
if u != test.want {
t.Fatalf("[%s] Unexpected signed URL; found %v", test.desc, u)
}
}
}
func TestSignedURLV4(t *testing.T) {
expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
tests := []struct {
desc string
objectName string
now time.Time
opts *SignedURLOptions
// Note for future implementors: X-Goog-Signature generated by having
// the client run through its algorithm with pre-defined input and copy
// pasting the output. These tests are not great for testing whether
// the right signature is calculated - instead we rely on the backend
// and integration tests for that.
want string
}{
{
desc: "SignURLV4 works",
objectName: "object-name",
now: expires.Add(-24 * time.Hour),
opts: &SignedURLOptions{
GoogleAccessID: "xxx@clientid",
PrivateKey: dummyKey("rsa"),
Method: "POST",
Expires: expires,
Scheme: SigningSchemeV4,
ContentType: "application/json",
MD5: "ICy5YqxZB1uWSwcVLSNLcA==",
Headers: []string{"x-goog-header1:true", "x-goog-header2:false"},
},
want: "https://storage.googleapis.com/bucket-name/object-name" +
"?X-Goog-Algorithm=GOOG4-RSA-SHA256" +
"&X-Goog-Credential=xxx%40clientid%2F20021001%2Fauto%2Fstorage%2Fgoog4_request" +
"&X-Goog-Date=20021001T100000Z&X-Goog-Expires=86400" +
"&X-Goog-Signature=774b11d89663d0562b0909131b8495e70d24e31f3417d3f8fd1438a72b620b256111a7221fecab14a6ebb7dc7eed7984316a794789beb4ecdda67a77407f6de1a68113e8fa2b885e330036a995c08f0f2a7d2c212a3d0a2fd1b392d40305d3fe31ab94c547a7541278f4a956ebb6565ebe4cb27f26e30b334adb7b065adc0d27f9eaa42ee76d75d673fc4523d023d9a636de0b5329f5dffbf80024cf21fdc6236e89aa41976572bfe4807be9a9a01f644ed9f546dcf1e0394665be7610f58c36b3d63379f4d1b64f646f7427f1fc55bb89d7fdd59017d007156c99e26440e828581cddf83faf03e739e5987c062d503f2b73f24049c25edc60ecbbc09f6ce945" +
"&X-Goog-SignedHeaders=content-md5%3Bcontent-type%3Bhost%3Bx-goog-header1%3Bx-goog-header2",
},
{
desc: "With PEM Private Key",
objectName: "object-name",
now: expires.Add(-24 * time.Hour),
opts: &SignedURLOptions{
GoogleAccessID: "xxx@clientid",
PrivateKey: dummyKey("pem"),
Method: "GET",
Expires: expires,
Scheme: SigningSchemeV4,
},
want: "https://storage.googleapis.com/bucket-name/object-name" +
"?X-Goog-Algorithm=GOOG4-RSA-SHA256" +
"&X-Goog-Credential=xxx%40clientid%2F20021001%2Fauto%2Fstorage%2Fgoog4_request" +
"&X-Goog-Date=20021001T100000Z&X-Goog-Expires=86400" +
"&X-Goog-Signature=5592f4b8b2cae14025b619546d69bb463ca8f2caaab538a3cc6b5868c8c64b83a8b04b57d8a82c8696a192f62abddc8d99e0454b3fc33feac5bf87c353f0703aab6cfee60364aaeecec2edd37c1d6e6793d90812b5811b7936a014a3efad5d08477b4fbfaebf04fa61f1ca03f31bcdc46a161868cd2f4e98def6c82634a01454" +
"&X-Goog-SignedHeaders=host",
},
{
desc: "Unsafe object name",
objectName: "object name界",
now: expires.Add(-24 * time.Hour),
opts: &SignedURLOptions{
GoogleAccessID: "xxx@clientid",
PrivateKey: dummyKey("pem"),
Method: "GET",
Expires: expires,
Scheme: SigningSchemeV4,
},
want: "https://storage.googleapis.com/bucket-name/object%20name%E7%95%8C" +
"?X-Goog-Algorithm=GOOG4-RSA-SHA256" +
"&X-Goog-Credential=xxx%40clientid%2F20021001%2Fauto%2Fstorage%2Fgoog4_request" +
"&X-Goog-Date=20021001T100000Z&X-Goog-Expires=86400" +
"&X-Goog-Signature=90fd455fb47725b45c08d65ddf99078184710ad30f09bc2a190c5416ba1596e4c58420e2e48744b03de2d1b85dc8679dcb4c36af6e7a1b2547cd62becaad72aebbbaf7c1686f1aa0fedf8a9b01cef20a8b8630d824a6f8b81bb9eb75f342a7d8a28457a4efd2baac93e37089b84b1506b2af72712187f638e0eafbac650b071a" +
"&X-Goog-SignedHeaders=host",
},
{
desc: "With custom SignBytes",
objectName: "object-name",
now: expires.Add(-24 * time.Hour),
opts: &SignedURLOptions{
GoogleAccessID: "xxx@clientid",
SignBytes: func(b []byte) ([]byte, error) {
return []byte("signed"), nil
},
Method: "GET",
Expires: expires,
Scheme: SigningSchemeV4,
},
want: "https://storage.googleapis.com/bucket-name/object-name" +
"?X-Goog-Algorithm=GOOG4-RSA-SHA256" +
"&X-Goog-Credential=xxx%40clientid%2F20021001%2Fauto%2Fstorage%2Fgoog4_request" +
"&X-Goog-Date=20021001T100000Z&X-Goog-Expires=86400" +
"&X-Goog-Signature=7369676e6564" + // hex('signed') = '7369676e6564'
"&X-Goog-SignedHeaders=host",
},
}
oldUTCNow := utcNow
defer func() {
utcNow = oldUTCNow
}()
for _, test := range tests {
t.Logf("Testcase: '%s'", test.desc)
utcNow = func() time.Time {
return test.now
}
got, err := SignedURL("bucket-name", test.objectName, test.opts)
if err != nil {
t.Fatal(err)
}
if got != test.want {
t.Fatalf("\n\tgot:\t%v\n\twant:\t%v", got, test.want)
}
}
}
func TestSignedURL_MissingOptions(t *testing.T) {
now, _ := time.Parse(time.RFC3339, "2002-10-01T00:00:00-05:00")
expires, _ := time.Parse(time.RFC3339, "2002-10-15T00:00:00-05:00")
pk := dummyKey("rsa")
var tests = []struct {
opts *SignedURLOptions
errMsg string
}{
{
&SignedURLOptions{},
"missing required GoogleAccessID",
},
{
&SignedURLOptions{GoogleAccessID: "access_id"},
"exactly one of PrivateKey or SignedBytes must be set",
},
{
&SignedURLOptions{
GoogleAccessID: "access_id",
SignBytes: func(b []byte) ([]byte, error) { return b, nil },
PrivateKey: pk,
},
"exactly one of PrivateKey or SignedBytes must be set",
},
{
&SignedURLOptions{
GoogleAccessID: "access_id",
PrivateKey: pk,
},
errMethodNotValid.Error(),
},
{
&SignedURLOptions{
GoogleAccessID: "access_id",
PrivateKey: pk,
Method: "getMethod", // wrong method name
},
errMethodNotValid.Error(),
},
{
&SignedURLOptions{
GoogleAccessID: "access_id",
PrivateKey: pk,
Method: "get", // name will be uppercased
},
"missing required expires",
},
{
&SignedURLOptions{
GoogleAccessID: "access_id",
SignBytes: func(b []byte) ([]byte, error) { return b, nil },
},
errMethodNotValid.Error(),
},
{
&SignedURLOptions{
GoogleAccessID: "access_id",
PrivateKey: pk,
Method: "PUT",
},
"missing required expires",
},
{
&SignedURLOptions{
GoogleAccessID: "access_id",
PrivateKey: pk,
Method: "PUT",
Expires: expires,
MD5: "invalid",
},
"invalid MD5 checksum",
},
// SigningSchemeV4 tests
{
&SignedURLOptions{
PrivateKey: pk,
Method: "GET",
Expires: expires,
Scheme: SigningSchemeV4,
},
"missing required GoogleAccessID",
},
{
&SignedURLOptions{
GoogleAccessID: "access_id",
Method: "GET",
Expires: expires,
SignBytes: func(b []byte) ([]byte, error) { return b, nil },
PrivateKey: pk,
Scheme: SigningSchemeV4,
},
"exactly one of PrivateKey or SignedBytes must be set",
},
{
&SignedURLOptions{
GoogleAccessID: "access_id",
PrivateKey: pk,
Expires: expires,
Scheme: SigningSchemeV4,
},
errMethodNotValid.Error(),
},
{
&SignedURLOptions{
GoogleAccessID: "access_id",
PrivateKey: pk,
Method: "PUT",
Scheme: SigningSchemeV4,
},
"missing required expires",
},
{
&SignedURLOptions{
GoogleAccessID: "access_id",
PrivateKey: pk,
Method: "PUT",
Expires: now.Add(time.Hour),
MD5: "invalid",
Scheme: SigningSchemeV4,
},
"invalid MD5 checksum",
},
{
&SignedURLOptions{
GoogleAccessID: "access_id",
PrivateKey: pk,
Method: "GET",
Expires: expires,
Scheme: SigningSchemeV4,
},
"expires must be within seven days from now",
},
{
&SignedURLOptions{
GoogleAccessID: "access_id",
PrivateKey: pk,
Method: "GET",
Expires: now.Add(time.Hour),
Scheme: SigningSchemeV2,
Style: VirtualHostedStyle(),
},
"are permitted with SigningSchemeV2",
},
}
oldUTCNow := utcNow
defer func() {
utcNow = oldUTCNow
}()
utcNow = func() time.Time {
return now
}
for _, test := range tests {
_, err := SignedURL("bucket", "name", test.opts)
if !strings.Contains(err.Error(), test.errMsg) {
t.Errorf("expected err: %v, found: %v", test.errMsg, err)
}
}
}
func TestPathEncodeV4(t *testing.T) {
tests := []struct {
input string
want string
}{
{
"path/with/slashes",
"path/with/slashes",
},
{
"path/with/speci@lchar$&",
"path/with/speci%40lchar%24%26",
},
{
"path/with/un_ersc_re/~tilde/sp ace/",
"path/with/un_ersc_re/~tilde/sp%20%20ace/",
},
}
for _, test := range tests {
if got := pathEncodeV4(test.input); got != test.want {
t.Errorf("pathEncodeV4(%q) = %q, want %q", test.input, got, test.want)
}
}
}
func dummyKey(kind string) []byte {
slurp, err := ioutil.ReadFile(fmt.Sprintf("./internal/test/dummy_%s", kind))
if err != nil {
log.Fatal(err)
}
return slurp
}
func TestObjectNames(t *testing.T) {
t.Parallel()
// Naming requirements: https://cloud.google.com/storage/docs/bucket-naming
const maxLegalLength = 1024
type testT struct {
name, want string
}
tests := []testT{
// Embedded characters important in URLs.
{"foo % bar", "foo%20%25%20bar"},
{"foo ? bar", "foo%20%3F%20bar"},
{"foo / bar", "foo%20/%20bar"},
{"foo %?/ bar", "foo%20%25%3F/%20bar"},
// Non-Roman scripts
{"타코", "%ED%83%80%EC%BD%94"},
{"世界", "%E4%B8%96%E7%95%8C"},
// Longest legal name
{strings.Repeat("a", maxLegalLength), strings.Repeat("a", maxLegalLength)},
// Line terminators besides CR and LF: https://en.wikipedia.org/wiki/Newline#Unicode
{"foo \u000b bar", "foo%20%0B%20bar"},
{"foo \u000c bar", "foo%20%0C%20bar"},
{"foo \u0085 bar", "foo%20%C2%85%20bar"},
{"foo \u2028 bar", "foo%20%E2%80%A8%20bar"},
{"foo \u2029 bar", "foo%20%E2%80%A9%20bar"},
// Null byte.
{"foo \u0000 bar", "foo%20%00%20bar"},
// Non-control characters that are discouraged, but not forbidden, according to the documentation.
{"foo # bar", "foo%20%23%20bar"},
{"foo []*? bar", "foo%20%5B%5D%2A%3F%20bar"},
// Angstrom symbol singleton and normalized forms: http://unicode.org/reports/tr15/
{"foo \u212b bar", "foo%20%E2%84%AB%20bar"},
{"foo \u0041\u030a bar", "foo%20A%CC%8A%20bar"},
{"foo \u00c5 bar", "foo%20%C3%85%20bar"},
// Hangul separating jamo: http://www.unicode.org/versions/Unicode7.0.0/ch18.pdf (Table 18-10)
{"foo \u3131\u314f bar", "foo%20%E3%84%B1%E3%85%8F%20bar"},
{"foo \u1100\u1161 bar", "foo%20%E1%84%80%E1%85%A1%20bar"},
{"foo \uac00 bar", "foo%20%EA%B0%80%20bar"},
}
// C0 control characters not forbidden by the docs.
var runes []rune
for r := rune(0x01); r <= rune(0x1f); r++ {
if r != '\u000a' && r != '\u000d' {
runes = append(runes, r)
}
}
tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%01%02%03%04%05%06%07%08%09%0B%0C%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20bar"})
// C1 control characters, plus DEL.
runes = nil
for r := rune(0x7f); r <= rune(0x9f); r++ {
runes = append(runes, r)
}
tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%20bar"})
opts := &SignedURLOptions{
GoogleAccessID: "xxx@clientid",
PrivateKey: dummyKey("rsa"),
Method: "GET",
MD5: "ICy5YqxZB1uWSwcVLSNLcA==",
Expires: time.Date(2002, time.October, 2, 10, 0, 0, 0, time.UTC),
ContentType: "application/json",
Headers: []string{"x-goog-header1", "x-goog-header2"},
}
for _, test := range tests {
g, err := SignedURL("bucket-name", test.name, opts)
if err != nil {
t.Errorf("SignedURL(%q) err=%v, want nil", test.name, err)
}
if w := "/bucket-name/" + test.want; !strings.Contains(g, w) {
t.Errorf("SignedURL(%q)=%q, want substring %q", test.name, g, w)
}
}
}
func TestCondition(t *testing.T) {
t.Parallel()
gotReq := make(chan *http.Request, 1)
hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) {
io.Copy(ioutil.Discard, r.Body)
gotReq <- r
w.WriteHeader(200)
})
defer close()
ctx := context.Background()
c, err := NewClient(ctx, option.WithHTTPClient(hc))
if err != nil {
t.Fatal(err)
}
obj := c.Bucket("buck").Object("obj")
dst := c.Bucket("dstbuck").Object("dst")
tests := []struct {
fn func() error
want string
}{
{
func() error {
_, err := obj.Generation(1234).NewReader(ctx)
return err
},
"GET /buck/obj?generation=1234",
},
{
func() error {
_, err := obj.If(Conditions{GenerationMatch: 1234}).NewReader(ctx)
return err
},
"GET /buck/obj?ifGenerationMatch=1234",
},
{
func() error {
_, err := obj.If(Conditions{GenerationNotMatch: 1234}).NewReader(ctx)
return err
},
"GET /buck/obj?ifGenerationNotMatch=1234",
},
{
func() error {
_, err := obj.If(Conditions{MetagenerationMatch: 1234}).NewReader(ctx)
return err
},
"GET /buck/obj?ifMetagenerationMatch=1234",
},
{
func() error {
_, err := obj.If(Conditions{MetagenerationNotMatch: 1234}).NewReader(ctx)
return err
},
"GET /buck/obj?ifMetagenerationNotMatch=1234",
},
{
func() error {
_, err := obj.If(Conditions{MetagenerationNotMatch: 1234}).Attrs(ctx)
return err
},
"GET /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationNotMatch=1234&prettyPrint=false&projection=full",
},
{
func() error {
_, err := obj.If(Conditions{MetagenerationMatch: 1234}).Update(ctx, ObjectAttrsToUpdate{})
return err
},
"PATCH /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationMatch=1234&prettyPrint=false&projection=full",
},
{
func() error { return obj.Generation(1234).Delete(ctx) },
"DELETE /storage/v1/b/buck/o/obj?alt=json&generation=1234&prettyPrint=false",
},
{
func() error {
w := obj.If(Conditions{GenerationMatch: 1234}).NewWriter(ctx)
w.ContentType = "text/plain"
return w.Close()
},
"POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=1234&name=obj&prettyPrint=false&projection=full&uploadType=multipart",
},
{
func() error {
w := obj.If(Conditions{DoesNotExist: true}).NewWriter(ctx)
w.ContentType = "text/plain"
return w.Close()
},
"POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=0&name=obj&prettyPrint=false&projection=full&uploadType=multipart",
},
{
func() error {
_, err := dst.If(Conditions{MetagenerationMatch: 5678}).CopierFrom(obj.If(Conditions{GenerationMatch: 1234})).Run(ctx)
return err
},
"POST /storage/v1/b/buck/o/obj/rewriteTo/b/dstbuck/o/dst?alt=json&ifMetagenerationMatch=5678&ifSourceGenerationMatch=1234&prettyPrint=false&projection=full",
},
}
for i, tt := range tests {
if err := tt.fn(); err != nil && err != io.EOF {
t.Error(err)
continue
}
select {
case r := <-gotReq:
got := r.Method + " " + r.RequestURI
if got != tt.want {
t.Errorf("%d. RequestURI = %q; want %q", i, got, tt.want)
}
case <-time.After(5 * time.Second):
t.Fatalf("%d. timeout", i)
}
if err != nil {
t.Fatal(err)
}
}
// Test an error, too:
err = obj.Generation(1234).NewWriter(ctx).Close()
if err == nil || !strings.Contains(err.Error(), "NewWriter: generation not supported") {
t.Errorf("want error about unsupported generation; got %v", err)
}
}
func TestConditionErrors(t *testing.T) {
t.Parallel()
for _, conds := range []Conditions{
{GenerationMatch: 0},
{DoesNotExist: false}, // same as above, actually
{GenerationMatch: 1, GenerationNotMatch: 2},
{GenerationNotMatch: 2, DoesNotExist: true},
{MetagenerationMatch: 1, MetagenerationNotMatch: 2},
} {
if err := conds.validate(""); err == nil {
t.Errorf("%+v: got nil, want error", conds)
}
}
}
// Test object compose.
func TestObjectCompose(t *testing.T) {
t.Parallel()
gotURL := make(chan string, 1)
gotBody := make(chan []byte, 1)
hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) {
body, _ := ioutil.ReadAll(r.Body)
gotURL <- r.URL.String()
gotBody <- body
w.Write([]byte("{}"))
})
defer close()
ctx := context.Background()
c, err := NewClient(ctx, option.WithHTTPClient(hc))
if err != nil {
t.Fatal(err)
}
testCases := []struct {
desc string
dst *ObjectHandle
srcs []*ObjectHandle
attrs *ObjectAttrs
sendCRC32C bool
wantReq raw.ComposeRequest
wantURL string
wantErr bool
}{
{
desc: "basic case",
dst: c.Bucket("foo").Object("bar"),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz"),
c.Bucket("foo").Object("quux"),
},
wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json&prettyPrint=false",
wantReq: raw.ComposeRequest{
Destination: &raw.Object{Bucket: "foo"},
SourceObjects: []*raw.ComposeRequestSourceObjects{
{Name: "baz"},
{Name: "quux"},
},
},
},
{
desc: "with object attrs",
dst: c.Bucket("foo").Object("bar"),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz"),
c.Bucket("foo").Object("quux"),
},
attrs: &ObjectAttrs{
Name: "not-bar",
ContentType: "application/json",
},
wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json&prettyPrint=false",
wantReq: raw.ComposeRequest{
Destination: &raw.Object{
Bucket: "foo",
Name: "not-bar",
ContentType: "application/json",
},
SourceObjects: []*raw.ComposeRequestSourceObjects{
{Name: "baz"},
{Name: "quux"},
},
},
},
{
desc: "with conditions",
dst: c.Bucket("foo").Object("bar").If(Conditions{
GenerationMatch: 12,
MetagenerationMatch: 34,
}),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz").Generation(56),
c.Bucket("foo").Object("quux").If(Conditions{GenerationMatch: 78}),
},
wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json&ifGenerationMatch=12&ifMetagenerationMatch=34&prettyPrint=false",
wantReq: raw.ComposeRequest{
Destination: &raw.Object{Bucket: "foo"},
SourceObjects: []*raw.ComposeRequestSourceObjects{
{
Name: "baz",
Generation: 56,
},
{
Name: "quux",
ObjectPreconditions: &raw.ComposeRequestSourceObjectsObjectPreconditions{
IfGenerationMatch: 78,
},
},
},
},
},
{
desc: "with crc32c",
dst: c.Bucket("foo").Object("bar"),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz"),
c.Bucket("foo").Object("quux"),
},
attrs: &ObjectAttrs{
CRC32C: 42,
},
sendCRC32C: true,
wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json&prettyPrint=false",
wantReq: raw.ComposeRequest{
Destination: &raw.Object{Bucket: "foo", Crc32c: "AAAAKg=="},
SourceObjects: []*raw.ComposeRequestSourceObjects{
{Name: "baz"},
{Name: "quux"},
},
},
},
{
desc: "no sources",
dst: c.Bucket("foo").Object("bar"),
wantErr: true,
},
{
desc: "destination, no bucket",
dst: c.Bucket("").Object("bar"),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz"),
},
wantErr: true,
},
{
desc: "destination, no object",
dst: c.Bucket("foo").Object(""),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz"),
},
wantErr: true,
},
{
desc: "source, different bucket",
dst: c.Bucket("foo").Object("bar"),
srcs: []*ObjectHandle{
c.Bucket("otherbucket").Object("baz"),
},
wantErr: true,
},
{
desc: "source, no object",
dst: c.Bucket("foo").Object("bar"),
srcs: []*ObjectHandle{
c.Bucket("foo").Object(""),
},
wantErr: true,
},
{
desc: "destination, bad condition",
dst: c.Bucket("foo").Object("bar").Generation(12),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz"),
},
wantErr: true,
},
{
desc: "source, bad condition",
dst: c.Bucket("foo").Object("bar"),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz").If(Conditions{MetagenerationMatch: 12}),
},
wantErr: true,
},
}
for _, tt := range testCases {
composer := tt.dst.ComposerFrom(tt.srcs...)
if tt.attrs != nil {
composer.ObjectAttrs = *tt.attrs
}
composer.SendCRC32C = tt.sendCRC32C
_, err := composer.Run(ctx)
if gotErr := err != nil; gotErr != tt.wantErr {
t.Errorf("%s: got error %v; want err %t", tt.desc, err, tt.wantErr)
continue
}
if tt.wantErr {
continue
}
u, body := <-gotURL, <-gotBody
if u != tt.wantURL {
t.Errorf("%s: request URL\ngot %q\nwant %q", tt.desc, u, tt.wantURL)
}
var req raw.ComposeRequest
if err := json.Unmarshal(body, &req); err != nil {
t.Errorf("%s: json.Unmarshal %v (body %s)", tt.desc, err, body)
}
if !testutil.Equal(req, tt.wantReq) {
// Print to JSON.
wantReq, _ := json.Marshal(tt.wantReq)
t.Errorf("%s: request body\ngot %s\nwant %s", tt.desc, body, wantReq)
}
}
}
// Test that ObjectIterator's Next and NextPage methods correctly terminate
// if there is nothing to iterate over.
func TestEmptyObjectIterator(t *testing.T) {
t.Parallel()
hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) {
io.Copy(ioutil.Discard, r.Body)
fmt.Fprintf(w, "{}")
})
defer close()
ctx := context.Background()
client, err := NewClient(ctx, option.WithHTTPClient(hClient))
if err != nil {
t.Fatal(err)
}
it := client.Bucket("b").Objects(ctx, nil)
_, err = it.Next()
if err != iterator.Done {
t.Errorf("got %v, want Done", err)
}
}
// Test that BucketIterator's Next method correctly terminates if there is
// nothing to iterate over.
func TestEmptyBucketIterator(t *testing.T) {
t.Parallel()
hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) {
io.Copy(ioutil.Discard, r.Body)
fmt.Fprintf(w, "{}")
})
defer close()
ctx := context.Background()
client, err := NewClient(ctx, option.WithHTTPClient(hClient))
if err != nil {
t.Fatal(err)
}
it := client.Buckets(ctx, "project")
_, err = it.Next()
if err != iterator.Done {
t.Errorf("got %v, want Done", err)
}
}
func TestCodecUint32(t *testing.T) {
t.Parallel()
for _, u := range []uint32{0, 1, 256, 0xFFFFFFFF} {
s := encodeUint32(u)
d, err := decodeUint32(s)
if err != nil {
t.Fatal(err)
}
if d != u {
t.Errorf("got %d, want input %d", d, u)
}
}
}
func TestUserProject(t *testing.T) {
// Verify that the userProject query param is sent.
t.Parallel()
ctx := context.Background()
gotURL := make(chan *url.URL, 1)
hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) {
io.Copy(ioutil.Discard, r.Body)
gotURL <- r.URL
if strings.Contains(r.URL.String(), "/rewriteTo/") {
res := &raw.RewriteResponse{Done: true}
bytes, err := res.MarshalJSON()
if err != nil {
t.Fatal(err)
}
w.Write(bytes)
} else {
fmt.Fprintf(w, "{}")
}
})
defer close()
client, err := NewClient(ctx, option.WithHTTPClient(hClient))
if err != nil {
t.Fatal(err)
}
re := regexp.MustCompile(`\buserProject=p\b`)
b := client.Bucket("b").UserProject("p")
o := b.Object("o")
check := func(msg string, f func()) {
f()
select {
case u := <-gotURL:
if !re.MatchString(u.RawQuery) {
t.Errorf("%s: query string %q does not contain userProject", msg, u.RawQuery)
}
case <-time.After(2 * time.Second):
t.Errorf("%s: timed out", msg)
}
}
check("buckets.delete", func() { b.Delete(ctx) })
check("buckets.get", func() { b.Attrs(ctx) })
check("buckets.patch", func() { b.Update(ctx, BucketAttrsToUpdate{}) })
check("storage.objects.compose", func() { o.ComposerFrom(b.Object("x")).Run(ctx) })
check("storage.objects.delete", func() { o.Delete(ctx) })
check("storage.objects.get", func() { o.Attrs(ctx) })
check("storage.objects.insert", func() { o.NewWriter(ctx).Close() })
check("storage.objects.list", func() { b.Objects(ctx, nil).Next() })
check("storage.objects.patch", func() { o.Update(ctx, ObjectAttrsToUpdate{}) })
check("storage.objects.rewrite", func() { o.CopierFrom(b.Object("x")).Run(ctx) })
check("storage.objectAccessControls.list", func() { o.ACL().List(ctx) })
check("storage.objectAccessControls.update", func() { o.ACL().Set(ctx, "", "") })
check("storage.objectAccessControls.delete", func() { o.ACL().Delete(ctx, "") })
check("storage.bucketAccessControls.list", func() { b.ACL().List(ctx) })
check("storage.bucketAccessControls.update", func() { b.ACL().Set(ctx, "", "") })
check("storage.bucketAccessControls.delete", func() { b.ACL().Delete(ctx, "") })
check("storage.defaultObjectAccessControls.list",
func() { b.DefaultObjectACL().List(ctx) })
check("storage.defaultObjectAccessControls.update",
func() { b.DefaultObjectACL().Set(ctx, "", "") })
check("storage.defaultObjectAccessControls.delete",
func() { b.DefaultObjectACL().Delete(ctx, "") })
check("buckets.getIamPolicy", func() { b.IAM().Policy(ctx) })
check("buckets.setIamPolicy", func() {
p := &iam.Policy{}
p.Add("m", iam.Owner)
b.IAM().SetPolicy(ctx, p)
})
check("buckets.testIamPermissions", func() { b.IAM().TestPermissions(ctx, nil) })
check("storage.notifications.insert", func() {
b.AddNotification(ctx, &Notification{TopicProjectID: "p", TopicID: "t"})
})
check("storage.notifications.delete", func() { b.DeleteNotification(ctx, "n") })
check("storage.notifications.list", func() { b.Notifications(ctx) })
}
func newTestServer(handler func(w http.ResponseWriter, r *http.Request)) (*http.Client, func()) {
ts := httptest.NewTLSServer(http.HandlerFunc(handler))
tlsConf := &tls.Config{InsecureSkipVerify: true}
tr := &http.Transport{
TLSClientConfig: tlsConf,
DialTLS: func(netw, addr string) (net.Conn, error) {
return tls.Dial("tcp", ts.Listener.Addr().String(), tlsConf)
},
}
return &http.Client{Transport: tr}, func() {
tr.CloseIdleConnections()
ts.Close()
}
}
func TestRawObjectToObjectAttrs(t *testing.T) {
t.Parallel()
tests := []struct {
in *raw.Object
want *ObjectAttrs
}{
{in: nil, want: nil},
{
in: &raw.Object{
Bucket: "Test",
ContentLanguage: "en-us",
ContentType: "video/mpeg",
CustomTime: "2020-08-25T19:33:36Z",
EventBasedHold: false,
Etag: "Zkyw9ACJZUvcYmlFaKGChzhmtnE/dt1zHSfweiWpwzdGsqXwuJZqiD0",
Generation: 7,
Md5Hash: "MTQ2ODNjYmE0NDRkYmNjNmRiMjk3NjQ1ZTY4M2Y1YzE=",
Name: "foo.mp4",
RetentionExpirationTime: "2019-03-31T19:33:36Z",
Size: 1 << 20,
TimeCreated: "2019-03-31T19:32:10Z",
TimeDeleted: "2019-03-31T19:33:39Z",
TemporaryHold: true,
},
want: &ObjectAttrs{
Bucket: "Test",
Created: time.Date(2019, 3, 31, 19, 32, 10, 0, time.UTC),
ContentLanguage: "en-us",
ContentType: "video/mpeg",
CustomTime: time.Date(2020, 8, 25, 19, 33, 36, 0, time.UTC),
Deleted: time.Date(2019, 3, 31, 19, 33, 39, 0, time.UTC),
EventBasedHold: false,
Etag: "Zkyw9ACJZUvcYmlFaKGChzhmtnE/dt1zHSfweiWpwzdGsqXwuJZqiD0",
Generation: 7,
MD5: []byte("14683cba444dbcc6db297645e683f5c1"),
Name: "foo.mp4",
RetentionExpirationTime: time.Date(2019, 3, 31, 19, 33, 36, 0, time.UTC),
Size: 1 << 20,
TemporaryHold: true,
},
},
}
for i, tt := range tests {
got := newObject(tt.in)
if diff := testutil.Diff(got, tt.want); diff != "" {
t.Errorf("#%d: newObject mismatches:\ngot=-, want=+:\n%s", i, diff)
}
}
}
func TestObjectAttrsToRawObject(t *testing.T) {
t.Parallel()
bucketName := "the-bucket"
in := &ObjectAttrs{
Bucket: "Test",
Created: time.Date(2019, 3, 31, 19, 32, 10, 0, time.UTC),
ContentLanguage: "en-us",
ContentType: "video/mpeg",
Deleted: time.Date(2019, 3, 31, 19, 33, 39, 0, time.UTC),
EventBasedHold: false,
Etag: "Zkyw9ACJZUvcYmlFaKGChzhmtnE/dt1zHSfweiWpwzdGsqXwuJZqiD0",
Generation: 7,
MD5: []byte("14683cba444dbcc6db297645e683f5c1"),
Name: "foo.mp4",
RetentionExpirationTime: time.Date(2019, 3, 31, 19, 33, 36, 0, time.UTC),
Size: 1 << 20,
TemporaryHold: true,
}
want := &raw.Object{
Bucket: bucketName,
ContentLanguage: "en-us",
ContentType: "video/mpeg",
EventBasedHold: false,
Name: "foo.mp4",
RetentionExpirationTime: "2019-03-31T19:33:36Z",
TemporaryHold: true,
}
got := in.toRawObject(bucketName)
if !testutil.Equal(got, want) {
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("toRawObject mismatches:\ngot=-, want=+:\n%s", diff)
}
}
}
func TestAttrToFieldMapCoverage(t *testing.T) {
t.Parallel()
oa := reflect.TypeOf((*ObjectAttrs)(nil)).Elem()
oaFields := make(map[string]bool)
for i := 0; i < oa.NumField(); i++ {
fieldName := oa.Field(i).Name
oaFields[fieldName] = true
}
// Check that all fields of attrToFieldMap exist in ObjectAttrs.
for k := range attrToFieldMap {
if _, ok := oaFields[k]; !ok {
t.Errorf("%v is not an ObjectAttrs field", k)
}
}
// Check that all fields of ObjectAttrs exist in attrToFieldMap, with
// known exceptions which aren't sent over the wire but are settable by
// the user.
for k := range oaFields {
if _, ok := attrToFieldMap[k]; !ok {
if k != "Prefix" && k != "PredefinedACL" {
t.Errorf("ObjectAttrs.%v is not in attrToFieldMap", k)
}
}
}
}
// Create a client using a combination of custom endpoint and
// STORAGE_EMULATOR_HOST env variable and verify that raw.BasePath (used
// for writes) and readHost and scheme (used for reads) are all set correctly.
func TestWithEndpoint(t *testing.T) {
originalStorageEmulatorHost := os.Getenv("STORAGE_EMULATOR_HOST")
testCases := []struct {
CustomEndpoint string
StorageEmulatorHost string
WantRawBasePath string
WantReadHost string
WantScheme string
}{
{
CustomEndpoint: "",
StorageEmulatorHost: "",
WantRawBasePath: "https://storage.googleapis.com/storage/v1/",
WantReadHost: "storage.googleapis.com",
WantScheme: "https",
},
{
CustomEndpoint: "https://fake.gcs.com:8080/storage/v1",
StorageEmulatorHost: "",
WantRawBasePath: "https://fake.gcs.com:8080/storage/v1",
WantReadHost: "fake.gcs.com:8080",
WantScheme: "https",
},
{
CustomEndpoint: "",
StorageEmulatorHost: "http://emu.com",
WantRawBasePath: "http://emu.com",
WantReadHost: "emu.com",
WantScheme: "http",
},
{
CustomEndpoint: "https://fake.gcs.com:8080/storage/v1",
StorageEmulatorHost: "http://emu.com",
WantRawBasePath: "https://fake.gcs.com:8080/storage/v1",
WantReadHost: "fake.gcs.com:8080",
WantScheme: "http",
},
}
ctx := context.Background()
for _, tc := range testCases {
os.Setenv("STORAGE_EMULATOR_HOST", tc.StorageEmulatorHost)
c, err := NewClient(ctx, option.WithEndpoint(tc.CustomEndpoint))
if err != nil {
t.Fatalf("error creating client: %v", err)
}
if c.raw.BasePath != tc.WantRawBasePath {
t.Errorf("raw.BasePath not set correctly: got %v, want %v", c.raw.BasePath, tc.WantRawBasePath)
}
if c.readHost != tc.WantReadHost {
t.Errorf("readHost not set correctly: got %v, want %v", c.readHost, tc.WantReadHost)
}
if c.scheme != tc.WantScheme {
t.Errorf("scheme not set correctly: got %v, want %v", c.scheme, tc.WantScheme)
}
}
os.Setenv("STORAGE_EMULATOR_HOST", originalStorageEmulatorHost)
}
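// A minimal sketch of the behaviour exercised above, mirroring the third test case
// (endpoint values are illustrative):
//
//	os.Setenv("STORAGE_EMULATOR_HOST", "http://emu.com")
//	c, err := NewClient(context.Background(), option.WithEndpoint(""))
//	if err != nil {
//		// handle error
//	}
//	_ = c // c.raw.BasePath == "http://emu.com", c.readHost == "emu.com", c.scheme == "http"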
|
[
"\"STORAGE_EMULATOR_HOST\""
] |
[] |
[
"STORAGE_EMULATOR_HOST"
] |
[]
|
["STORAGE_EMULATOR_HOST"]
|
go
| 1 | 0 | |
runners/java-fn-execution/src/main/java/org/apache/beam/runners/fnexecution/environment/DockerEnvironmentFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.runners.fnexecution.environment;
import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.MoreObjects.firstNonNull;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.time.Duration;
import java.util.List;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.beam.model.pipeline.v1.RunnerApi;
import org.apache.beam.model.pipeline.v1.RunnerApi.Environment;
import org.apache.beam.runners.core.construction.BeamUrns;
import org.apache.beam.runners.fnexecution.GrpcFnServer;
import org.apache.beam.runners.fnexecution.ServerFactory;
import org.apache.beam.runners.fnexecution.artifact.ArtifactRetrievalService;
import org.apache.beam.runners.fnexecution.control.ControlClientPool;
import org.apache.beam.runners.fnexecution.control.FnApiControlClientPoolService;
import org.apache.beam.runners.fnexecution.control.InstructionRequestHandler;
import org.apache.beam.runners.fnexecution.logging.GrpcLoggingService;
import org.apache.beam.runners.fnexecution.provisioning.StaticGrpcProvisionService;
import org.apache.beam.sdk.fn.IdGenerator;
import org.apache.beam.sdk.options.ManualDockerEnvironmentOptions;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions;
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList;
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.net.HostAndPort;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An {@link EnvironmentFactory} that creates docker containers by shelling out to docker. Returned
* {@link RemoteEnvironment RemoteEnvironments} own their respective docker containers. Not
* thread-safe.
*/
public class DockerEnvironmentFactory implements EnvironmentFactory {
private static final Logger LOG = LoggerFactory.getLogger(DockerEnvironmentFactory.class);
static DockerEnvironmentFactory forServicesWithDocker(
DockerCommand docker,
GrpcFnServer<FnApiControlClientPoolService> controlServiceServer,
GrpcFnServer<GrpcLoggingService> loggingServiceServer,
GrpcFnServer<ArtifactRetrievalService> retrievalServiceServer,
GrpcFnServer<StaticGrpcProvisionService> provisioningServiceServer,
ControlClientPool.Source clientSource,
IdGenerator idGenerator,
boolean retainDockerContainer) {
return new DockerEnvironmentFactory(
docker,
controlServiceServer,
loggingServiceServer,
retrievalServiceServer,
provisioningServiceServer,
idGenerator,
clientSource,
retainDockerContainer);
}
private final DockerCommand docker;
private final GrpcFnServer<FnApiControlClientPoolService> controlServiceServer;
private final GrpcFnServer<GrpcLoggingService> loggingServiceServer;
private final GrpcFnServer<ArtifactRetrievalService> retrievalServiceServer;
private final GrpcFnServer<StaticGrpcProvisionService> provisioningServiceServer;
private final IdGenerator idGenerator;
private final ControlClientPool.Source clientSource;
private final boolean retainDockerContainer;
private DockerEnvironmentFactory(
DockerCommand docker,
GrpcFnServer<FnApiControlClientPoolService> controlServiceServer,
GrpcFnServer<GrpcLoggingService> loggingServiceServer,
GrpcFnServer<ArtifactRetrievalService> retrievalServiceServer,
GrpcFnServer<StaticGrpcProvisionService> provisioningServiceServer,
IdGenerator idGenerator,
ControlClientPool.Source clientSource,
boolean retainDockerContainer) {
this.docker = docker;
this.controlServiceServer = controlServiceServer;
this.loggingServiceServer = loggingServiceServer;
this.retrievalServiceServer = retrievalServiceServer;
this.provisioningServiceServer = provisioningServiceServer;
this.idGenerator = idGenerator;
this.clientSource = clientSource;
this.retainDockerContainer = retainDockerContainer;
}
/** Creates a new, active {@link RemoteEnvironment} backed by a local Docker container. */
@Override
public RemoteEnvironment createEnvironment(Environment environment) throws Exception {
Preconditions.checkState(
environment
.getUrn()
.equals(BeamUrns.getUrn(RunnerApi.StandardEnvironments.Environments.DOCKER)),
"The passed environment does not contain a DockerPayload.");
final RunnerApi.DockerPayload dockerPayload =
RunnerApi.DockerPayload.parseFrom(environment.getPayload());
final String workerId = idGenerator.getId();
// Prepare docker invocation.
String containerImage = dockerPayload.getContainerImage();
// TODO: https://issues.apache.org/jira/browse/BEAM-4148 The default service address will not
// work for Docker for Mac.
String loggingEndpoint = loggingServiceServer.getApiServiceDescriptor().getUrl();
String artifactEndpoint = retrievalServiceServer.getApiServiceDescriptor().getUrl();
String provisionEndpoint = provisioningServiceServer.getApiServiceDescriptor().getUrl();
String controlEndpoint = controlServiceServer.getApiServiceDescriptor().getUrl();
ImmutableList.Builder<String> dockerArgsBuilder =
ImmutableList.<String>builder()
.addAll(gcsCredentialArgs())
// NOTE: Host networking does not work on Mac, but the command line flag is accepted.
.add("--network=host")
// We need to pass on the information about Docker-on-Mac environment (due to missing
// host networking on Mac)
.add("--env=DOCKER_MAC_CONTAINER=" + System.getenv("DOCKER_MAC_CONTAINER"));
List<String> args =
ImmutableList.of(
String.format("--id=%s", workerId),
String.format("--logging_endpoint=%s", loggingEndpoint),
String.format("--artifact_endpoint=%s", artifactEndpoint),
String.format("--provision_endpoint=%s", provisionEndpoint),
String.format("--control_endpoint=%s", controlEndpoint));
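    // Roughly, the invocation assembled above corresponds to a docker run of the form below
    // (endpoint values are illustrative; the exact wrapper flags come from DockerCommand.runImage,
    // plus the optional gcloud --mount from gcsCredentialArgs()):
    //   docker run --network=host --env=DOCKER_MAC_CONTAINER=... <containerImage> \
    //     --id=<workerId> --logging_endpoint=... --artifact_endpoint=... \
    //     --provision_endpoint=... --control_endpoint=...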
LOG.debug("Creating Docker Container with ID {}", workerId);
// Wrap the blocking call to clientSource.get in case an exception is thrown.
String containerId = null;
InstructionRequestHandler instructionHandler = null;
try {
containerId = docker.runImage(containerImage, dockerArgsBuilder.build(), args);
LOG.debug("Created Docker Container with Container ID {}", containerId);
// Wait on a client from the gRPC server.
try {
instructionHandler = clientSource.take(workerId, Duration.ofMinutes(1));
} catch (TimeoutException timeoutEx) {
RuntimeException runtimeException =
new RuntimeException(
String.format(
"Docker container %s failed to start up successfully within 1 minute.",
containerImage),
timeoutEx);
try {
String containerLogs = docker.getContainerLogs(containerId);
LOG.error("Docker container {} logs:\n{}", containerId, containerLogs);
} catch (Exception getLogsException) {
runtimeException.addSuppressed(getLogsException);
}
throw runtimeException;
} catch (InterruptedException interruptEx) {
Thread.currentThread().interrupt();
throw new RuntimeException(interruptEx);
}
} catch (Exception e) {
if (containerId != null) {
// Kill the launched docker container if we can't retrieve a client for it.
try {
docker.killContainer(containerId);
if (!retainDockerContainer) {
docker.removeContainer(containerId);
}
} catch (Exception dockerException) {
e.addSuppressed(dockerException);
}
}
throw e;
}
return DockerContainerEnvironment.create(
docker, environment, containerId, instructionHandler, retainDockerContainer);
}
private List<String> gcsCredentialArgs() {
String dockerGcloudConfig = "/root/.config/gcloud";
String localGcloudConfig =
firstNonNull(
System.getenv("CLOUDSDK_CONFIG"),
Paths.get(System.getProperty("user.home"), ".config", "gcloud").toString());
// TODO(BEAM-4729): Allow this to be disabled manually.
if (Files.exists(Paths.get(localGcloudConfig))) {
return ImmutableList.of(
"--mount",
String.format("type=bind,src=%s,dst=%s", localGcloudConfig, dockerGcloudConfig));
} else {
return ImmutableList.of();
}
}
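  // When a local gcloud configuration exists, the method above produces a bind-mount argument
  // such as (path is illustrative):
  //   --mount type=bind,src=/home/alice/.config/gcloud,dst=/root/.config/gcloud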
/**
* NOTE: Deployment on Macs is intended for local development. As of 18.03, Docker-for-Mac does
   * not implement host networking (--network=host is effectively a no-op). Instead, we use a
* special DNS entry that points to the host:
* https://docs.docker.com/docker-for-mac/networking/#use-cases-and-workarounds The special
* hostname has historically changed between versions, so this is subject to breakages and will
* likely only support the latest version at any time.
*/
static class DockerOnMac {
    // TODO: This host name seems to change with every other Docker release. Do we attempt to
    // keep up, or attempt to document the supported Docker version(s)?
private static final String DOCKER_FOR_MAC_HOST = "host.docker.internal";
// True if we're inside a container (i.e. job-server container) with MacOS as the host system
private static final boolean RUNNING_INSIDE_DOCKER_ON_MAC =
"1".equals(System.getenv("DOCKER_MAC_CONTAINER"));
// Port offset for MacOS since we don't have host networking and need to use published ports
private static final int MAC_PORT_START = 8100;
private static final int MAC_PORT_END = 8200;
private static final AtomicInteger MAC_PORT = new AtomicInteger(MAC_PORT_START);
static ServerFactory getServerFactory() {
ServerFactory.UrlFactory dockerUrlFactory =
(host, port) -> HostAndPort.fromParts(DOCKER_FOR_MAC_HOST, port).toString();
if (RUNNING_INSIDE_DOCKER_ON_MAC) {
// If we're already running in a container, we need to use a fixed port range due to
// non-existing host networking in Docker-for-Mac. The port range needs to be published
// when bringing up the Docker container, see DockerEnvironmentFactory.
return ServerFactory.createWithUrlFactoryAndPortSupplier(
dockerUrlFactory,
// We only use the published Docker ports 8100-8200 in a round-robin fashion
() -> MAC_PORT.getAndUpdate(val -> val == MAC_PORT_END ? MAC_PORT_START : val + 1));
} else {
return ServerFactory.createWithUrlFactory(dockerUrlFactory);
}
}
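    // Illustrative note on the port supplier above: getAndUpdate hands out the current value and
    // then advances it, so ports are assigned 8100, 8101, ..., 8200 and then wrap back to 8100.
    // At most 101 distinct published ports are available before ports are reused.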
}
/** Provider for DockerEnvironmentFactory. */
public static class Provider implements EnvironmentFactory.Provider {
private final boolean retainDockerContainer;
public Provider(PipelineOptions options) {
this.retainDockerContainer =
options.as(ManualDockerEnvironmentOptions.class).getRetainDockerContainers();
}
@Override
public EnvironmentFactory createEnvironmentFactory(
GrpcFnServer<FnApiControlClientPoolService> controlServiceServer,
GrpcFnServer<GrpcLoggingService> loggingServiceServer,
GrpcFnServer<ArtifactRetrievalService> retrievalServiceServer,
GrpcFnServer<StaticGrpcProvisionService> provisioningServiceServer,
ControlClientPool clientPool,
IdGenerator idGenerator) {
return DockerEnvironmentFactory.forServicesWithDocker(
DockerCommand.getDefault(),
controlServiceServer,
loggingServiceServer,
retrievalServiceServer,
provisioningServiceServer,
clientPool.getSource(),
idGenerator,
retainDockerContainer);
}
@Override
public ServerFactory getServerFactory() {
switch (getPlatform()) {
case LINUX:
return ServerFactory.createDefault();
case MAC:
return DockerOnMac.getServerFactory();
default:
LOG.warn("Unknown Docker platform. Falling back to default server factory");
return ServerFactory.createDefault();
}
}
private static Platform getPlatform() {
String osName = System.getProperty("os.name").toLowerCase();
// TODO: Make this more robust?
      // The DOCKER_MAC_CONTAINER environment variable is necessary to detect whether we run in
      // a container on macOS. Docker on macOS internally uses a Linux VM, which makes it
      // indistinguishable from Linux.
// We still need to apply port mapping due to missing host networking.
if (osName.startsWith("mac") || DockerOnMac.RUNNING_INSIDE_DOCKER_ON_MAC) {
return Platform.MAC;
} else if (osName.startsWith("linux")) {
return Platform.LINUX;
}
return Platform.OTHER;
}
private enum Platform {
MAC,
LINUX,
OTHER,
}
}
}
|
[
"\"DOCKER_MAC_CONTAINER\"",
"\"CLOUDSDK_CONFIG\"",
"\"DOCKER_MAC_CONTAINER\""
] |
[] |
[
"CLOUDSDK_CONFIG",
"DOCKER_MAC_CONTAINER"
] |
[]
|
["CLOUDSDK_CONFIG", "DOCKER_MAC_CONTAINER"]
|
java
| 2 | 0 | |
inits/conf.go
|
package inits
import (
"blog/global"
"fmt"
"github.com/fsnotify/fsnotify"
"github.com/spf13/viper"
"os"
)
func init() {
var config string
if configEnv := os.Getenv("BA_CONFIG"); configEnv == "" {
config = "config.yaml"
} else {
config = configEnv
fmt.Printf("您正在使用BA_CONFIG环境变量,config的路径为%v\n", config)
}
v := viper.New()
v.SetConfigFile(config)
err := v.ReadInConfig()
if err != nil {
panic(fmt.Errorf("fatal error config file: %s ", err))
}
v.WatchConfig()
v.OnConfigChange(func(e fsnotify.Event) {
fmt.Println("config file changed:", e.Name)
if err := v.Unmarshal(&global.CONFIG); err != nil {
panic(err)
}
})
if err := v.Unmarshal(&global.CONFIG); err != nil {
panic(err)
}
}
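// Example (path and binary name are illustrative): override the default config.yaml by
// exporting BA_CONFIG before starting the service:
//
//	BA_CONFIG=/etc/blog/config.prod.yaml ./blog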
|
[
"\"BA_CONFIG\""
] |
[] |
[
"BA_CONFIG"
] |
[]
|
["BA_CONFIG"]
|
go
| 1 | 0 | |
workflows/pipe-common/pipeline/autoscaling/gcpprovider.py
|
# Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
import sys
import time
import uuid
from cloudprovider import AbstractInstanceProvider, LIMIT_EXCEEDED_ERROR_MASSAGE, LIMIT_EXCEEDED_EXIT_CODE
from random import randint
from time import sleep
from googleapiclient import discovery
from pipeline.autoscaling import utils
DISABLE_ACCESS = 'disable_external_access'
OS_DISK_SIZE = 10
INSTANCE_USER_NAME = "pipeline"
NO_BOOT_DEVICE_NAME = 'sdb1'
SWAP_DEVICE_NAME = 'sdb2'
# custom instance format
GPU_CUSTOM_INSTANCE_PARTS = 5
GPU_CUSTOM_INSTANCE_TYPE_INDEX = 3
GPU_CUSTOM_INSTANCE_COUNT_INDEX = 4
GPU_NVIDIA_PREFIX = 'nvidia-tesla-'
GPU_TYPE_PREFIX = 'gpu-'
class GCPInstanceProvider(AbstractInstanceProvider):
def __init__(self, cloud_region):
self.cloud_region = cloud_region
self.project_id = os.environ["GOOGLE_PROJECT_ID"]
self.client = discovery.build('compute', 'v1')
def run_instance(self, is_spot, bid_price, ins_type, ins_hdd, ins_img, ins_key, run_id, kms_encyr_key_id,
num_rep, time_rep, kube_ip, kubeadm_token):
ssh_pub_key = utils.read_ssh_key(ins_key)
swap_size = utils.get_swap_size(self.cloud_region, ins_type, is_spot, "GCP")
user_data_script = utils.get_user_data_script(self.cloud_region, ins_type, ins_img,
kube_ip, kubeadm_token, swap_size)
instance_type, gpu_type, gpu_count = self.parse_instance_type(ins_type)
machine_type = 'zones/{}/machineTypes/{}'.format(self.cloud_region, instance_type)
instance_name = "gcp-" + uuid.uuid4().hex[0:16]
network_interfaces = self.__build_networks()
if is_spot:
utils.pipe_log('Preemptible instance with run id: ' + run_id + ' will be launched')
body = {
'name': instance_name,
'machineType': machine_type,
'scheduling': {
'onHostMaintenance': 'terminate',
'preemptible': is_spot
},
'canIpForward': True,
'disks': self.__get_disk_devices(ins_img, OS_DISK_SIZE, ins_hdd, swap_size),
'networkInterfaces': network_interfaces,
'labels': GCPInstanceProvider.get_tags(run_id, self.cloud_region),
'tags': {
'items': utils.get_network_tags(self.cloud_region)
},
"metadata": {
"items": [
{
"key": "ssh-keys",
"value": "{user}:{key} {user}".format(key=ssh_pub_key, user=INSTANCE_USER_NAME)
},
{
"key": "startup-script",
"value": user_data_script
}
]
}
}
if gpu_type is not None and gpu_count > 0:
gpu = {"guestAccelerators": [
{
"acceleratorCount": [gpu_count],
"acceleratorType": "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/acceleratorTypes/{gpu_type}"
.format(project=self.project_id,
zone=self.cloud_region,
gpu_type=gpu_type)
}
]}
body.update(gpu)
try:
response = self.client.instances().insert(
project=self.project_id,
zone=self.cloud_region,
body=body).execute()
self.__wait_for_operation(response['name'])
except Exception as client_error:
if 'quota' in client_error.__str__().lower():
utils.pipe_log_warn(LIMIT_EXCEEDED_ERROR_MASSAGE)
sys.exit(LIMIT_EXCEEDED_EXIT_CODE)
else:
raise client_error
ip_response = self.client.instances().get(
project=self.project_id,
zone=self.cloud_region,
instance=instance_name
).execute()
private_ip = ip_response['networkInterfaces'][0]['networkIP']
return instance_name, private_ip
def parse_instance_type(self, ins_type):
# Custom type with GPU: gpu-custom-4-16000-k80-1
# Custom type with CPU only: custom-4-16000
# Predefined type: n1-standard-1
if not ins_type.startswith(GPU_TYPE_PREFIX):
return ins_type, None, 0
parts = ins_type[len(GPU_TYPE_PREFIX):].split('-')
if len(parts) != GPU_CUSTOM_INSTANCE_PARTS:
raise RuntimeError('Custom instance type with GPU "%s" does not match expected pattern.' % ins_type)
gpu_type = parts[GPU_CUSTOM_INSTANCE_TYPE_INDEX]
gpu_count = parts[GPU_CUSTOM_INSTANCE_COUNT_INDEX]
return '-'.join(parts[0:GPU_CUSTOM_INSTANCE_TYPE_INDEX]), GPU_NVIDIA_PREFIX + gpu_type, gpu_count
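    # Worked example of the formats documented above (values are illustrative):
    #   parse_instance_type('n1-standard-1')            -> ('n1-standard-1', None, 0)
    #   parse_instance_type('gpu-custom-4-16000-k80-1') -> ('custom-4-16000', 'nvidia-tesla-k80', '1')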
def find_and_tag_instance(self, old_id, new_id):
instance = self.__find_instance(old_id)
if instance:
labels = instance['labels']
labels['name'] = new_id
labels_body = {'labels': labels, 'labelFingerprint': instance['labelFingerprint']}
reassign = self.client.instances().setLabels(
project=self.project_id,
zone=self.cloud_region,
instance=instance['name'],
body=labels_body).execute()
self.__wait_for_operation(reassign['name'])
return instance['name']
else:
raise RuntimeError('Instance with id: {} not found!'.format(old_id))
def verify_run_id(self, run_id):
utils.pipe_log('Checking if instance already exists for RunID {}'.format(run_id))
instance = self.__find_instance(run_id)
if instance and len(instance['networkInterfaces'][0]) > 0:
ins_id = instance['name']
ins_ip = instance['networkInterfaces'][0]['networkIP']
utils.pipe_log('Found existing instance (ID: {}, IP: {}) for RunID {}\n-'.format(ins_id, ins_ip, run_id))
else:
ins_id = ''
ins_ip = ''
utils.pipe_log('No existing instance found for RunID {}\n-'.format(run_id))
return ins_id, ins_ip
def check_instance(self, ins_id, run_id, num_rep, time_rep):
utils.pipe_log('Checking instance ({}) boot state'.format(ins_id))
port = 8888
response = self.__find_instance(run_id)
ipaddr = response['networkInterfaces'][0]['networkIP']
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
utils.pipe_log('- Waiting for instance boot up...')
result = utils.poll_instance(sock, time_rep, ipaddr, port)
rep = 0
while result != 0:
sleep(time_rep)
result = utils.poll_instance(sock, time_rep, ipaddr, port)
rep = utils.increment_or_fail(num_rep, rep,
'Exceeded retry count ({}) for instance ({}) network check on port {}'.format(
num_rep, ins_id, port))
utils.pipe_log('Instance is booted. ID: {}, IP: {}\n-'.format(ins_id, ipaddr))
def get_instance_names(self, ins_id):
instance = self.client.instances().get(
project=self.project_id,
zone=self.cloud_region,
instance=ins_id).execute()
if instance:
# according to https://cloud.google.com/compute/docs/internal-dns#about_internal_dns
return '{}.{}.c.{}.internal'.format(instance['name'], self.cloud_region, self.project_id), instance['name']
return None, None
def find_instance(self, run_id):
instance = self.__find_instance(run_id)
if instance:
return instance['name']
return None
def terminate_instance(self, ins_id):
delete = self.client.instances().delete(
project=self.project_id,
zone=self.cloud_region,
instance=ins_id).execute()
self.__wait_for_operation(delete['name'])
def terminate_instance_by_ip_or_name(self, internal_ip, node_name):
items = self.__filter_instances("")
for instance in items:
if instance['networkInterfaces'][0]['networkIP'] == internal_ip:
self.terminate_instance(instance['name'])
def __find_instance(self, run_id):
items = self.__filter_instances('labels.name="{}"'.format(run_id))
if items:
filtered = [ins for ins in items if 'labels' in ins and ins['labels']['name'] == run_id]
if filtered and len(filtered) == 1:
return filtered[0]
return None
def __filter_instances(self, filter):
result = self.client.instances().list(
project=self.project_id,
zone=self.cloud_region,
filter=filter
).execute()
if 'items' in result:
return result['items']
else:
return None
def __get_boot_device(self, disk_size, image_family):
project_and_family = image_family.split("/")
if len(project_and_family) != 2:
            raise RuntimeError("node_image parameter doesn't match the Google image name convention: <project>/<imageFamily>")
image = self.client.images().get(project=project_and_family[0], image=project_and_family[1]).execute()
if image is None or 'diskSizeGb' not in image:
utils.pipe_log('Failed to get image disk size info. Falling back to default size %d ' % disk_size)
image_disk_size = disk_size
else:
image_disk_size = image['diskSizeGb']
return {
'boot': True,
'autoDelete': True,
'deviceName': 'sda1',
'initializeParams': {
'diskSizeGb': image_disk_size,
'diskType': 'projects/{}/zones/{}/diskTypes/pd-ssd'.format(self.project_id, self.cloud_region),
'sourceImage': 'projects/{}/global/images/{}'.format(project_and_family[0], project_and_family[1])
},
'mode': 'READ_WRITE',
'type': 'PERSISTENT'
}
def __get_disk_devices(self, ins_img, os_disk_size, ins_hdd, swap_size):
disks = [self.__get_boot_device(os_disk_size, ins_img),
self.__get_device(ins_hdd, NO_BOOT_DEVICE_NAME)]
if swap_size is not None and swap_size > 0:
disks.append(self.__get_device(swap_size, SWAP_DEVICE_NAME))
return disks
def __get_device(self, ins_hdd, device_name):
return {
'boot': False,
'autoDelete': True,
'deviceName': device_name,
'mode': 'READ_WRITE',
'type': 'PERSISTENT',
'initializeParams': {
'diskSizeGb': ins_hdd,
'diskType': 'projects/{}/zones/{}/diskTypes/pd-ssd'.format(self.project_id, self.cloud_region)
}
}
def __wait_for_operation(self, operation):
while True:
result = self.client.zoneOperations().get(
project=self.project_id,
zone=self.cloud_region,
operation=operation).execute()
if result['status'] == 'DONE':
if 'error' in result:
raise Exception(result['error'])
return result
time.sleep(1)
def __build_networks(self):
region_name = self.cloud_region[:self.cloud_region.rfind('-')]
allowed_networks = utils.get_networks_config(self.cloud_region)
subnet_id = 'default'
network_name = 'default'
if allowed_networks and len(allowed_networks) > 0:
network_num = randint(0, len(allowed_networks) - 1)
network_name = allowed_networks.items()[network_num][0]
subnet_id = allowed_networks.items()[network_num][1]
utils.pipe_log(
'- Networks list found, subnet {} in Network {} will be used'.format(subnet_id, network_name))
else:
utils.pipe_log('- Networks list NOT found, default subnet in random AZ will be used')
access_config = utils.get_access_config(self.cloud_region)
disable_external_access = False
if access_config is not None:
disable_external_access = DISABLE_ACCESS in access_config and access_config[DISABLE_ACCESS]
network = {
'network': 'projects/{project}/global/networks/{network}'.format(project=self.project_id,
network=network_name),
'subnetwork': 'projects/{project}/regions/{region}/subnetworks/{subnet}'.format(
project=self.project_id, subnet=subnet_id, region=region_name)
}
if not disable_external_access:
network['accessConfigs'] = [
{
'name': 'External NAT',
'type': 'ONE_TO_ONE_NAT'
}
]
return [network]
@staticmethod
def resource_tags():
tags = {}
_, config_tags = utils.load_cloud_config()
if config_tags is None:
return tags
for key, value in config_tags.iteritems():
tags.update({key: value})
return tags
@staticmethod
def run_id_tag(run_id):
return {
'name': run_id,
}
@staticmethod
def get_tags(run_id, cloud_region):
tags = GCPInstanceProvider.run_id_tag(run_id)
GCPInstanceProvider.append_tags(tags, GCPInstanceProvider.resource_tags())
GCPInstanceProvider.append_tags(tags, utils.get_region_tags(cloud_region))
return tags
@staticmethod
def append_tags(tags, tags_to_add):
if tags_to_add is None:
return
for key in tags_to_add:
tags[key.lower()] = tags_to_add[key].lower()
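    # Illustrative example (values are hypothetical): append_tags lower-cases both keys
    # and values, so
    #   tags = {'name': 'run-123'}
    #   GCPInstanceProvider.append_tags(tags, {'Owner': 'Alice'})
    #   # tags == {'name': 'run-123', 'owner': 'alice'}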
|
[] |
[] |
[
"GOOGLE_PROJECT_ID"
] |
[]
|
["GOOGLE_PROJECT_ID"]
|
python
| 1 | 0 |