filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---|
main.go
|
package main
import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"math/rand"
"net"
"os"
"strconv"
"strings"
"time"
)
const (
SEPERATOR = ","
NEWLINE = "\n"
NULL = "\x00"
BUF_SIZE = 256
)
var (
server = flag.String("server", "server.duinocoin.com:2817", "addr and port of server.")
name = flag.String("name", os.Getenv("MINERNAME"), "wallet/miner name.")
quiet = flag.Bool("quiet", false, "disable logging to console.")
debug = flag.Bool("debug", false, "console log send/receive messages.")
wait = flag.Int("wait", 10, "time to wait between task checks.")
batch = flag.Int("batch", 10, "how many jobs to create.")
version = "0.2.1"
)
func init() {
// Seeding is needed; otherwise the generated numbers are the same on every run.
rand.Seed(time.Now().UnixNano())
}
func main() {
flag.Parse()
logger("Starting ds-go-node version ", version)
if *name == "" {
logger("Name Not Set")
flag.PrintDefaults()
os.Exit(1)
}
conn, err := connect()
if err != nil {
conn = recoverLoop(conn, err)
}
for {
cj := &CreateJob{
User: *name,
}
err := cj.sync(conn)
if err != nil {
conn = recoverLoop(conn, err)
continue
}
err = cj.createJobs()
if err != nil {
conn = recoverLoop(conn, err)
continue
}
err = cj.sendJobs(conn)
if err != nil {
conn = recoverLoop(conn, err)
continue
}
}
}
// recoverLoop serves as a problem recovery mechanism.
func recoverLoop(conn net.Conn, err error) net.Conn {
loggerDebug("attempting to recover from ", err)
if nerr, ok := err.(*net.OpError); ok {
switch nerr.Err.(type) {
case *os.SyscallError:
conn.Close()
conn = nil
default:
conn = nil
}
}
// Should allow multiple attempts to reconnect.
if err == io.EOF || conn == nil {
for {
conn, err = connect()
if err == nil {
break
}
//sleep between retries
sleepTask(err.Error())
}
}
loggerDebug("continuing from recoverLoop")
return conn
}
// sleep provides a generic sleep task.
func sleepTask(msg ...interface{}) {
sleep := time.Duration(*wait) * time.Second
sleepmsg := fmt.Sprintf(" sleeping for %v", sleep)
msg = append(msg, sleepmsg)
logger(msg...)
time.Sleep(sleep)
}
// Provides a marshal for unit testing.
func (j *CreateJob) marshal() (string, error) {
res, err := json.Marshal(*j)
return string(res), err
}
// sendJobs sends the result of the job over the connection.
func (j *CreateJob) sendJobs(conn net.Conn) (err error) {
res, err := j.marshal()
if err != nil {
return
}
err = send(conn, res)
if err != nil {
return
}
resp, err := read(conn)
if err != nil {
return
}
logger("Submit Job Response: ", resp)
return
}
func makeJob(job *Job, diff uint64) (err error) {
job.Nonce = uint64(rand.Intn(int(diff * 100)))
nonce := strconv.FormatUint(job.Nonce, 10)
data := []byte(job.LastHash + nonce)
h := sha1.New()
h.Write(data)
job.ExpectedHash = hex.EncodeToString(h.Sum(nil))
loggerDebug("created job ", *job)
return
}
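// verifyJob is a small sketch (not part of the original node) showing how a
// submitted answer could be checked: recompute sha1(LastHash + nonce as a
// decimal string) and compare the hex digest with ExpectedHash, mirroring makeJob.
func verifyJob(job *Job) bool {
	h := sha1.New()
	h.Write([]byte(job.LastHash + strconv.FormatUint(job.Nonce, 10)))
	return hex.EncodeToString(h.Sum(nil)) == job.ExpectedHash
}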
// createJobs loops to create (*batch) amount of jobs.
func (j *CreateJob) createJobs() (err error) {
for i := 0; i < *batch; i++ {
job := Job{
LastHash: j.LastHash,
}
err = makeJob(&job, j.Difficulty)
if err != nil {
return
}
j.Jobs = append(j.Jobs, job)
}
logger("created ", *batch, " jobs")
return
}
// parses string to uint64 base 10
func parseUint(str string) (uint64, error) {
return strconv.ParseUint(str, 10, 64)
}
// parseJobs parses the job request sent from the server.
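// The wire format assumed here, inferred from this client rather than from
// server documentation, is "CREATE_JOBS,<last hash>,<difficulty>" for work
// requests and a bare "NO_TASK" when nothing is available.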
func (j *CreateJob) parseJobs(buf *string) (err error) {
	str := strings.Split(*buf, SEPERATOR)
	switch str[0] {
	case "CREATE_JOBS":
		// A CREATE_JOBS request must carry the last hash and the difficulty.
		if len(str) < 3 {
			loggerDebug("string split error ", *buf)
			return errors.New("str split error")
		}
		diff, perr := parseUint(str[2])
		if perr != nil {
			loggerDebug("unable to parse uint ", perr)
			return perr
		}
		j.LastHash = str[1]
		j.Difficulty = diff
	case "NO_TASK":
		sleepTask("no_task")
		err = errors.New("no_task error")
	default:
		loggerDebug("task command error ", str[0])
		err = errors.New("task command error")
	}
	return
}
// connect is used to connect to the server.
func connect() (conn net.Conn, err error) {
logger("Connecting to Server: ", *server)
conn, err = net.Dial("tcp", *server)
if err != nil {
return
}
resp, err := read(conn)
if err != nil {
return
}
logger("Connected to Server Version: ", resp)
return
}
// sync is used to request jobs.
func (j *CreateJob) sync(conn net.Conn) (err error) {
err = send(conn, "NODE")
if err != nil {
return
}
resp, err := read(conn)
if err != nil {
return
}
logger("Get Job Response: ", resp)
return j.parseJobs(&resp)
}
// logger is the general purpose logger
// which can be turned off w/ cmd line switch
func logger(msg ...interface{}) {
if *quiet {
return
}
tm := time.Now().Format(time.RFC3339)
fmt.Printf("[%s] ", tm)
for _, v := range msg {
fmt.Print(v)
}
fmt.Println()
}
func loggerDebug(msg ...interface{}) {
if !*debug {
return
}
dbgmsg := []interface{}{"[DEBUG] "}
msg = append(dbgmsg, msg...)
logger(msg...)
}
// cleanString cleans a string
func cleanString(str string) (ret string) {
ret = strings.TrimRight(str, NULL)
ret = strings.TrimRight(ret, NEWLINE)
return
}
// read is a helper for receiving a string
func read(conn net.Conn) (ret string, err error) {
buf := make([]byte, BUF_SIZE)
n, err := conn.Read(buf)
//if error, or no bytes read
if err != nil || n <= 0 {
return
}
ret = cleanString(string(buf))
loggerDebug("read ", n, " bytes ", ret)
return
}
// send is a helper for sending a string
func send(conn net.Conn, str string) (err error) {
n, err := fmt.Fprintln(conn, str)
loggerDebug("send ", n, " bytes ", str)
return
}
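// CreateJob and Job are not defined in this file; the following sketch is
// reconstructed purely from how their fields are used above (the real
// definitions, including any JSON tags, live elsewhere in the project):
//
//	type Job struct {
//		LastHash     string
//		Nonce        uint64
//		ExpectedHash string
//	}
//
//	type CreateJob struct {
//		User       string
//		LastHash   string
//		Difficulty uint64
//		Jobs       []Job
//	}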
| ["\"MINERNAME\""] | [] | ["MINERNAME"] | [] | ["MINERNAME"] | go | 1 | 0 | |
internal/environments/subshell.go
|
package environments
import (
"os"
"os/exec"
)
// getShell : returns default system shell, if $SHELL is not set returns "/bin/sh"
func getShell() string {
shell := os.Getenv("SHELL")
if shell == "" {
shell = "/bin/sh"
}
return shell
}
// SpawnShell : Spawns default system shell with injected variables
func SpawnShell(environmentName string) {
shell := getShell()
cmd := exec.Command(shell)
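	// The injected variables would be attached here; a minimal sketch, assuming
	// a hypothetical loadVariables(environmentName) helper that returns
	// "KEY=VALUE" pairs (the real lookup lives elsewhere in this package):
	//
	//	cmd.Env = append(os.Environ(), loadVariables(environmentName)...)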
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Start()
cmd.Wait()
}
| ["\"SHELL\""] | [] | ["SHELL"] | [] | ["SHELL"] | go | 1 | 0 | |
vendor/github.com/moby/buildkit/source/git/gitsource.go
|
package git
import (
"bytes"
"context"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"os/exec"
"os/user"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/secrets"
"github.com/moby/buildkit/session/sshforward"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/source"
"github.com/moby/buildkit/util/progress/logs"
"github.com/moby/locker"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var validHex = regexp.MustCompile(`^[a-f0-9]{40}$`)
type Opt struct {
CacheAccessor cache.Accessor
MetadataStore *metadata.Store
}
type gitSource struct {
md *metadata.Store
cache cache.Accessor
locker *locker.Locker
}
// Supported returns nil if the system supports Git source
func Supported() error {
if err := exec.Command("git", "version").Run(); err != nil {
return errors.Wrap(err, "failed to find git binary")
}
return nil
}
func NewSource(opt Opt) (source.Source, error) {
gs := &gitSource{
md: opt.MetadataStore,
cache: opt.CacheAccessor,
locker: locker.New(),
}
return gs, nil
}
func (gs *gitSource) ID() string {
return source.GitScheme
}
// needs to be called with repo lock
func (gs *gitSource) mountRemote(ctx context.Context, remote string, auth []string, g session.Group) (target string, release func(), retErr error) {
remoteKey := "git-remote::" + remote
sis, err := gs.md.Search(remoteKey)
if err != nil {
return "", nil, errors.Wrapf(err, "failed to search metadata for %s", redactCredentials(remote))
}
var remoteRef cache.MutableRef
for _, si := range sis {
remoteRef, err = gs.cache.GetMutable(ctx, si.ID())
if err != nil {
if errors.Is(err, cache.ErrLocked) {
// should never really happen as no other function should access this metadata, but lets be graceful
logrus.Warnf("mutable ref for %s %s was locked: %v", redactCredentials(remote), si.ID(), err)
continue
}
return "", nil, errors.Wrapf(err, "failed to get mutable ref for %s", redactCredentials(remote))
}
break
}
initializeRepo := false
if remoteRef == nil {
remoteRef, err = gs.cache.New(ctx, nil, g, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("shared git repo for %s", redactCredentials(remote))))
if err != nil {
return "", nil, errors.Wrapf(err, "failed to create new mutable for %s", redactCredentials(remote))
}
initializeRepo = true
}
releaseRemoteRef := func() {
remoteRef.Release(context.TODO())
}
defer func() {
if retErr != nil && remoteRef != nil {
releaseRemoteRef()
}
}()
mount, err := remoteRef.Mount(ctx, false, g)
if err != nil {
return "", nil, err
}
lm := snapshot.LocalMounter(mount)
dir, err := lm.Mount()
if err != nil {
return "", nil, err
}
defer func() {
if retErr != nil {
lm.Unmount()
}
}()
if initializeRepo {
if _, err := gitWithinDir(ctx, dir, "", "", "", auth, "init", "--bare"); err != nil {
return "", nil, errors.Wrapf(err, "failed to init repo at %s", dir)
}
if _, err := gitWithinDir(ctx, dir, "", "", "", auth, "remote", "add", "origin", remote); err != nil {
return "", nil, errors.Wrapf(err, "failed add origin repo at %s", dir)
}
// save new remote metadata
si, _ := gs.md.Get(remoteRef.ID())
v, err := metadata.NewValue(remoteKey)
if err != nil {
return "", nil, err
}
v.Index = remoteKey
if err := si.Update(func(b *bolt.Bucket) error {
return si.SetValue(b, "git-remote", v)
}); err != nil {
return "", nil, err
}
}
return dir, func() {
lm.Unmount()
releaseRemoteRef()
}, nil
}
type gitSourceHandler struct {
*gitSource
src source.GitIdentifier
cacheKey string
sm *session.Manager
auth []string
}
func (gs *gitSourceHandler) shaToCacheKey(sha string) string {
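	// For example, a pinned commit with KeepGitDir and Subdir set becomes
	// "<sha>.git:<subdir>"; a plain commit stays as the bare sha.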
key := sha
if gs.src.KeepGitDir {
key += ".git"
}
if gs.src.Subdir != "" {
key += ":" + gs.src.Subdir
}
return key
}
func (gs *gitSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, _ solver.Vertex) (source.SourceInstance, error) {
gitIdentifier, ok := id.(*source.GitIdentifier)
if !ok {
return nil, errors.Errorf("invalid git identifier %v", id)
}
return &gitSourceHandler{
src: *gitIdentifier,
gitSource: gs,
sm: sm,
}, nil
}
type authSecret struct {
token bool
name string
}
func (gs *gitSourceHandler) authSecretNames() (sec []authSecret, _ error) {
u, err := url.Parse(gs.src.Remote)
if err != nil {
return nil, err
}
if gs.src.AuthHeaderSecret != "" {
sec = append(sec, authSecret{name: gs.src.AuthHeaderSecret + "." + u.Host})
}
if gs.src.AuthTokenSecret != "" {
sec = append(sec, authSecret{name: gs.src.AuthTokenSecret + "." + u.Host, token: true})
}
if gs.src.AuthHeaderSecret != "" {
sec = append(sec, authSecret{name: gs.src.AuthHeaderSecret})
}
if gs.src.AuthTokenSecret != "" {
sec = append(sec, authSecret{name: gs.src.AuthTokenSecret, token: true})
}
return sec, nil
}
func (gs *gitSourceHandler) getAuthToken(ctx context.Context, g session.Group) error {
if gs.auth != nil {
return nil
}
sec, err := gs.authSecretNames()
if err != nil {
return err
}
return gs.sm.Any(ctx, g, func(ctx context.Context, _ string, caller session.Caller) error {
for _, s := range sec {
dt, err := secrets.GetSecret(ctx, caller, s.name)
if err != nil {
if errors.Is(err, secrets.ErrNotFound) {
continue
}
return err
}
if s.token {
dt = []byte("basic " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("x-access-token:%s", dt))))
}
gs.auth = []string{"-c", "http." + tokenScope(gs.src.Remote) + ".extraheader=Authorization: " + string(dt)}
break
}
return nil
})
}
func (gs *gitSourceHandler) mountSSHAuthSock(ctx context.Context, sshID string, g session.Group) (string, func() error, error) {
var caller session.Caller
err := gs.sm.Any(ctx, g, func(ctx context.Context, _ string, c session.Caller) error {
if err := sshforward.CheckSSHID(ctx, c, sshID); err != nil {
if st, ok := status.FromError(err); ok && st.Code() == codes.Unimplemented {
return errors.Errorf("no SSH key %q forwarded from the client", sshID)
}
return err
}
caller = c
return nil
})
if err != nil {
return "", nil, err
}
usr, err := user.Current()
if err != nil {
return "", nil, err
}
// best effort, default to root
uid, _ := strconv.Atoi(usr.Uid)
gid, _ := strconv.Atoi(usr.Gid)
sock, cleanup, err := sshforward.MountSSHSocket(ctx, caller, sshforward.SocketOpt{
ID: sshID,
UID: uid,
GID: gid,
Mode: 0700,
})
if err != nil {
return "", nil, err
}
return sock, cleanup, nil
}
func (gs *gitSourceHandler) mountKnownHosts(ctx context.Context) (string, func() error, error) {
if gs.src.KnownSSHHosts == "" {
return "", nil, errors.Errorf("no configured known hosts forwarded from the client")
}
knownHosts, err := ioutil.TempFile("", "")
if err != nil {
return "", nil, err
}
cleanup := func() error {
return os.Remove(knownHosts.Name())
}
_, err = knownHosts.Write([]byte(gs.src.KnownSSHHosts))
if err != nil {
cleanup()
return "", nil, err
}
err = knownHosts.Close()
if err != nil {
cleanup()
return "", nil, err
}
return knownHosts.Name(), cleanup, nil
}
func (gs *gitSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) {
remote := gs.src.Remote
ref := gs.src.Ref
if ref == "" {
ref = "master"
}
gs.locker.Lock(remote)
defer gs.locker.Unlock(remote)
if isCommitSHA(ref) {
ref = gs.shaToCacheKey(ref)
gs.cacheKey = ref
return ref, nil, true, nil
}
gs.getAuthToken(ctx, g)
gitDir, unmountGitDir, err := gs.mountRemote(ctx, remote, gs.auth, g)
if err != nil {
return "", nil, false, err
}
defer unmountGitDir()
var sock string
if gs.src.MountSSHSock != "" {
var unmountSock func() error
sock, unmountSock, err = gs.mountSSHAuthSock(ctx, gs.src.MountSSHSock, g)
if err != nil {
return "", nil, false, err
}
defer unmountSock()
}
var knownHosts string
if gs.src.KnownSSHHosts != "" {
var unmountKnownHosts func() error
knownHosts, unmountKnownHosts, err = gs.mountKnownHosts(ctx)
if err != nil {
return "", nil, false, err
}
defer unmountKnownHosts()
}
// TODO: should we assume that remote tag is immutable? add a timer?
buf, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, "ls-remote", "origin", ref)
if err != nil {
return "", nil, false, errors.Wrapf(err, "failed to fetch remote %s", redactCredentials(remote))
}
out := buf.String()
idx := strings.Index(out, "\t")
if idx == -1 {
return "", nil, false, errors.Errorf("repository does not contain ref %s, output: %q", ref, string(out))
}
sha := string(out[:idx])
if !isCommitSHA(sha) {
return "", nil, false, errors.Errorf("invalid commit sha %q", sha)
}
sha = gs.shaToCacheKey(sha)
gs.cacheKey = sha
return sha, nil, true, nil
}
func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out cache.ImmutableRef, retErr error) {
ref := gs.src.Ref
if ref == "" {
ref = "master"
}
cacheKey := gs.cacheKey
if cacheKey == "" {
var err error
cacheKey, _, _, err = gs.CacheKey(ctx, g, 0)
if err != nil {
return nil, err
}
}
gs.getAuthToken(ctx, g)
snapshotKey := "git-snapshot::" + cacheKey + ":" + gs.src.Subdir
gs.locker.Lock(snapshotKey)
defer gs.locker.Unlock(snapshotKey)
sis, err := gs.md.Search(snapshotKey)
if err != nil {
return nil, errors.Wrapf(err, "failed to search metadata for %s", snapshotKey)
}
if len(sis) > 0 {
return gs.cache.Get(ctx, sis[0].ID())
}
gs.locker.Lock(gs.src.Remote)
defer gs.locker.Unlock(gs.src.Remote)
gitDir, unmountGitDir, err := gs.mountRemote(ctx, gs.src.Remote, gs.auth, g)
if err != nil {
return nil, err
}
defer unmountGitDir()
var sock string
if gs.src.MountSSHSock != "" {
var unmountSock func() error
sock, unmountSock, err = gs.mountSSHAuthSock(ctx, gs.src.MountSSHSock, g)
if err != nil {
return nil, err
}
defer unmountSock()
}
var knownHosts string
if gs.src.KnownSSHHosts != "" {
var unmountKnownHosts func() error
knownHosts, unmountKnownHosts, err = gs.mountKnownHosts(ctx)
if err != nil {
return nil, err
}
defer unmountKnownHosts()
}
doFetch := true
if isCommitSHA(ref) {
// skip fetch if commit already exists
if _, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, nil, "cat-file", "-e", ref+"^{commit}"); err == nil {
doFetch = false
}
}
if doFetch {
// make sure no old lock files have leaked
os.RemoveAll(filepath.Join(gitDir, "shallow.lock"))
args := []string{"fetch"}
if !isCommitSHA(ref) { // TODO: find a branch from ls-remote?
args = append(args, "--depth=1", "--no-tags")
} else {
if _, err := os.Lstat(filepath.Join(gitDir, "shallow")); err == nil {
args = append(args, "--unshallow")
}
}
args = append(args, "origin")
if !isCommitSHA(ref) {
args = append(args, "--force", ref+":tags/"+ref)
// local refs are needed so they would be advertised on next fetches. Force is used
// in case the ref is a branch and it now points to a different commit sha
// TODO: is there a better way to do this?
}
if _, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, args...); err != nil {
return nil, errors.Wrapf(err, "failed to fetch remote %s", redactCredentials(gs.src.Remote))
}
}
checkoutRef, err := gs.cache.New(ctx, nil, g, cache.WithRecordType(client.UsageRecordTypeGitCheckout), cache.WithDescription(fmt.Sprintf("git snapshot for %s#%s", gs.src.Remote, ref)))
if err != nil {
return nil, errors.Wrapf(err, "failed to create new mutable for %s", redactCredentials(gs.src.Remote))
}
defer func() {
if retErr != nil && checkoutRef != nil {
checkoutRef.Release(context.TODO())
}
}()
mount, err := checkoutRef.Mount(ctx, false, g)
if err != nil {
return nil, err
}
lm := snapshot.LocalMounter(mount)
checkoutDir, err := lm.Mount()
if err != nil {
return nil, err
}
defer func() {
if retErr != nil && lm != nil {
lm.Unmount()
}
}()
subdir := path.Clean(gs.src.Subdir)
if subdir == "/" {
subdir = "."
}
if gs.src.KeepGitDir && subdir == "." {
checkoutDirGit := filepath.Join(checkoutDir, ".git")
if err := os.MkdirAll(checkoutDir, 0711); err != nil {
return nil, err
}
_, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, nil, "init")
if err != nil {
return nil, err
}
_, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, nil, "remote", "add", "origin", gitDir)
if err != nil {
return nil, err
}
pullref := ref
if isCommitSHA(ref) {
pullref = "refs/buildkit/" + identity.NewID()
_, err = gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, "update-ref", pullref, ref)
if err != nil {
return nil, err
}
} else {
pullref += ":" + pullref
}
_, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, gs.auth, "fetch", "-u", "--depth=1", "origin", pullref)
if err != nil {
return nil, err
}
_, err = gitWithinDir(ctx, checkoutDirGit, checkoutDir, sock, knownHosts, nil, "checkout", "FETCH_HEAD")
if err != nil {
return nil, errors.Wrapf(err, "failed to checkout remote %s", redactCredentials(gs.src.Remote))
}
gitDir = checkoutDirGit
} else {
cd := checkoutDir
if subdir != "." {
cd, err = ioutil.TempDir(cd, "checkout")
if err != nil {
return nil, errors.Wrapf(err, "failed to create temporary checkout dir")
}
}
_, err = gitWithinDir(ctx, gitDir, cd, sock, knownHosts, nil, "checkout", ref, "--", ".")
if err != nil {
return nil, errors.Wrapf(err, "failed to checkout remote %s", redactCredentials(gs.src.Remote))
}
if subdir != "." {
d, err := os.Open(filepath.Join(cd, subdir))
if err != nil {
return nil, errors.Wrapf(err, "failed to open subdir %v", subdir)
}
defer func() {
if d != nil {
d.Close()
}
}()
names, err := d.Readdirnames(0)
if err != nil {
return nil, err
}
for _, n := range names {
if err := os.Rename(filepath.Join(cd, subdir, n), filepath.Join(checkoutDir, n)); err != nil {
return nil, err
}
}
if err := d.Close(); err != nil {
return nil, err
}
d = nil // reset defer
if err := os.RemoveAll(cd); err != nil {
return nil, err
}
}
}
_, err = gitWithinDir(ctx, gitDir, checkoutDir, sock, knownHosts, gs.auth, "submodule", "update", "--init", "--recursive", "--depth=1")
if err != nil {
return nil, errors.Wrapf(err, "failed to update submodules for %s", redactCredentials(gs.src.Remote))
}
if idmap := mount.IdentityMapping(); idmap != nil {
u := idmap.RootPair()
err := filepath.Walk(gitDir, func(p string, f os.FileInfo, err error) error {
return os.Lchown(p, u.UID, u.GID)
})
if err != nil {
return nil, errors.Wrap(err, "failed to remap git checkout")
}
}
lm.Unmount()
lm = nil
snap, err := checkoutRef.Commit(ctx)
if err != nil {
return nil, err
}
checkoutRef = nil
defer func() {
if retErr != nil {
snap.Release(context.TODO())
}
}()
si, _ := gs.md.Get(snap.ID())
v, err := metadata.NewValue(snapshotKey)
if err != nil {
return nil, err
}
v.Index = snapshotKey
if err := si.Update(func(b *bolt.Bucket) error {
return si.SetValue(b, "git-snapshot", v)
}); err != nil {
return nil, err
}
return snap, nil
}
func isCommitSHA(str string) bool {
return validHex.MatchString(str)
}
func gitWithinDir(ctx context.Context, gitDir, workDir, sshAuthSock, knownHosts string, auth []string, args ...string) (*bytes.Buffer, error) {
a := append([]string{"--git-dir", gitDir}, auth...)
if workDir != "" {
a = append(a, "--work-tree", workDir)
}
return git(ctx, workDir, sshAuthSock, knownHosts, append(a, args...)...)
}
func getGitSSHCommand(knownHosts string) string {
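	// Yields e.g. "ssh -F /dev/null -o UserKnownHostsFile=<file>" when a
	// known_hosts file was mounted, and disables strict host key checking otherwise.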
gitSSHCommand := "ssh -F /dev/null"
if knownHosts != "" {
gitSSHCommand += " -o UserKnownHostsFile=" + knownHosts
} else {
gitSSHCommand += " -o StrictHostKeyChecking=no"
}
return gitSSHCommand
}
func git(ctx context.Context, dir, sshAuthSock, knownHosts string, args ...string) (*bytes.Buffer, error) {
for {
stdout, stderr := logs.NewLogStreams(ctx, false)
defer stdout.Close()
defer stderr.Close()
cmd := exec.Command("git", args...)
cmd.Dir = dir // some commands like submodule require this
buf := bytes.NewBuffer(nil)
errbuf := bytes.NewBuffer(nil)
cmd.Stdin = nil
cmd.Stdout = io.MultiWriter(stdout, buf)
cmd.Stderr = io.MultiWriter(stderr, errbuf)
cmd.Env = []string{
"PATH=" + os.Getenv("PATH"),
"GIT_TERMINAL_PROMPT=0",
"GIT_SSH_COMMAND=" + getGitSSHCommand(knownHosts),
// "GIT_TRACE=1",
}
if sshAuthSock != "" {
cmd.Env = append(cmd.Env, "SSH_AUTH_SOCK="+sshAuthSock)
}
// remote git commands spawn helper processes that inherit FDs and don't
// handle parent death signal so exec.CommandContext can't be used
err := runProcessGroup(ctx, cmd)
if err != nil {
if strings.Contains(errbuf.String(), "--depth") || strings.Contains(errbuf.String(), "shallow") {
if newArgs := argsNoDepth(args); len(args) > len(newArgs) {
args = newArgs
continue
}
}
}
return buf, err
}
}
func argsNoDepth(args []string) []string {
out := make([]string, 0, len(args))
for _, a := range args {
if a != "--depth=1" {
out = append(out, a)
}
}
return out
}
func tokenScope(remote string) string {
// generally we can only use the token for fetching main remote but in case of github.com we do best effort
// to try reuse same token for all github.com remotes. This is the same behavior actions/checkout uses
for _, pfx := range []string{"https://github.com/", "https://www.github.com/"} {
if strings.HasPrefix(remote, pfx) {
return pfx
}
}
return remote
}
| ["\"PATH\""] | [] | ["PATH"] | [] | ["PATH"] | go | 1 | 0 | |
src/main/java/network/bitmesh/cloudserver/Bitcoin/WalletRunnable.java
|
package network.bitmesh.cloudserver.Bitcoin;
import network.bitmesh.cloudserver.ServerConfig;
import org.bitcoinj.core.NetworkParameters;
import org.bitcoinj.core.PeerGroup;
import org.bitcoinj.core.Wallet;
import org.bitcoinj.kits.WalletAppKit;
import org.bitcoinj.params.MainNetParams;
import org.bitcoinj.params.TestNet3Params;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
public class WalletRunnable implements Runnable
{
private static final Logger log = LoggerFactory.getLogger(WalletRunnable.class.getName());
private static WalletAppKit appKit = null;
private static WalletAppKit testAppKit = null;
public WalletRunnable()
{
super();
log.info("Starting wallet.");
// https://stackoverflow.com/questions/5115339/tomcat-opts-environment-variable-and-system-getenv
File walletLoc = ServerConfig.BITMESH_TEST ?
new File("./") :
new File(System.getenv("persistdir"));
if(!walletLoc.canRead() || !walletLoc.canWrite())
{
log.error("Cannot read or write to wallet location.");
return;
}
// Initialize wallet appkit with params, location and class name
appKit = new WalletAppKit(MainNetParams.get(), walletLoc, "mainnet");
appKit.setAutoSave(true);
testAppKit = new WalletAppKit(TestNet3Params.get(), walletLoc, "testnet");
testAppKit.setAutoSave(true);
}
// TODO: make the thread interruptible
public void run()
{
log.info("Beginning run loop");
// Start the sync in the run method to avoid blocking on assignment
appKit.startAsync();
testAppKit.startAsync();
appKit.awaitRunning();
testAppKit.awaitRunning();
appKit.setBlockingStartup(true);
testAppKit.setBlockingStartup(true);
appKit.peerGroup().setMinBroadcastConnections(ServerConfig.MIN_PEERS_TO_BROADCAST);
testAppKit.peerGroup().setMinBroadcastConnections(ServerConfig.MIN_PEERS_TO_BROADCAST);
appKit.peerGroup().setMaxConnections(ServerConfig.MAX_PEER_CONNECTIONS);
testAppKit.peerGroup().setMaxConnections(ServerConfig.MAX_PEER_CONNECTIONS);
// This is for testing - don't want to use localhost as a peer
appKit.peerGroup().setUseLocalhostPeerWhenPossible(false);
testAppKit.peerGroup().setUseLocalhostPeerWhenPossible(false);
appKit.setAutoStop(true);
testAppKit.setAutoStop(true);
}
public static Wallet getWallet()
{
if(appKit == null)
log.error("appKit not initialized when wallet requested.");
return appKit.wallet();
}
public static NetworkParameters getParams()
{
if(appKit == null)
log.error("appKit not initialized when params requested.");
return appKit.params();
}
public static PeerGroup getPeergroup()
{
if(appKit == null)
log.error("appKit not initialized when peergroup requested.");
return appKit.peerGroup();
}
public static WalletAppKit getAppKit()
{
if(appKit == null)
log.error("appKit not initialized when appkit requested.");
return appKit;
}
public static Wallet getTestWallet()
{
if(testAppKit == null)
log.error("appKit not initialized when wallet requested.");
return testAppKit.wallet();
}
public static NetworkParameters getTestParams()
{
if(testAppKit == null)
log.error("appKit not initialized when params requested.");
return testAppKit.params();
}
public static PeerGroup getTestPeergroup()
{
if(testAppKit == null)
log.error("appKit not initialized when peergroup requested.");
return testAppKit.peerGroup();
}
public static WalletAppKit getTestAppKit()
{
if(testAppKit == null)
log.error("appKit not initialized when appkit requested.");
return testAppKit;
}
}
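// Usage sketch (an assumption, not taken from this repository): construct the
// runnable once and hand it to a worker thread so chain sync stays off the
// caller, e.g. new Thread(new WalletRunnable(), "wallet-thread").start();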
| ["\"persistdir\""] | [] | ["persistdir"] | [] | ["persistdir"] | java | 1 | 0 | |
root.go
|
package editor
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
)
var editor = "vim"
func init() {
if editorVar := os.Getenv("EDITOR"); editorVar != "" {
editor = editorVar
}
}
// Opens the default editor and returns the value.
func Read() ([]byte, error) {
return ReadEditor(editor, "")
}
// Opens the default editor and returns the value in string format.
func ReadText() (string, error) {
text, err := Read()
return string(text), err
}
// Opens the editor and returns the value.
func ReadEditor(editor, programName string) ([]byte, error) {
if programName == "" {
programName = "editor"
}
// Create a temporary file.
tempFile, tmpFileError := ioutil.TempFile("", programName)
if tmpFileError != nil {
return nil, tmpFileError
}
defer os.Remove(tempFile.Name())
// open editor
cmd := exec.Command("sh", "-c", fmt.Sprintf("%s %s", editor, tempFile.Name()))
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmdError := cmd.Run()
if cmdError != nil {
return nil, cmdError
}
// read tmpfile
text, readingFileError := ioutil.ReadFile(tempFile.Name())
if readingFileError != nil {
return nil, readingFileError
}
return text, nil
}
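// Usage sketch: text, err := Read() (or ReadText() for a string) opens the
// user's $EDITOR, falling back to vim, on a temporary file and returns
// whatever was saved before the editor exited.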
| ["\"EDITOR\""] | [] | ["EDITOR"] | [] | ["EDITOR"] | go | 1 | 0 | |
pandas/io/tests/test_pytables.py
|
import nose
import sys
import os
import warnings
import tempfile
from contextlib import contextmanager
import datetime
import numpy as np
import pandas
import pandas as pd
from pandas import (Series, DataFrame, Panel, MultiIndex, Categorical, bdate_range,
date_range, timedelta_range, Index, DatetimeIndex, TimedeltaIndex, isnull)
from pandas.io.pytables import _tables
try:
_tables()
except ImportError as e:
raise nose.SkipTest(e)
from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf,
IncompatibilityWarning, PerformanceWarning,
AttributeConflictWarning, DuplicateWarning,
PossibleDataLossError, ClosedFileError)
from pandas.io import pytables as pytables
import pandas.util.testing as tm
from pandas.util.testing import (assert_panel4d_equal,
assert_panel_equal,
assert_frame_equal,
assert_series_equal)
from pandas import concat, Timestamp
from pandas import compat
from pandas.compat import range, lrange, u
from pandas.util.testing import assert_produces_warning
from numpy.testing.decorators import slow
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
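# i.e. default to 'blosc' when PyTables >= 2.2 is installed, otherwise fall back to 'zlib'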
_multiprocess_can_split_ = False
# contextmanager to ensure the file cleanup
def safe_remove(path):
if path is not None:
try:
os.remove(path)
except:
pass
def safe_close(store):
try:
if store is not None:
store.close()
except:
pass
def create_tempfile(path):
""" create an unopened named temporary file """
return os.path.join(tempfile.gettempdir(),path)
@contextmanager
def ensure_clean_store(path, mode='a', complevel=None, complib=None,
fletcher32=False):
try:
# put in the temporary path if we don't have one already
if not len(os.path.dirname(path)):
path = create_tempfile(path)
store = HDFStore(path, mode=mode, complevel=complevel,
complib=complib, fletcher32=fletcher32)
yield store
finally:
safe_close(store)
if mode == 'w' or mode == 'a':
safe_remove(path)
@contextmanager
def ensure_clean_path(path):
"""
return essentially a named temporary file that is not opened
and deleted on exiting; if path is a list, then create and
return list of filenames
"""
try:
if isinstance(path, list):
filenames = [ create_tempfile(p) for p in path ]
yield filenames
else:
filenames = [ create_tempfile(path) ]
yield filenames[0]
finally:
for f in filenames:
safe_remove(f)
# set these parameters so we don't have file sharing
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
tables.parameters.MAX_THREADS = 1
def _maybe_remove(store, key):
"""For tests using tables, try removing the table to be sure there is
no content from previous tests using the same table name."""
try:
store.remove(key)
except:
pass
def compat_assert_produces_warning(w,f):
""" don't produce a warning under PY3 """
if compat.PY3:
f()
else:
with tm.assert_produces_warning(expected_warning=w):
f()
class TestHDFStore(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestHDFStore, cls).setUpClass()
# Pytables 3.0.0 deprecates lots of things
tm.reset_testing_mode()
@classmethod
def tearDownClass(cls):
super(TestHDFStore, cls).tearDownClass()
# Pytables 3.0.0 deprecates lots of things
tm.set_testing_mode()
def setUp(self):
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.path = 'tmp.__%s__.h5' % tm.rands(10)
def tearDown(self):
pass
def test_factory_fun(self):
try:
with get_store(self.path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(self.path)
try:
with get_store(self.path) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.path) as tbl:
self.assertEqual(len(tbl), 1)
self.assertEqual(type(tbl['a']), DataFrame)
finally:
safe_remove(self.path)
def test_context(self):
try:
with HDFStore(self.path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(self.path)
try:
with HDFStore(self.path) as tbl:
tbl['a'] = tm.makeDataFrame()
with HDFStore(self.path) as tbl:
self.assertEqual(len(tbl), 1)
self.assertEqual(type(tbl['a']), DataFrame)
finally:
safe_remove(self.path)
def test_conv_read_write(self):
try:
def roundtrip(key, obj,**kwargs):
obj.to_hdf(self.path, key,**kwargs)
return read_hdf(self.path, key)
o = tm.makeTimeSeries()
assert_series_equal(o, roundtrip('series',o))
o = tm.makeStringSeries()
assert_series_equal(o, roundtrip('string_series',o))
o = tm.makeDataFrame()
assert_frame_equal(o, roundtrip('frame',o))
o = tm.makePanel()
assert_panel_equal(o, roundtrip('panel',o))
# table
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
df.to_hdf(self.path,'table',append=True)
result = read_hdf(self.path, 'table', where = ['index>2'])
assert_frame_equal(df[df.index>2],result)
finally:
safe_remove(self.path)
def test_long_strings(self):
# GH6166
# unconversion of long strings was being chopped in earlier
# versions of numpy < 1.7.2
df = DataFrame({'a': tm.rands_array(100, size=10)},
index=tm.rands_array(100, size=10))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['a'])
result = store.select('df')
assert_frame_equal(df, result)
def test_api(self):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path,'df',append=True,format='table')
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
# append to False
df.iloc[:10].to_hdf(path,'df',append=False,format='table')
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path,'df',append=True)
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
# append to False
df.iloc[:10].to_hdf(path,'df',append=False,format='table')
df.iloc[10:].to_hdf(path,'df',append=True)
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path,'df',append=False,format='fixed')
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df',append=False,format='f')
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df',append=False)
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df')
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_store(self.path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=True,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
# append to False
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
# formats
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format=None)
assert_frame_equal(store.select('df'),df)
with ensure_clean_path(self.path) as path:
# invalid
df = tm.makeDataFrame()
self.assertRaises(ValueError, df.to_hdf, path,'df',append=True,format='f')
self.assertRaises(ValueError, df.to_hdf, path,'df',append=True,format='fixed')
self.assertRaises(TypeError, df.to_hdf, path,'df',append=True,format='foo')
self.assertRaises(TypeError, df.to_hdf, path,'df',append=False,format='bar')
#File path doesn't exist
path = ""
self.assertRaises(IOError, read_hdf, path, 'df')
def test_api_default_format(self):
# default_format option
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
pandas.set_option('io.hdf.default_format','fixed')
_maybe_remove(store,'df')
store.put('df',df)
self.assertFalse(store.get_storer('df').is_table)
self.assertRaises(ValueError, store.append, 'df2',df)
pandas.set_option('io.hdf.default_format','table')
_maybe_remove(store,'df')
store.put('df',df)
self.assertTrue(store.get_storer('df').is_table)
_maybe_remove(store,'df2')
store.append('df2',df)
self.assertTrue(store.get_storer('df').is_table)
pandas.set_option('io.hdf.default_format',None)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
pandas.set_option('io.hdf.default_format','fixed')
df.to_hdf(path,'df')
with get_store(path) as store:
self.assertFalse(store.get_storer('df').is_table)
self.assertRaises(ValueError, df.to_hdf, path,'df2', append=True)
pandas.set_option('io.hdf.default_format','table')
df.to_hdf(path,'df3')
with HDFStore(path) as store:
self.assertTrue(store.get_storer('df3').is_table)
df.to_hdf(path,'df4',append=True)
with HDFStore(path) as store:
self.assertTrue(store.get_storer('df4').is_table)
pandas.set_option('io.hdf.default_format',None)
def test_keys(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
self.assertEqual(len(store), 5)
self.assertTrue(set(
store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
with ensure_clean_store(self.path) as store:
repr(store)
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
store.append('e', tm.makePanel())
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001,1,2,0,0)
df['datetime2'] = datetime.datetime(2001,1,3,0,0)
df.ix[3:6,['obj1']] = np.nan
df = df.consolidate().convert_objects()
warnings.filterwarnings('ignore', category=PerformanceWarning)
store['df'] = df
warnings.filterwarnings('always', category=PerformanceWarning)
# make a random group in hdf space
store._handle.create_group(store._handle.root,'bah')
repr(store)
str(store)
# storers
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df',df)
s = store.get_storer('df')
repr(s)
str(s)
def test_contains(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
store['foo/bar'] = tm.makeDataFrame()
self.assertIn('a', store)
self.assertIn('b', store)
self.assertNotIn('c', store)
self.assertIn('foo/bar', store)
self.assertIn('/foo/bar', store)
self.assertNotIn('/foo/b', store)
self.assertNotIn('bar', store)
# GH 2694
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
store['node())'] = tm.makeDataFrame()
self.assertIn('node())', store)
def test_versioning(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
self.assertEqual(store.root.a._v_attrs.pandas_version, '0.15.2')
self.assertEqual(store.root.b._v_attrs.pandas_version, '0.15.2')
self.assertEqual(store.root.df1._v_attrs.pandas_version, '0.15.2')
# write a file and wipe its versioning
_maybe_remove(store, 'df2')
store.append('df2', df)
# this is an error because its table_type is appendable, but no version
# info
store.get_node('df2')._v_attrs.pandas_version = None
self.assertRaises(Exception, store.select, 'df2')
def test_mode(self):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(self.path) as path:
# constructor
if mode in ['r','r+']:
self.assertRaises(IOError, HDFStore, path, mode=mode)
else:
store = HDFStore(path,mode=mode)
self.assertEqual(store._handle.mode, mode)
store.close()
with ensure_clean_path(self.path) as path:
# context
if mode in ['r','r+']:
def f():
with HDFStore(path,mode=mode) as store:
pass
self.assertRaises(IOError, f)
else:
with HDFStore(path,mode=mode) as store:
self.assertEqual(store._handle.mode, mode)
with ensure_clean_path(self.path) as path:
# conv write
if mode in ['r','r+']:
self.assertRaises(IOError, df.to_hdf, path, 'df', mode=mode)
df.to_hdf(path,'df',mode='w')
else:
df.to_hdf(path,'df',mode=mode)
# conv read
if mode in ['w']:
self.assertRaises(KeyError, read_hdf, path, 'df', mode=mode)
else:
result = read_hdf(path,'df',mode=mode)
assert_frame_equal(result,df)
check('r')
check('r+')
check('a')
check('w')
def test_reopen_handle(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path,mode='a')
store['a'] = tm.makeTimeSeries()
# invalid mode change
self.assertRaises(PossibleDataLossError, store.open, 'w')
store.close()
self.assertFalse(store.is_open)
# truncation ok here
store.open('w')
self.assertTrue(store.is_open)
self.assertEqual(len(store), 0)
store.close()
self.assertFalse(store.is_open)
store = HDFStore(path,mode='a')
store['a'] = tm.makeTimeSeries()
# reopen as read
store.open('r')
self.assertTrue(store.is_open)
self.assertEqual(len(store), 1)
self.assertEqual(store._mode, 'r')
store.close()
self.assertFalse(store.is_open)
# reopen as append
store.open('a')
self.assertTrue(store.is_open)
self.assertEqual(len(store), 1)
self.assertEqual(store._mode, 'a')
store.close()
self.assertFalse(store.is_open)
# reopen as append (again)
store.open('a')
self.assertTrue(store.is_open)
self.assertEqual(len(store), 1)
self.assertEqual(store._mode, 'a')
store.close()
self.assertFalse(store.is_open)
def test_open_args(self):
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(path,mode='a',driver='H5FD_CORE',driver_core_backing_store=0)
store['df'] = df
store.append('df2',df)
tm.assert_frame_equal(store['df'],df)
tm.assert_frame_equal(store['df2'],df)
store.close()
# the file should not have actually been written
self.assertFalse(os.path.exists(path))
def test_flush(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
left = store.get('a')
right = store['a']
tm.assert_series_equal(left, right)
left = store.get('/a')
right = store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, store.get, 'b')
def test_getattr(self):
with ensure_clean_store(self.path) as store:
s = tm.makeTimeSeries()
store['a'] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store,'a')
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store['df'] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
self.assertRaises(AttributeError, getattr, store, 'd')
for x in ['mode','path','handle','complib']:
self.assertRaises(AttributeError, getattr, store, x)
# not stores
for x in ['mode','path','handle','complib']:
getattr(store,"_%s" % x)
def test_put(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store['a'] = ts
store['b'] = df[:10]
store['foo/bar/bah'] = df[:10]
store['foo'] = df[:10]
store['/foo'] = df[:10]
store.put('c', df[:10], format='table')
# not OK, not a table
self.assertRaises(
ValueError, store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False in
# this case
# _maybe_remove(store, 'f')
# self.assertRaises(ValueError, store.put, 'f', df[10:], append=True)
# can't put to a table (use append instead)
self.assertRaises(ValueError, store.put, 'c', df[10:], append=True)
# overwrite table
store.put('c', df[:10], format='table', append=False)
tm.assert_frame_equal(df[:10], store['c'])
def test_put_string_index(self):
with ensure_clean_store(self.path) as store:
index = Index(
["I am a very long string index: %s" % i for i in range(20)])
s = Series(np.arange(20), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + ["I am a very long string index: %s" % i for i in range(20)])
s = Series(np.arange(21), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
def test_put_compression(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
store.put('c', df, format='table', complib='zlib')
tm.assert_frame_equal(store['c'], df)
# can't compress if format='fixed'
self.assertRaises(ValueError, store.put, 'b', df,
format='fixed', complib='zlib')
def test_put_compression_blosc(self):
tm.skip_if_no_package('tables', '2.2', app='blosc support')
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
# can't compress if format='fixed'
self.assertRaises(ValueError, store.put, 'b', df,
format='fixed', complib='blosc')
store.put('c', df, format='table', complib='blosc')
tm.assert_frame_equal(store['c'], df)
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_put_mixed_type(self):
df = tm.makeTimeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.ix[3:6, ['obj1']] = np.nan
df = df.consolidate().convert_objects()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
# cannot use assert_produces_warning here for some reason
# a PendingDeprecationWarning is also raised?
warnings.filterwarnings('ignore', category=PerformanceWarning)
store.put('df',df)
warnings.filterwarnings('always', category=PerformanceWarning)
expected = store.get('df')
tm.assert_frame_equal(expected,df)
def test_append(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
_maybe_remove(store, 'df2')
store.put('df2', df[:10], format='table')
store.append('df2', df[10:])
tm.assert_frame_equal(store['df2'], df)
_maybe_remove(store, 'df3')
store.append('/df3', df[:10])
store.append('/df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
# this is allowed, but you almost always don't want to do it
with tm.assert_produces_warning(expected_warning=tables.NaturalNameWarning):
_maybe_remove(store, '/df3 foo')
store.append('/df3 foo', df[:10])
store.append('/df3 foo', df[10:])
tm.assert_frame_equal(store['df3 foo'], df)
# panel
wp = tm.makePanel()
_maybe_remove(store, 'wp1')
store.append('wp1', wp.ix[:, :10, :])
store.append('wp1', wp.ix[:, 10:, :])
assert_panel_equal(store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :])
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
# test using axis labels
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=[
'items', 'major_axis', 'minor_axis'])
store.append('p4d', p4d.ix[:, :, 10:, :], axes=[
'items', 'major_axis', 'minor_axis'])
assert_panel4d_equal(store['p4d'], p4d)
# test using different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
_maybe_remove(store, 'p4d2')
store.append(
'p4d2', p4d2, axes=['items', 'major_axis', 'minor_axis'])
assert_panel4d_equal(store['p4d2'], p4d2)
# test using different order of items on the non-index axes
_maybe_remove(store, 'wp1')
wp_append1 = wp.ix[:, :10, :]
store.append('wp1', wp_append1)
wp_append2 = wp.ix[:, 10:, :].reindex(items=wp.items[::-1])
store.append('wp1', wp_append2)
assert_panel_equal(store['wp1'], wp)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df['mixed_column'] = 'testing'
df.ix[2, 'mixed_column'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df)
tm.assert_frame_equal(store['df'], df)
# uints - test storage of uints
uint_data = DataFrame({'u08' : Series(np.random.random_integers(0, high=255, size=5), dtype=np.uint8),
'u16' : Series(np.random.random_integers(0, high=65535, size=5), dtype=np.uint16),
'u32' : Series(np.random.random_integers(0, high=2**30, size=5), dtype=np.uint32),
'u64' : Series([2**58, 2**59, 2**60, 2**61, 2**62], dtype=np.uint64)},
index=np.arange(5))
_maybe_remove(store, 'uints')
store.append('uints', uint_data)
tm.assert_frame_equal(store['uints'], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, 'uints')
store.append('uints', uint_data, data_columns=['u08','u16','u32']) # 64-bit indices not yet supported
tm.assert_frame_equal(store['uints'], uint_data)
def test_append_series(self):
with ensure_clean_store(self.path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append('ss', ss)
result = store['ss']
tm.assert_series_equal(result, ss)
self.assertIsNone(result.name)
store.append('ts', ts)
result = store['ts']
tm.assert_series_equal(result, ts)
self.assertIsNone(result.name)
ns.name = 'foo'
store.append('ns', ns)
result = store['ns']
tm.assert_series_equal(result, ns)
self.assertEqual(result.name, ns.name)
# select on the values
expected = ns[ns>60]
result = store.select('ns',Term('foo>60'))
tm.assert_series_equal(result,expected)
# select on the index and values
expected = ns[(ns>70) & (ns.index<90)]
result = store.select('ns',[Term('foo>70'), Term('index<90')])
tm.assert_series_equal(result,expected)
# multi-index
mi = DataFrame(np.random.randn(5,1),columns=['A'])
mi['B'] = np.arange(len(mi))
mi['C'] = 'foo'
mi.loc[3:5,'C'] = 'bar'
mi.set_index(['C','B'],inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append('mi', s)
tm.assert_series_equal(store['mi'], s)
def test_store_index_types(self):
# GH5386
# test storing various index types
with ensure_clean_store(self.path) as store:
def check(format,index):
df = DataFrame(np.random.randn(10,2),columns=list('AB'))
df.index = index(len(df))
_maybe_remove(store, 'df')
store.put('df',df,format=format)
assert_frame_equal(df,store['df'])
for index in [ tm.makeFloatIndex, tm.makeStringIndex, tm.makeIntIndex,
tm.makeDateIndex ]:
check('table',index)
check('fixed',index)
# period index currently broken for table
# see GH7796 FIXME
check('fixed',tm.makePeriodIndex)
#check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
if compat.PY3:
check('table',index)
check('fixed',index)
else:
# only support for fixed types (and they have a perf warning)
self.assertRaises(TypeError, check, 'table', index)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
check('fixed',index)
def test_encoding(self):
if sys.byteorder != 'little':
raise nose.SkipTest('system byteorder is not little')
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A='foo',B='bar'),index=range(5))
df.loc[2,'A'] = np.nan
df.loc[3,'B'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df, encoding='ascii')
tm.assert_frame_equal(store['df'], df)
expected = df.reindex(columns=['A'])
result = store.select('df',Term('columns=A',encoding='ascii'))
tm.assert_frame_equal(result,expected)
def test_append_some_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A' : Series(np.random.randn(20)).astype('int32'),
'A1' : np.random.randn(20),
'A2' : np.random.randn(20),
'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime.datetime(2001,1,2,0,0) },
index=np.arange(20))
# some nans
_maybe_remove(store, 'df1')
df.ix[0:15,['A1','B','D','E']] = np.nan
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
# first column
df1 = df.copy()
df1.ix[:,'A1'] = np.nan
_maybe_remove(store, 'df1')
store.append('df1', df1[:10])
store.append('df1', df1[10:])
tm.assert_frame_equal(store['df1'], df1)
# 2nd column
df2 = df.copy()
df2.ix[:,'A2'] = np.nan
_maybe_remove(store, 'df2')
store.append('df2', df2[:10])
store.append('df2', df2[10:])
tm.assert_frame_equal(store['df2'], df2)
# datetimes
df3 = df.copy()
df3.ix[:,'E'] = np.nan
_maybe_remove(store, 'df3')
store.append('df3', df3[:10])
store.append('df3', df3[10:])
tm.assert_frame_equal(store['df3'], df3)
def test_append_all_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A1' : np.random.randn(20),
'A2' : np.random.randn(20)},
index=np.arange(20))
df.ix[0:15,:] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# tests the option io.hdf.dropna_table
pandas.set_option('io.hdf.dropna_table',False)
_maybe_remove(store, 'df3')
store.append('df3', df[:10])
store.append('df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
pandas.set_option('io.hdf.dropna_table',True)
_maybe_remove(store, 'df4')
store.append('df4', df[:10])
store.append('df4', df[10:])
tm.assert_frame_equal(store['df4'], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame({'A1' : np.random.randn(20),
'A2' : np.random.randn(20),
'B' : 'foo', 'C' : 'bar'},
index=np.arange(20))
df.ix[0:15,:] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# nan some entire rows (but since we have dates they are still written!)
df = DataFrame({'A1' : np.random.randn(20),
'A2' : np.random.randn(20),
'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime.datetime(2001,1,2,0,0) },
index=np.arange(20))
df.ix[0:15,:] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
def test_append_frame_column_oriented(self):
with ensure_clean_store(self.path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df.ix[:, :2], axes=['columns'])
store.append('df1', df.ix[:, 2:])
tm.assert_frame_equal(store['df1'], df)
result = store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select(
'df1', ('columns=A', Term('index=df.index[0:4]')))
expected = df.reindex(columns=['A'], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
self.assertRaises(TypeError, store.select, 'df1', (
'columns=A', Term('index>df.index[4]')))
def test_append_with_different_block_ordering(self):
#GH 4096; using same frames, but different block orderings
with ensure_clean_store(self.path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10,2),columns=list('AB'))
df['index'] = range(10)
df['index'] += i*10
df['int64'] = Series([1]*len(df),dtype='int64')
df['int16'] = Series([1]*len(df),dtype='int16')
if i % 2 == 0:
del df['int64']
df['int64'] = Series([1]*len(df),dtype='int64')
if i % 3 == 0:
a = df.pop('A')
df['A'] = a
df.set_index('index',inplace=True)
store.append('df',df)
# test a different ordering but with more fields (like an invalid combination)
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(10,2),columns=list('AB'), dtype='float64')
df['int64'] = Series([1]*len(df),dtype='int64')
df['int16'] = Series([1]*len(df),dtype='int16')
store.append('df',df)
# store additional fields in different blocks
df['int16_2'] = Series([1]*len(df),dtype='int16')
self.assertRaises(ValueError, store.append, 'df', df)
# store multiple additional fields in different blocks
df['float_3'] = Series([1.]*len(df),dtype='float64')
self.assertRaises(ValueError, store.append, 'df', df)
def test_ndim_indexables(self):
""" test using ndim tables in new ways"""
with ensure_clean_store(self.path) as store:
p4d = tm.makePanel4D()
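# helper: check that each indexable column sits at the expected position in the table description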
def check_indexers(key, indexers):
for i, idx in enumerate(indexers):
self.assertTrue(getattr(getattr(
store.root, key).table.description, idx)._v_pos == i)
# append then change (will take existing schema)
indexers = ['items', 'major_axis', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store.select('p4d'), p4d)
check_indexers('p4d', indexers)
# same as above, but try to append with different axes
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :], axes=[
'labels', 'items', 'major_axis'])
assert_panel4d_equal(store.select('p4d'), p4d)
check_indexers('p4d', indexers)
# pass incorrect number of axes
_maybe_remove(store, 'p4d')
self.assertRaises(ValueError, store.append, 'p4d', p4d.ix[
:, :, :10, :], axes=['major_axis', 'minor_axis'])
# different than default indexables #1
indexers = ['labels', 'major_axis', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
check_indexers('p4d', indexers)
# different than default indexables #2
indexers = ['major_axis', 'labels', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
check_indexers('p4d', indexers)
# partial selection
result = store.select('p4d', ['labels=l1'])
expected = p4d.reindex(labels=['l1'])
assert_panel4d_equal(result, expected)
# partial selection2
result = store.select('p4d', [Term(
'labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
expected = p4d.reindex(
labels=['l1'], items=['ItemA'], minor_axis=['B'])
assert_panel4d_equal(result, expected)
# non-existent partial selection
result = store.select('p4d', [Term(
'labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
expected = p4d.reindex(labels=['l1'], items=[], minor_axis=['B'])
assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
with ensure_clean_store(self.path) as store:
wp = tm.makePanel()
wp2 = wp.rename_axis(
dict([(x, "%s_extra" % x) for x in wp.minor_axis]), axis=2)
def check_col(key,name,size):
self.assertEqual(getattr(store.get_storer(key).table.description,name).itemsize, size)
store.append('s1', wp, min_itemsize=20)
store.append('s1', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s1'], expected)
check_col('s1', 'minor_axis', 20)
# test dict format
store.append('s2', wp, min_itemsize={'minor_axis': 20})
store.append('s2', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s2'], expected)
check_col('s2', 'minor_axis', 20)
# apply the wrong field (similar to #1)
store.append('s3', wp, min_itemsize={'major_axis': 20})
self.assertRaises(ValueError, store.append, 's3', wp2)
# test truncation of bigger strings
store.append('s4', wp)
self.assertRaises(ValueError, store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big', df)
tm.assert_frame_equal(store.select('df_big'), df)
check_col('df_big', 'values_block_1', 15)
# appending smaller string ok
df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
store.append('df_big', df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select('df_big'), expected)
check_col('df_big', 'values_block_1', 15)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big2', df, min_itemsize={'values': 50})
tm.assert_frame_equal(store.select('df_big2'), df)
check_col('df_big2', 'values_block_1', 50)
# bigger string on next append
store.append('df_new', df)
df_new = DataFrame(
[[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
self.assertRaises(ValueError, store.append, 'df_new', df_new)
# with nans
_maybe_remove(store, 'df')
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.ix[1:4, 'string'] = np.nan
df['string2'] = 'bar'
df.ix[4:8, 'string2'] = np.nan
df['string3'] = 'bah'
df.ix[1:, 'string3'] = np.nan
store.append('df', df)
result = store.select('df')
tm.assert_frame_equal(result, df)
with ensure_clean_store(self.path) as store:
def check_col(key,name,size):
self.assertEqual(getattr(store.get_storer(key).table.description,name).itemsize, size)
df = DataFrame(dict(A = 'foo', B = 'bar'),index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, 'df')
store.append('df', df, min_itemsize={'A' : 200 })
check_col('df', 'A', 200)
self.assertEqual(store.get_storer('df').data_columns, ['A'])
# a min_itemsize that creates a data_column2
_maybe_remove(store, 'df')
store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 })
check_col('df', 'A', 200)
self.assertEqual(store.get_storer('df').data_columns, ['B','A'])
# a min_itemsize on 'values' applies to both data columns and value blocks
_maybe_remove(store, 'df')
store.append('df', df, data_columns = ['B'], min_itemsize={'values' : 200 })
check_col('df', 'B', 200)
check_col('df', 'values_block_0', 200)
self.assertEqual(store.get_storer('df').data_columns, ['B'])
# infer the .typ on subsequent appends
_maybe_remove(store, 'df')
store.append('df', df[:5], min_itemsize=200)
store.append('df', df[5:], min_itemsize=200)
tm.assert_frame_equal(store['df'], df)
# invalid min_itemsize keys
df = DataFrame(['foo','foo','foo','barh','barh','barh'],columns=['A'])
_maybe_remove(store, 'df')
self.assertRaises(ValueError, store.append, 'df', df, min_itemsize={'foo' : 20, 'foobar' : 20})
def test_append_with_data_columns(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df.loc[:,'B'].iloc[0] = 1.
_maybe_remove(store, 'df')
store.append('df', df[:2], data_columns=['B'])
store.append('df', df[2:])
tm.assert_frame_equal(store['df'], df)
# check that we have indices created
assert(store._handle.root.df.table.cols.index.is_indexed is True)
assert(store._handle.root.df.table.cols.B.is_indexed is True)
# data column searching
result = store.select('df', [Term('B>0')])
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data column)
result = store.select(
'df', [Term('B>0'), Term('index>df.index[3]')])
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new['string'] = 'foo'
df_new.loc[1:4,'string'] = np.nan
df_new.loc[5:6,'string'] = 'bar'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'])
result = store.select('df', [Term('string=foo')])
expected = df_new[df_new.string == 'foo']
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key,name,size):
self.assertEqual(getattr(store.get_storer(key).table.description,name).itemsize, size)
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'string': 30})
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['string'], min_itemsize=30)
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'values': 30})
check_col('df', 'string', 30)
with ensure_clean_store(self.path) as store:
df_new['string2'] = 'foobarbah'
df_new['string_block1'] = 'foobarbah1'
df_new['string_block2'] = 'foobarbah2'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string', 'string2'], min_itemsize={'string': 30, 'string2': 40, 'values': 50})
check_col('df', 'string', 30)
check_col('df', 'string2', 40)
check_col('df', 'values_block_1', 50)
with ensure_clean_store(self.path) as store:
# multiple data columns
df_new = df.copy()
df_new.ix[0,'A'] = 1.
df_new.ix[0,'B'] = -1.
df_new['string'] = 'foo'
df_new.loc[1:4,'string'] = np.nan
df_new.loc[5:6,'string'] = 'bar'
df_new['string2'] = 'foo'
df_new.loc[2:5,'string2'] = np.nan
df_new.loc[7:8,'string2'] = 'bar'
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
result = store.select('df', [Term('string=foo'), Term(
'string2=foo'), Term('A>0'), Term('B<0')])
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select('df', [Term('string=foo'), Term(
'string2=cool')])
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'cool')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example
df_dc = df.copy()
df_dc['string'] = 'foo'
df_dc.ix[4:6, 'string'] = np.nan
df_dc.ix[7:9, 'string'] = 'bar'
df_dc['string2'] = 'cool'
df_dc['datetime'] = Timestamp('20010102')
df_dc = df_dc.convert_objects()
df_dc.ix[3:5, ['A', 'B', 'datetime']] = np.nan
_maybe_remove(store, 'df_dc')
store.append('df_dc', df_dc, data_columns=['B', 'C',
'string', 'string2', 'datetime'])
result = store.select('df_dc', [Term('B>0')])
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == foo'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (
df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range('1/1/2000', periods=8)
df_dc = DataFrame(np.random.randn(8, 3), index=index,
columns=['A', 'B', 'C'])
df_dc['string'] = 'foo'
df_dc.ix[4:6,'string'] = np.nan
df_dc.ix[7:9,'string'] = 'bar'
df_dc.ix[:,['B','C']] = df_dc.ix[:,['B','C']].abs()
df_dc['string2'] = 'cool'
# on-disk operations
store.append('df_dc', df_dc, data_columns = ['B', 'C', 'string', 'string2'])
result = store.select('df_dc', [ Term('B>0') ])
expected = df_dc[df_dc.B>0]
tm.assert_frame_equal(result,expected)
result = store.select('df_dc', ['B > 0', 'C > 0', 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == 'foo')]
tm.assert_frame_equal(result,expected)
with ensure_clean_store(self.path) as store:
# panel
# GH5717 not handling data_columns
np.random.seed(1234)
p = tm.makePanel()
store.append('p1',p)
tm.assert_panel_equal(store.select('p1'),p)
store.append('p2',p,data_columns=True)
tm.assert_panel_equal(store.select('p2'),p)
result = store.select('p2',where='ItemA>0')
expected = p.to_frame()
expected = expected[expected['ItemA']>0]
tm.assert_frame_equal(result.to_frame(),expected)
result = store.select('p2',where='ItemA>0 & minor_axis=["A","B"]')
expected = p.to_frame()
expected = expected[expected['ItemA']>0]
expected = expected[expected.reset_index(level=['major']).index.isin(['A','B'])]
tm.assert_frame_equal(result.to_frame(),expected)
def test_create_table_index(self):
with ensure_clean_store(self.path) as store:
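# helper: fetch the underlying PyTables column object so its index properties can be inspected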
def col(t,column):
return getattr(store.get_storer(t).table.cols,column)
# index=False
wp = tm.makePanel()
store.append('p5', wp, index=False)
store.create_table_index('p5', columns=['major_axis'])
assert(col('p5', 'major_axis').is_indexed is True)
assert(col('p5', 'minor_axis').is_indexed is False)
# index=True
store.append('p5i', wp, index=True)
assert(col('p5i', 'major_axis').is_indexed is True)
assert(col('p5i', 'minor_axis').is_indexed is True)
# default optlevels
store.get_storer('p5').create_index()
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
# let's change the indexing scheme
store.create_table_index('p5')
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', optlevel=9)
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', kind='full')
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'full')
store.create_table_index('p5', optlevel=1, kind='light')
assert(col('p5', 'major_axis').index.optlevel == 1)
assert(col('p5', 'minor_axis').index.kind == 'light')
# data columns
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df['string2'] = 'bar'
store.append('f', df, data_columns=['string', 'string2'])
assert(col('f', 'index').is_indexed is True)
assert(col('f', 'string').is_indexed is True)
assert(col('f', 'string2').is_indexed is True)
# only create indexes on the specified columns
store.append(
'f2', df, index=['string'], data_columns=['string', 'string2'])
assert(col('f2', 'index').is_indexed is False)
assert(col('f2', 'string').is_indexed is True)
assert(col('f2', 'string2').is_indexed is False)
# try to index a non-table
_maybe_remove(store, 'f2')
store.put('f2', df)
self.assertRaises(TypeError, store.create_table_index, 'f2')
def test_append_diff_item_order(self):
wp = tm.makePanel()
wp1 = wp.ix[:, :10, :]
wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]
with ensure_clean_store(self.path) as store:
store.put('panel', wp1, format='table')
self.assertRaises(ValueError, store.put, 'panel', wp2,
append=True)
def test_append_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.append('mi', df)
result = store.select('mi')
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select('mi',columns=['A','B'])
expected = df.reindex(columns=['A','B'])
tm.assert_frame_equal(result,expected)
with ensure_clean_path('test.hdf') as path:
df.to_hdf(path,'df',format='table')
result = read_hdf(path,'df',columns=['A','B'])
expected = df.reindex(columns=['A','B'])
tm.assert_frame_equal(result,expected)
def test_column_multiindex(self):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples([('A','a'), ('A','b'), ('B','a'), ('B','b')], names=['first','second'])
df = DataFrame(np.arange(12).reshape(3,4), columns=index)
with ensure_clean_store(self.path) as store:
store.put('df',df)
tm.assert_frame_equal(store['df'],df,check_index_type=True,check_column_type=True)
store.put('df1',df,format='table')
tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)
self.assertRaises(ValueError, store.put, 'df2',df,format='table',data_columns=['A'])
self.assertRaises(ValueError, store.put, 'df3',df,format='table',data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(self.path) as store:
store.append('df2', df)
store.append('df2', df)
tm.assert_frame_equal(store['df2'], concat((df,df)))
# non_index_axes name
df = DataFrame(np.arange(12).reshape(3,4), columns=Index(list('ABCD'),name='foo'))
with ensure_clean_store(self.path) as store:
store.put('df1',df,format='table')
tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)
def test_store_multiindex(self):
# validate multi-index names
# GH 5527
with ensure_clean_store(self.path) as store:
def make_index(names=None):
return MultiIndex.from_tuples([( datetime.datetime(2013,12,d), s, t) for d in range(1,3) for s in range(2) for t in range(3)],
names=names)
# no names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index())
store.append('df',df)
tm.assert_frame_equal(store.select('df'),df)
# partial names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date',None,None]))
store.append('df',df)
tm.assert_frame_equal(store.select('df'),df)
# series
_maybe_remove(store, 's')
s = Series(np.zeros(12), index=make_index(['date',None,None]))
store.append('s',s)
tm.assert_series_equal(store.select('s'),s)
# dup with column
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','a','t']))
self.assertRaises(ValueError, store.append, 'df',df)
# dup within level
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','date','date']))
self.assertRaises(ValueError, store.append, 'df',df)
# fully names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','s','t']))
store.append('df',df)
tm.assert_frame_equal(store.select('df'),df)
def test_select_columns_in_where(self):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo_name', 'bar_name'])
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
expected = df[['A']]
tm.assert_frame_equal(store.select('df', columns=['A']), expected)
tm.assert_frame_equal(store.select('df', where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index,
name='A')
with ensure_clean_store(self.path) as store:
store.put('s', s, format='table')
tm.assert_series_equal(store.select('s', where="columns=['A']"),s)
def test_pass_spec_to_storer(self):
df = tm.makeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df',df)
self.assertRaises(TypeError, store.select, 'df', columns=['A'])
self.assertRaises(TypeError, store.select, 'df',where=[('columns=A')])
def test_append_misc(self):
with ensure_clean_store(self.path) as store:
# unsupported data types for non-tables
p4d = tm.makePanel4D()
self.assertRaises(TypeError, store.put,'p4d',p4d)
# unsupported data types
self.assertRaises(TypeError, store.put,'abc',None)
self.assertRaises(TypeError, store.put,'abc','123')
self.assertRaises(TypeError, store.put,'abc',123)
self.assertRaises(TypeError, store.put,'abc',np.arange(5))
df = tm.makeDataFrame()
store.append('df', df, chunksize=1)
result = store.select('df')
tm.assert_frame_equal(result, df)
store.append('df1', df, expectedrows=10)
result = store.select('df1')
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(self.path,mode='w') as store:
store.append('obj', obj, chunksize=c)
result = store.select('obj')
comparator(result,obj)
df = tm.makeDataFrame()
df['string'] = 'foo'
df['float322'] = 1.
df['float322'] = df['float322'].astype('float32')
df['bool'] = df['float322'] > 0
df['time1'] = Timestamp('20130101')
df['time2'] = Timestamp('20130102')
check(df, tm.assert_frame_equal)
p = tm.makePanel()
check(p, assert_panel_equal)
p4d = tm.makePanel4D()
check(p4d, assert_panel4d_equal)
# empty frame, GH4273
with ensure_clean_store(self.path) as store:
# 0 len
df_empty = DataFrame(columns=list('ABC'))
store.append('df',df_empty)
self.assertRaises(KeyError,store.select, 'df')
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10,3),columns=list('ABC'))
store.append('df',df)
assert_frame_equal(store.select('df'),df)
store.append('df',df_empty)
assert_frame_equal(store.select('df'),df)
# store
df = DataFrame(columns=list('ABC'))
store.put('df2',df)
assert_frame_equal(store.select('df2'),df)
# 0 len
p_empty = Panel(items=list('ABC'))
store.append('p',p_empty)
self.assertRaises(KeyError,store.select, 'p')
# repeated append of 0/non-zero frames
p = Panel(np.random.randn(3,4,5),items=list('ABC'))
store.append('p',p)
assert_panel_equal(store.select('p'),p)
store.append('p',p_empty)
assert_panel_equal(store.select('p'),p)
# store
store.put('p2',p_empty)
assert_panel_equal(store.select('p2'),p_empty)
def test_append_raise(self):
with ensure_clean_store(self.path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df['invalid'] = [['a']] * len(df)
self.assertEqual(df.dtypes['invalid'], np.object_)
self.assertRaises(TypeError, store.append,'df',df)
# multiple invalid columns
df['invalid2'] = [['a']] * len(df)
df['invalid3'] = [['a']] * len(df)
self.assertRaises(TypeError, store.append,'df',df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001,1,2),index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df['invalid'] = s
self.assertEqual(df.dtypes['invalid'], np.object_)
self.assertRaises(TypeError, store.append,'df', df)
# directly an ndarray
self.assertRaises(TypeError, store.append,'df',np.arange(10))
# series directly
self.assertRaises(TypeError, store.append,'df',Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append('df',df)
df['foo'] = 'foo'
self.assertRaises(ValueError, store.append,'df',df)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
with ensure_clean_store(self.path) as store:
store.put('frame', df1, format='table')
self.assertRaises(TypeError, store.put, 'frame', df2,
format='table', append=True)
def test_table_values_dtypes_roundtrip(self):
with ensure_clean_store(self.path) as store:
df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')
store.append('df_f8', df1)
assert_series_equal(df1.dtypes,store['df_f8'].dtypes)
df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')
store.append('df_i8', df2)
assert_series_equal(df2.dtypes,store['df_i8'].dtypes)
# incompatible dtype
self.assertRaises(ValueError, store.append, 'df_i8', df1)
# check creation/storage/retrieval of float32 (a bit hacky to actually create them though)
df1 = DataFrame(np.array([[1],[2],[3]],dtype='f4'),columns = ['A'])
store.append('df_f4', df1)
assert_series_equal(df1.dtypes,store['df_f4'].dtypes)
assert df1.dtypes[0] == 'float32'
# check with mixed dtypes
df1 = DataFrame(dict([ (c,Series(np.random.randn(5),dtype=c)) for c in
['float32','float64','int32','int64','int16','int8'] ]))
df1['string'] = 'foo'
df1['float322'] = 1.
df1['float322'] = df1['float322'].astype('float32')
df1['bool'] = df1['float32'] > 0
df1['time1'] = Timestamp('20130101')
df1['time2'] = Timestamp('20130102')
store.append('df_mixed_dtypes1', df1)
result = store.select('df_mixed_dtypes1').get_dtype_counts()
expected = Series({ 'float32' : 2, 'float64' : 1,'int32' : 1, 'bool' : 1,
'int16' : 1, 'int8' : 1, 'int64' : 1, 'object' : 1,
'datetime64[ns]' : 2})
result.sort()
expected.sort()
tm.assert_series_equal(result,expected)
def test_table_mixed_dtypes(self):
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.ix[3:6, ['obj1']] = np.nan
df = df.consolidate().convert_objects()
with ensure_clean_store(self.path) as store:
store.append('df1_mixed', df)
tm.assert_frame_equal(store.select('df1_mixed'), df)
# panel
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
wp = wp.consolidate()
with ensure_clean_store(self.path) as store:
store.append('p1_mixed', wp)
assert_panel_equal(store.select('p1_mixed'), wp)
# ndim
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['l1'] > 0
wp['bool2'] = wp['l2'] > 0
wp['int1'] = 1
wp['int2'] = 2
wp = wp.consolidate()
with ensure_clean_store(self.path) as store:
store.append('p4d_mixed', wp)
assert_panel4d_equal(store.select('p4d_mixed'), wp)
def test_unimplemented_dtypes_table_columns(self):
with ensure_clean_store(self.path) as store:
l = [('date', datetime.date(2001, 1, 2))]
# py3 ok for unicode
if not compat.PY3:
l.append(('unicode', u('\\u03c3')))
### currently not supported dtypes ####
for n, f in l:
df = tm.makeDataFrame()
df[n] = f
self.assertRaises(
TypeError, store.append, 'df1_%s' % n, df)
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['datetime1'] = datetime.date(2001, 1, 2)
df = df.consolidate().convert_objects()
with ensure_clean_store(self.path) as store:
# this fails because we have a date in the object block
self.assertRaises(TypeError, store.append, 'df_unimplemented', df)
def test_append_with_timezones_pytz(self):
from datetime import timedelta
def compare(a,b):
tm.assert_frame_equal(a,b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a[c][i]
b_e = b[c][i]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError("invalid tz comparsion [%s] [%s]" % (a_e,b_e))
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A = [ Timestamp('20130102 2:00:00',tz='US/Eastern') + timedelta(hours=1)*i for i in range(5) ]))
store.append('df_tz',df,data_columns=['A'])
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
# select with tz aware
compare(store.select('df_tz',where=Term('A>=df.A[3]')),df[df.A>=df.A[3]])
_maybe_remove(store, 'df_tz')
# ensure we include dates in DST and STD time here.
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130603',tz='US/Eastern')),index=range(5))
store.append('df_tz',df)
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='EET')),index=range(5))
self.assertRaises(TypeError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz',df,data_columns=['A','B'])
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
# can't append with diff timezone
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='CET')),index=range(5))
self.assertRaises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H', tz='US/Eastern'))))
_maybe_remove(store, 'df')
store.put('df',df)
result = store.select('df')
assert_frame_equal(result,df)
_maybe_remove(store, 'df')
store.append('df',df)
result = store.select('df')
assert_frame_equal(result,df)
def test_calendar_roundtrip_issue(self):
# 8591
# doc example from tseries holiday section
weekmask_egypt = 'Sun Mon Tue Wed Thu'
holidays = ['2012-05-01', datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')]
bday_egypt = pandas.offsets.CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = (Series(dts.weekday, dts).map(Series('Mon Tue Wed Thu Fri Sat Sun'.split())))
with ensure_clean_store(self.path) as store:
store.put('fixed',s)
result = store.select('fixed')
assert_series_equal(result, s)
store.append('table',s)
result = store.select('table')
assert_series_equal(result, s)
def test_append_with_timezones_dateutil(self):
from datetime import timedelta
tm._skip_if_no_dateutil()
# use maybe_get_tz instead of dateutil.tz.gettz to handle the windows filename issues.
from pandas.tslib import maybe_get_tz
gettz = lambda x: maybe_get_tz('dateutil/' + x)
def compare(a, b):
tm.assert_frame_equal(a, b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a[c][i]
b_e = b[c][i]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError("invalid tz comparsion [%s] [%s]" % (a_e, b_e))
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[ Timestamp('20130102 2:00:00', tz=gettz('US/Eastern')) + timedelta(hours=1) * i for i in range(5) ]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
compare(result, df)
assert_frame_equal(result, df)
# select with tz aware
compare(store.select('df_tz', where=Term('A>=df.A[3]')), df[df.A >= df.A[3]])
_maybe_remove(store, 'df_tz')
# ensure we include dates in DST and STD time here.
df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130603', tz=gettz('US/Eastern'))), index=range(5))
store.append('df_tz', df)
result = store['df_tz']
compare(result, df)
assert_frame_equal(result, df)
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130102', tz=gettz('EET'))), index=range(5))
self.assertRaises(TypeError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
compare(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130102', tz=gettz('CET'))), index=range(5))
self.assertRaises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range('2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern')))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def test_store_timezone(self):
# GH2852
# issue storing datetime.date with a timezone as it resets when read back in a new timezone
import platform
if platform.system() == "Windows":
raise nose.SkipTest("timezone setting not supported on windows")
import datetime
import time
import os
# original method
with ensure_clean_store(self.path) as store:
today = datetime.date(2013,9,10)
df = DataFrame([1,2,3], index = [today, today, today])
store['obj1'] = df
result = store['obj1']
assert_frame_equal(result, df)
# with tz setting
orig_tz = os.environ.get('TZ')
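# helper: switch the process timezone via the TZ environment variable (applied with time.tzset)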
def setTZ(tz):
if tz is None:
try:
del os.environ['TZ']
except:
pass
else:
os.environ['TZ']=tz
time.tzset()
try:
with ensure_clean_store(self.path) as store:
setTZ('EST5EDT')
today = datetime.date(2013,9,10)
df = DataFrame([1,2,3], index = [today, today, today])
store['obj1'] = df
setTZ('CST6CDT')
result = store['obj1']
assert_frame_equal(result, df)
finally:
setTZ(orig_tz)
def test_append_with_timedelta(self):
# GH 3577
# append timedelta
from datetime import timedelta
df = DataFrame(dict(A = Timestamp('20130101'), B = [ Timestamp('20130101') + timedelta(days=i,seconds=10) for i in range(10) ]))
df['C'] = df['A']-df['B']
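# C is a timedelta64 column; the NaN assignment below is stored as NaT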
df.ix[3:5,'C'] = np.nan
with ensure_clean_store(self.path) as store:
# table
_maybe_remove(store, 'df')
store.append('df',df,data_columns=True)
result = store.select('df')
assert_frame_equal(result,df)
result = store.select('df',Term("C<100000"))
assert_frame_equal(result,df)
result = store.select('df',Term("C","<",-3*86400))
assert_frame_equal(result,df.iloc[3:])
result = store.select('df',"C<'-3D'")
assert_frame_equal(result,df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select('df',"C<'-500000s'")
result = result.dropna(subset=['C'])
assert_frame_equal(result,df.iloc[6:])
result = store.select('df',"C<'-3.5D'")
result = result.iloc[1:]
assert_frame_equal(result,df.iloc[4:])
# fixed
_maybe_remove(store, 'df2')
store.put('df2',df)
result = store.select('df2')
assert_frame_equal(result,df)
def test_remove(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store['a'] = ts
store['b'] = df
_maybe_remove(store, 'a')
self.assertEqual(len(store), 1)
tm.assert_frame_equal(df, store['b'])
_maybe_remove(store, 'b')
self.assertEqual(len(store), 0)
# nonexistence
self.assertRaises(KeyError, store.remove, 'a_nonexistent_store')
# pathing
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'foo')
_maybe_remove(store, 'b/foo')
self.assertEqual(len(store), 1)
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'b')
self.assertEqual(len(store), 1)
# __delitem__
store['a'] = ts
store['b'] = df
del store['a']
del store['b']
self.assertEqual(len(store), 0)
def test_remove_where(self):
with ensure_clean_store(self.path) as store:
# non-existence
crit1 = Term('index>foo')
self.assertRaises(KeyError, store.remove, 'a', [crit1])
# try to remove non-table (with crit)
# non-table ok (where = None)
wp = tm.makePanel(30)
store.put('wp', wp, format='table')
store.remove('wp', ["minor_axis=['A', 'D']"])
rs = store.select('wp')
expected = wp.reindex(minor_axis=['B', 'C'])
assert_panel_equal(rs, expected)
# empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
# number of rows deleted (entire table)
n = store.remove('wp', [])
self.assertTrue(n == 120)
# non-empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
self.assertRaises(ValueError, store.remove,
'wp', ['foo'])
# selecting non-table with a where
# store.put('wp2', wp, format='f')
# self.assertRaises(ValueError, store.remove,
# 'wp2', [('column', ['A', 'D'])])
def test_remove_startstop(self):
# GH #4835 and #6177
with ensure_clean_store(self.path) as store:
wp = tm.makePanel(30)
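# the panel is stored as 30 major_axis dates x 4 minor_axis labels, i.e. 120 table rows, which the deletion counts below are measured against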
# start
_maybe_remove(store, 'wp1')
store.put('wp1', wp, format='t')
n = store.remove('wp1', start=32)
self.assertTrue(n == 120-32)
result = store.select('wp1')
expected = wp.reindex(major_axis=wp.major_axis[:32//4])
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp2')
store.put('wp2', wp, format='t')
n = store.remove('wp2', start=-32)
self.assertTrue(n == 32)
result = store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis[:-32//4])
assert_panel_equal(result, expected)
# stop
_maybe_remove(store, 'wp3')
store.put('wp3', wp, format='t')
n = store.remove('wp3', stop=32)
self.assertTrue(n == 32)
result = store.select('wp3')
expected = wp.reindex(major_axis=wp.major_axis[32//4:])
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp4')
store.put('wp4', wp, format='t')
n = store.remove('wp4', stop=-32)
self.assertTrue(n == 120-32)
result = store.select('wp4')
expected = wp.reindex(major_axis=wp.major_axis[-32//4:])
assert_panel_equal(result, expected)
# start and stop
_maybe_remove(store, 'wp5')
store.put('wp5', wp, format='t')
n = store.remove('wp5', start=16, stop=-16)
self.assertTrue(n == 120-32)
result = store.select('wp5')
expected = wp.reindex(major_axis=wp.major_axis[:16//4].union(wp.major_axis[-16//4:]))
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp6')
store.put('wp6', wp, format='t')
n = store.remove('wp6', start=16, stop=16)
self.assertTrue(n == 0)
result = store.select('wp6')
expected = wp.reindex(major_axis=wp.major_axis)
assert_panel_equal(result, expected)
# with where
_maybe_remove(store, 'wp7')
date = wp.major_axis.take(np.arange(0,30,3))
crit = Term('major_axis=date')
store.put('wp7', wp, format='t')
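# stop=80 limits the deletion to the first 20 dates (4 rows per date); 7 of those dates match the criterion, so 28 rows are removed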
n = store.remove('wp7', where=[crit], stop=80)
self.assertTrue(n == 28)
result = store.select('wp7')
expected = wp.reindex(major_axis=wp.major_axis.difference(wp.major_axis[np.arange(0,20,3)]))
assert_panel_equal(result, expected)
def test_remove_crit(self):
with ensure_clean_store(self.path) as store:
wp = tm.makePanel(30)
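# as above, 30 dates x 4 minor_axis labels give 120 stored rows, which explains the counts asserted below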
# group row removal
_maybe_remove(store, 'wp3')
date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10])
crit4 = Term('major_axis=date4')
store.put('wp3', wp, format='t')
n = store.remove('wp3', where=[crit4])
self.assertTrue(n == 36)
result = store.select('wp3')
expected = wp.reindex(major_axis=wp.major_axis.difference(date4))
assert_panel_equal(result, expected)
# upper half
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = Term('major_axis>date')
crit2 = Term("minor_axis=['A', 'D']")
n = store.remove('wp', where=[crit1])
self.assertTrue(n == 56)
n = store.remove('wp', where=[crit2])
self.assertTrue(n == 32)
result = store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
assert_panel_equal(result, expected)
# individual row elements
_maybe_remove(store, 'wp2')
store.put('wp2', wp, format='table')
date1 = wp.major_axis[1:3]
crit1 = Term('major_axis=date1')
store.remove('wp2', where=[crit1])
result = store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis.difference(date1))
assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = Term('major_axis=date2')
store.remove('wp2', where=[crit2])
result = store['wp2']
expected = wp.reindex(
major_axis=wp.major_axis.difference(date1).difference(Index([date2])))
assert_panel_equal(result, expected)
date3 = [wp.major_axis[7], wp.major_axis[9]]
crit3 = Term('major_axis=date3')
store.remove('wp2', where=[crit3])
result = store['wp2']
expected = wp.reindex(
major_axis=wp.major_axis.difference(date1).difference(Index([date2])).difference(Index(date3)))
assert_panel_equal(result, expected)
# corners
_maybe_remove(store, 'wp4')
store.put('wp4', wp, format='table')
n = store.remove(
'wp4', where=[Term('major_axis>wp.major_axis[-1]')])
result = store.select('wp4')
assert_panel_equal(result, wp)
def test_invalid_terms(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.ix[0:4,'string'] = 'bar'
wp = tm.makePanel()
p4d = tm.makePanel4D()
store.put('df', df, format='table')
store.put('wp', wp, format='table')
store.put('p4d', p4d, format='table')
# some invalid terms
self.assertRaises(ValueError, store.select, 'wp', "minor=['A', 'B']")
self.assertRaises(ValueError, store.select, 'wp', ["index=['20121114']"])
self.assertRaises(ValueError, store.select, 'wp', ["index=['20121114', '20121114']"])
self.assertRaises(TypeError, Term)
# more invalid
self.assertRaises(ValueError, store.select, 'df','df.index[3]')
self.assertRaises(SyntaxError, store.select, 'df','index>')
self.assertRaises(ValueError, store.select, 'wp', "major_axis<'20000108' & minor_axis['A', 'B']")
# from the docs
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10))
dfq.to_hdf(path,'dfq',format='table',data_columns=True)
# check ok
read_hdf(path,'dfq',where="index>Timestamp('20130104') & columns=['A', 'B']")
read_hdf(path,'dfq',where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10))
dfq.to_hdf(path,'dfq',format='table')
self.assertRaises(ValueError, read_hdf, path,'dfq',where="A>0 or C>0")
def test_terms(self):
with ensure_clean_store(self.path) as store:
wp = tm.makePanel()
p4d = tm.makePanel4D()
wpneg = Panel.fromDict({-1: tm.makeDataFrame(), 0: tm.makeDataFrame(),
1: tm.makeDataFrame()})
store.put('wp', wp, table=True)
store.put('p4d', p4d, table=True)
store.put('wpneg', wpneg, table=True)
# panel
result = store.select('wp', [Term(
'major_axis<"20000108"'), Term("minor_axis=['A', 'B']")])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
# with deprecation
result = store.select('wp', [Term(
'major_axis','<',"20000108"), Term("minor_axis=['A', 'B']")])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
tm.assert_panel_equal(result, expected)
# p4d
result = store.select('p4d', [Term('major_axis<"20000108"'),
Term("minor_axis=['A', 'B']"),
Term("items=['ItemA', 'ItemB']")])
expected = p4d.truncate(after='20000108').reindex(
minor=['A', 'B'], items=['ItemA', 'ItemB'])
assert_panel4d_equal(result, expected)
# back compat invalid terms
terms = [
dict(field='major_axis', op='>', value='20121114'),
[ dict(field='major_axis', op='>', value='20121114') ],
[ "minor_axis=['A','B']", dict(field='major_axis', op='>', value='20121114') ]
]
for t in terms:
with tm.assert_produces_warning(expected_warning=DeprecationWarning):
Term(t)
# valid terms
terms = [
('major_axis=20121114'),
('major_axis>20121114'),
(("major_axis=['20121114', '20121114']"),),
('major_axis=datetime.datetime(2012, 11, 14)'),
'major_axis> 20121114',
'major_axis >20121114',
'major_axis > 20121114',
(("minor_axis=['A', 'B']"),),
(("minor_axis=['A', 'B']"),),
((("minor_axis==['A', 'B']"),),),
(("items=['ItemA', 'ItemB']"),),
('items=ItemA'),
]
for t in terms:
store.select('wp', t)
store.select('p4d', t)
# valid for p4d only
terms = [
(("labels=['l1', 'l2']"),),
Term("labels=['l1', 'l2']"),
]
for t in terms:
store.select('p4d', t)
with tm.assertRaisesRegexp(TypeError, 'Only named functions are supported'):
store.select('wp', Term('major_axis == (lambda x: x)("20130101")'))
# check USub node parsing
res = store.select('wpneg', Term('items == -1'))
expected = Panel({-1: wpneg[-1]})
tm.assert_panel_equal(res, expected)
with tm.assertRaisesRegexp(NotImplementedError,
'Unary addition not supported'):
store.select('wpneg', Term('items == +1'))
def test_term_compat(self):
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp',wp)
result = store.select('wp', [Term('major_axis>20000102'),
Term('minor_axis', '=', ['A','B']) ])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']]
assert_panel_equal(result, expected)
store.remove('wp', Term('major_axis>20000103'))
result = store.select('wp')
expected = wp.loc[:,wp.major_axis<=Timestamp('20000103'),:]
assert_panel_equal(result, expected)
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp',wp)
# stringified datetimes
result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2))])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2,0,0))])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select('wp', [Term('major_axis','=',[datetime.datetime(2000,1,2,0,0),datetime.datetime(2000,1,3,0,0)])])
expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
assert_panel_equal(result, expected)
result = store.select('wp', [Term('minor_axis','=',['A','B'])])
expected = wp.loc[:,:,['A','B']]
assert_panel_equal(result, expected)
def test_backwards_compat_without_term_object(self):
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp',wp)
with tm.assert_produces_warning(expected_warning=DeprecationWarning):
result = store.select('wp', [('major_axis>20000102'),
('minor_axis', '=', ['A','B']) ])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']]
assert_panel_equal(result, expected)
store.remove('wp', ('major_axis>20000103'))
result = store.select('wp')
expected = wp.loc[:,wp.major_axis<=Timestamp('20000103'),:]
assert_panel_equal(result, expected)
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp',wp)
# stringified datetimes
with tm.assert_produces_warning(expected_warning=DeprecationWarning):
result = store.select('wp', [('major_axis','>',datetime.datetime(2000,1,2))])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
assert_panel_equal(result, expected)
with tm.assert_produces_warning(expected_warning=DeprecationWarning):
result = store.select('wp', [('major_axis','>',datetime.datetime(2000,1,2,0,0))])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
assert_panel_equal(result, expected)
with tm.assert_produces_warning(expected_warning=DeprecationWarning):
result = store.select('wp', [('major_axis','=',[datetime.datetime(2000,1,2,0,0),
datetime.datetime(2000,1,3,0,0)])])
expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
assert_panel_equal(result, expected)
with tm.assert_produces_warning(expected_warning=DeprecationWarning):
result = store.select('wp', [('minor_axis','=',['A','B'])])
expected = wp.loc[:,:,['A','B']]
assert_panel_equal(result, expected)
def test_same_name_scoping(self):
with ensure_clean_store(self.path) as store:
import pandas as pd
df = DataFrame(np.random.randn(20, 2),index=pd.date_range('20130101',periods=20))
store.put('df', df, table=True)
expected = df[df.index>pd.Timestamp('20130105')]
import datetime
result = store.select('df','index>datetime.datetime(2013,1,5)')
assert_frame_equal(result,expected)
from datetime import datetime
# technically an error, but allow it
result = store.select('df','index>datetime.datetime(2013,1,5)')
assert_frame_equal(result,expected)
result = store.select('df','index>datetime(2013,1,5)')
assert_frame_equal(result,expected)
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_double_roundtrip(sp, assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_double_roundtrip(sp2, assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_double_roundtrip(sp3, assert_panel_equal,
check_panel_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0., 1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
self._check_roundtrip(DF, tm.assert_frame_equal)
def test_index_types(self):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(l, r,
check_dtype=True,
check_index_type=True,
check_series_type=True)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime(
2012, 1, 1), datetime.datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
raise nose.SkipTest("won't work on Python < 2.7")
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
raise nose.SkipTest('known failure on some Windows platforms')
def test_frame(self):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal)
self._check_roundtrip(df, tm.assert_frame_equal)
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=True)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=True)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal)
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=True)
with ensure_clean_store(self.path) as store:
# not consolidated
df['foo'] = np.random.randn(len(df))
store['df'] = df
recons = store['df']
self.assertTrue(recons._data.is_consolidated())
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
df1 = DataFrame(index=['a', 'b', 'c'])
df2 = DataFrame(columns=['d', 'e', 'f'])
self._check_roundtrip(s0, tm.assert_series_equal)
self._check_roundtrip(s1, tm.assert_series_equal)
self._check_roundtrip(df0, tm.assert_frame_equal)
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
def test_empty_series(self):
for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']:
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal)
def test_can_serialize_dates(self):
rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal)
def test_timezones(self):
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
self.assertTrue(recons.index.equals(rng))
self.assertEqual(rng.tz, recons.index.tz)
def test_fixed_offset_tz(self):
rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
self.assertTrue(recons.index.equals(rng))
self.assertEqual(rng.tz, recons.index.tz)
def test_store_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self._check_roundtrip(frame, tm.assert_frame_equal)
self._check_roundtrip(frame.T, tm.assert_frame_equal)
self._check_roundtrip(frame['A'], tm.assert_series_equal)
# check that the names are stored
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
assert(recons.index.names == ('foo', 'bar'))
def test_store_index_name(self):
df = tm.makeDataFrame()
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store['frame'] = df
recons = store['frame']
assert(recons.index.name == 'foo')
def test_store_series_name(self):
df = tm.makeDataFrame()
series = df['A']
with ensure_clean_store(self.path) as store:
store['series'] = series
recons = store['series']
assert(recons.name == 'A')
def test_store_mixed(self):
def _make_one():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
with ensure_clean_store(self.path) as store:
store['obj'] = df1
tm.assert_frame_equal(store['obj'], df1)
store['obj'] = df2
tm.assert_frame_equal(store['obj'], df2)
# check that we can store Series of all of these types
self._check_roundtrip(df1['obj1'], tm.assert_series_equal)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal)
self._check_roundtrip(df1['int1'], tm.assert_series_equal)
# try with compression
self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['int1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1, tm.assert_frame_equal,
compression=True)
def test_wide(self):
wp = tm.makePanel()
self._check_roundtrip(wp, assert_panel_equal)
def test_wide_table(self):
wp = tm.makePanel()
self._check_roundtrip_table(wp, assert_panel_equal)
def test_select_with_dups(self):
# single dtypes
df = DataFrame(np.random.randn(10,4),columns=['A','A','B','B'])
df.index = date_range('20130101 9:30',periods=10,freq='T')
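# by_blocks=True compares the frames by their internal blocks (the frame has duplicate column labels)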
with ensure_clean_store(self.path) as store:
store.append('df',df)
result = store.select('df')
expected = df
assert_frame_equal(result,expected,by_blocks=True)
result = store.select('df',columns=df.columns)
expected = df
assert_frame_equal(result,expected,by_blocks=True)
result = store.select('df',columns=['A'])
expected = df.loc[:,['A']]
assert_frame_equal(result,expected)
# dups across dtypes
df = concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),
DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],
axis=1)
df.index = date_range('20130101 9:30',periods=10,freq='T')
with ensure_clean_store(self.path) as store:
store.append('df',df)
result = store.select('df')
expected = df
assert_frame_equal(result,expected,by_blocks=True)
result = store.select('df',columns=df.columns)
expected = df
assert_frame_equal(result,expected,by_blocks=True)
expected = df.loc[:,['A']]
result = store.select('df',columns=['A'])
assert_frame_equal(result,expected,by_blocks=True)
expected = df.loc[:,['B','A']]
result = store.select('df',columns=['B','A'])
assert_frame_equal(result,expected,by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(self.path) as store:
store.append('df',df)
store.append('df',df)
expected = df.loc[:,['B','A']]
expected = concat([expected, expected])
result = store.select('df',columns=['B','A'])
assert_frame_equal(result,expected,by_blocks=True)
def test_wide_table_dups(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
store.put('panel', wp, format='table')
store.put('panel', wp, format='table', append=True)
with tm.assert_produces_warning(expected_warning=DuplicateWarning):
recons = store['panel']
assert_panel_equal(recons, wp)
def test_long(self):
def _check(left, right):
assert_panel_equal(left.to_panel(), right.to_panel())
wp = tm.makePanel()
self._check_roundtrip(wp.to_frame(), _check)
# empty
# self._check_roundtrip(wp.to_frame()[:0], _check)
def test_longpanel(self):
pass
def test_overwrite_node(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store['a'] = ts
tm.assert_series_equal(store['a'], ts)
def test_sparse_with_compression(self):
# GH 2931
# make sparse dataframe
df = DataFrame(np.random.binomial(n=1, p=.01, size=(1e3, 10))).to_sparse(fill_value=0)
# case 1: store uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = False,
check_frame_type=True)
# case 2: store compressed (works)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = 'zlib',
check_frame_type=True)
# set one series to be completely sparse
df[0] = np.zeros(1e3)
# case 3: store df with completely sparse series uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = False,
check_frame_type=True)
# case 4: try storing df with completely sparse series compressed (fails)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = 'zlib',
check_frame_type=True)
def test_select(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
# put/select ok
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
store.select('wp')
# non-table ok (where = None)
_maybe_remove(store, 'wp')
store.put('wp2', wp)
store.select('wp2')
# selection on the non-indexable with a large number of columns
wp = Panel(
np.random.randn(100, 100, 100), items=['Item%03d' % i for i in range(100)],
major_axis=date_range('1/1/2000', periods=100), minor_axis=['E%03d' % i for i in range(100)])
_maybe_remove(store, 'wp')
store.append('wp', wp)
items = ['Item%03d' % i for i in range(80)]
result = store.select('wp', Term('items=items'))
expected = wp.reindex(items=items)
assert_panel_equal(expected, result)
            # selecting non-table with a where
# self.assertRaises(ValueError, store.select,
# 'wp2', ('column', ['A', 'D']))
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
            # equivalently
result = store.select('df', [("columns=['A', 'B']")])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
            # all as data columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['C', 'D'])
expected = df[df.A > 0].reindex(columns=['C', 'D'])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self):
with ensure_clean_store(self.path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300), A=np.random.randn(300)))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A'])
result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5,2), columns =['A','B'])
df['object'] = 'foo'
df.ix[4:5,'object'] = 'bar'
df['boolv'] = df['A'] > 0
_maybe_remove(store, 'df')
store.append('df', df, data_columns = True)
expected = df[df.boolv == True].reindex(columns=['A','boolv'])
for v in [True,'true',1]:
result = store.select('df', Term('boolv == %s' % str(v)), columns = ['A','boolv'])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False ].reindex(columns=['A','boolv'])
for v in [False,'false',0]:
result = store.select('df', Term('boolv == %s' % str(v)), columns = ['A','boolv'])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, 'df_int')
store.append('df_int', df)
result = store.select(
'df_int', [Term("index<10"), Term("columns=['A']")])
expected = df.reindex(index=list(df.index)[0:10],columns=['A'])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(dict(A=np.random.rand(
20), B=np.random.rand(20), index=np.arange(20, dtype='f8')))
_maybe_remove(store, 'df_float')
store.append('df_float', df)
result = store.select(
'df_float', [Term("index<10.0"), Term("columns=['A']")])
expected = df.reindex(index=list(df.index)[0:10],columns=['A'])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(self.path) as store:
# floats w/o NaN
df = DataFrame(dict(cols = range(11), values = range(11)),dtype='float64')
df['cols'] = (df['cols']+10).apply(str)
store.append('df1',df,data_columns=True)
result = store.select(
'df1', where='values>2.0')
expected = df[df['values']>2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df['values']>2.0]
store.append('df2',df,data_columns=True,index=False)
result = store.select(
'df2', where='values>2.0')
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
#store.append('df3',df,data_columns=True)
#result = store.select(
# 'df3', where='values>2.0')
#tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame(dict(cols = range(11), values = range(11)),dtype='float64')
df['cols'] = (df['cols']+10).apply(str)
df.iloc[1] = np.nan
expected = df[df['values']>2.0]
store.append('df4',df,data_columns=True)
result = store.select(
'df4', where='values>2.0')
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300),
B=range(300),
users = ['a']*50 + ['b']*50 + ['c']*100 + ['a%03d' % i for i in range(100)]))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])
# regular select
result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select('df', [Term("ts>=Timestamp('2012-02-01') & users=['a','b','c']")])
expected = df[ (df.ts >= Timestamp('2012-02-01')) & df.users.isin(['a','b','c']) ]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = [ 'a','b','c' ] + [ 'a%03d' % i for i in range(60) ]
result = store.select('df', [Term("ts>=Timestamp('2012-02-01')"),Term('users=selector')])
expected = df[ (df.ts >= Timestamp('2012-02-01')) & df.users.isin(selector) ]
tm.assert_frame_equal(expected, result)
selector = range(100,200)
result = store.select('df', [Term('B=selector')])
expected = df[ df.B.isin(selector) ]
tm.assert_frame_equal(expected, result)
self.assertEqual(len(result), 100)
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select('df', [Term('ts=selector')])
expected = df[ df.ts.isin(selector.values) ]
tm.assert_frame_equal(expected, result)
self.assertEqual(len(result), 100)
def test_select_iterator(self):
# single table
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, 'df')
store.append('df', df)
expected = store.select('df')
results = [ s for s in store.select('df',iterator=True) ]
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [ s for s in store.select('df',chunksize=100) ]
self.assertEqual(len(results), 5)
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [ s for s in store.select('df',chunksize=150) ]
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path,'df_non_table')
self.assertRaises(TypeError, read_hdf, path,'df_non_table',chunksize=100)
self.assertRaises(TypeError, read_hdf, path,'df_non_table',iterator=True)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path,'df',format='table')
results = [ s for s in read_hdf(path,'df',chunksize=100) ]
result = concat(results)
self.assertEqual(len(results), 5)
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path,'df'))
# multiple
with ensure_clean_store(self.path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append('df1',df1,data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
store.append('df2',df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(
['df1', 'df2'], selector='df1')
results = [ s for s in store.select_as_multiple(
['df1', 'df2'], selector='df1', chunksize=150) ]
result = concat(results)
tm.assert_frame_equal(expected, result)
# where selection
#expected = store.select_as_multiple(
# ['df1', 'df2'], where= Term('A>0'), selector='df1')
#results = []
#for s in store.select_as_multiple(
# ['df1', 'df2'], where= Term('A>0'), selector='df1', chunksize=25):
# results.append(s)
#result = concat(results)
#tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self):
# GH 8014
# using iterator and where clause
        chunksize = int(1e4)
# no iterator
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df',expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select('df')
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '%s'" % beg_dt
result = store.select('df',where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '%s'" % end_dt
result = store.select('df',where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
result = store.select('df',where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df',expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = [ s for s in store.select('df',chunksize=chunksize) ]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self):
# GH 8014
# using iterator and where clause
        chunksize = int(1e4)
# with iterator, non complete range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df',expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df',expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '%s'" % end_dt
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
self.assertEqual(0, len(results))
def test_select_iterator_many_empty_frames(self):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize=int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100000, 'S')
_maybe_remove(store, 'df')
store.append('df',expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize-1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
tm.assert_equal(1, len(results))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
# should be 1, is 10
tm.assert_equal(1, len(results))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = "index <= '%s' & index >= '%s'" % (beg_dt, end_dt)
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
# should be []
tm.assert_equal(0, len(results))
def test_retain_index_attributes(self):
# GH 3499, losing frequency info on index recreation
df = DataFrame(dict(A = Series(lrange(3),
index=date_range('2000-1-1',periods=3,freq='H'))))
with ensure_clean_store(self.path) as store:
_maybe_remove(store,'data')
store.put('data', df, format='table')
result = store.get('data')
tm.assert_frame_equal(df,result)
for attr in ['freq','tz','name']:
for idx in ['index','columns']:
self.assertEqual(getattr(getattr(df,idx),attr,None),
getattr(getattr(result,idx),attr,None))
# try to append a table with a different frequency
with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):
df2 = DataFrame(dict(A = Series(lrange(3),
index=date_range('2002-1-1',periods=3,freq='D'))))
store.append('data',df2)
self.assertIsNone(store.get_storer('data').info['index']['freq'])
# this is ok
_maybe_remove(store,'df2')
df2 = DataFrame(dict(A = Series(lrange(3),
index=[Timestamp('20010101'),Timestamp('20010102'),Timestamp('20020101')])))
store.append('df2',df2)
df3 = DataFrame(dict(A = Series(lrange(3),index=date_range('2002-1-1',periods=3,freq='D'))))
store.append('df2',df3)
def test_retain_index_attributes2(self):
with ensure_clean_path(self.path) as path:
with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):
df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H'))))
df.to_hdf(path,'data',mode='w',append=True)
df2 = DataFrame(dict(A = Series(lrange(3), index=date_range('2002-1-1',periods=3,freq='D'))))
df2.to_hdf(path,'data',append=True)
idx = date_range('2000-1-1',periods=3,freq='H')
idx.name = 'foo'
df = DataFrame(dict(A = Series(lrange(3), index=idx)))
df.to_hdf(path,'data',mode='w',append=True)
self.assertEqual(read_hdf(path,'data').index.name, 'foo')
with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):
idx2 = date_range('2001-1-1',periods=3,freq='H')
idx2.name = 'bar'
df2 = DataFrame(dict(A = Series(lrange(3), index=idx2)))
df2.to_hdf(path,'data',append=True)
self.assertIsNone(read_hdf(path,'data').index.name)
def test_panel_select(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = ('major_axis>=date')
crit2 = ("minor_axis=['A', 'D']")
result = store.select('wp', [crit1, crit2])
expected = wp.truncate(before=date).reindex(minor=['A', 'D'])
assert_panel_equal(result, expected)
result = store.select(
'wp', ['major_axis>="20000124"', ("minor_axis=['A', 'B']")])
expected = wp.truncate(before='20000124').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
def test_frame_select(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('frame', df,format='table')
date = df.index[len(df) // 2]
crit1 = Term('index>=date')
self.assertEqual(crit1.env.scope['date'], date)
crit2 = ("columns=['A', 'D']")
crit3 = ('columns=A')
result = store.select('frame', [crit1, crit2])
expected = df.ix[date:, ['A', 'D']]
tm.assert_frame_equal(result, expected)
result = store.select('frame', [crit3])
expected = df.ix[:, ['A']]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append('df_time', df)
self.assertRaises(
ValueError, store.select, 'df_time', [Term("index>0")])
# can't select if not written as table
# store['frame'] = df
# self.assertRaises(ValueError, store.select,
# 'frame', [crit1, crit2])
def test_frame_select_complex(self):
# select via complex criteria
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[df.index[0:4],'string'] = 'bar'
with ensure_clean_store(self.path) as store:
store.put('df', df, table=True, data_columns=['string'])
# empty
result = store.select('df', 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index>df.index[3]) & (df.string=='bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index>df.index[3]) & (df.string=='foo')]
tm.assert_frame_equal(result, expected)
# or
result = store.select('df', 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index>df.index[3]) | (df.string=='bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', '(index>df.index[3] & index<=df.index[6]) | string="bar"')
expected = df.loc[((df.index>df.index[3]) & (df.index<=df.index[6])) | (df.string=='bar')]
tm.assert_frame_equal(result, expected)
# invert
result = store.select('df', 'string!="bar"')
expected = df.loc[df.string!='bar']
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
self.assertRaises(NotImplementedError, store.select, 'df', '~(string="bar")')
# invert ok for filters
result = store.select('df', "~(columns=['A','B'])")
expected = df.loc[:,df.columns-['A','B']]
tm.assert_frame_equal(result, expected)
# in
result = store.select('df', "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index>df.index[3]].reindex(columns=['A','B'])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self):
with ensure_clean_path(['parms.hdf','hist.hdf']) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({ 'A' : [1,1,2,2,3] })
parms.to_hdf(pp,'df',mode='w',format='table',data_columns=['A'])
selection = read_hdf(pp,'df',where='A=[2,3]')
hist = DataFrame(np.random.randn(25,1),columns=['data'],
index=MultiIndex.from_tuples([ (i,j) for i in range(5) for j in range(5) ],
names=['l1','l2']))
hist.to_hdf(hh,'df',mode='w',format='table')
expected = read_hdf(hh,'df',where=Term('l1','=',[2,3,4]))
# list like
result = read_hdf(hh,'df',where=Term('l1','=',selection.index.tolist()))
assert_frame_equal(result, expected)
l = selection.index.tolist()
            # scope with list like
store = HDFStore(hh)
result = store.select('df',where='l1=l')
assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh,'df',where='l1=l')
assert_frame_equal(result, expected)
# index
index = selection.index
result = read_hdf(hh,'df',where='l1=index')
assert_frame_equal(result, expected)
result = read_hdf(hh,'df',where='l1=selection.index')
assert_frame_equal(result, expected)
result = read_hdf(hh,'df',where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = read_hdf(hh,'df',where='l1=list(selection.index)')
assert_frame_equal(result, expected)
            # scope with index
store = HDFStore(hh)
result = store.select('df',where='l1=index')
assert_frame_equal(result, expected)
result = store.select('df',where='l1=selection.index')
assert_frame_equal(result, expected)
result = store.select('df',where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = store.select('df',where='l1=list(selection.index)')
assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df, table=True)
# not implemented
self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A'] | columns=['B']")
# in theory we could deal with this
self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A','B'] & columns=['C']")
def test_string_select(self):
# GH 2973
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df['x'] = 'none'
df.ix[2:7,'x'] = ''
store.append('df',df,data_columns=['x'])
result = store.select('df',Term('x=none'))
expected = df[df.x == 'none']
assert_frame_equal(result,expected)
try:
result = store.select('df',Term('x!=none'))
expected = df[df.x != 'none']
assert_frame_equal(result,expected)
except Exception as detail:
com.pprint_thing("[{0}]".format(detail))
com.pprint_thing(store)
com.pprint_thing(expected)
df2 = df.copy()
df2.loc[df2.x=='','x'] = np.nan
store.append('df2',df2,data_columns=['x'])
result = store.select('df2',Term('x!=none'))
expected = df2[isnull(df2.x)]
assert_frame_equal(result,expected)
# int ==/!=
df['int'] = 1
df.ix[2:7,'int'] = 2
store.append('df3',df,data_columns=['int'])
result = store.select('df3',Term('int=2'))
expected = df[df.int==2]
assert_frame_equal(result,expected)
result = store.select('df3',Term('int!=2'))
expected = df[df.int!=2]
assert_frame_equal(result,expected)
def test_read_column(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# error
self.assertRaises(KeyError, store.select_column, 'df', 'foo')
def f():
store.select_column('df', 'index', where = ['index>5'])
self.assertRaises(Exception, f)
# valid
result = store.select_column('df', 'index')
tm.assert_almost_equal(result.values, Series(df.index).values)
self.assertIsInstance(result,Series)
# not a data indexable column
self.assertRaises(
ValueError, store.select_column, 'df', 'values_block_0')
# a data column
df2 = df.copy()
df2['string'] = 'foo'
store.append('df2', df2, data_columns=['string'])
result = store.select_column('df2', 'string')
tm.assert_almost_equal(result.values, df2['string'].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3['string'] = 'foo'
df3.ix[4:6, 'string'] = np.nan
store.append('df3', df3, data_columns=['string'])
result = store.select_column('df3', 'string')
tm.assert_almost_equal(result.values, df3['string'].values)
# start/stop
result = store.select_column('df3', 'string', start=2)
tm.assert_almost_equal(result.values, df3['string'].values[2:])
result = store.select_column('df3', 'string', start=-2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:])
result = store.select_column('df3', 'string', stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[:2])
result = store.select_column('df3', 'string', stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[:-2])
result = store.select_column('df3', 'string', start=2, stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[2:-2])
result = store.select_column('df3', 'string', start=-2, stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:2])
def test_coordinates(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# all
c = store.select_as_coordinates('df')
assert((c.values == np.arange(len(df.index))).all() == True)
# get coordinates back & test vs frame
_maybe_remove(store, 'df')
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
store.append('df', df)
c = store.select_as_coordinates('df', ['index<3'])
assert((c.values == np.arange(3)).all() == True)
result = store.select('df', where=c)
expected = df.ix[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])
assert((c.values == np.arange(2) + 3).all() == True)
result = store.select('df', where=c)
expected = df.ix[3:4, :]
tm.assert_frame_equal(result, expected)
self.assertIsInstance(c, Index)
# multiple tables
_maybe_remove(store, 'df1')
_maybe_remove(store, 'df2')
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
c = store.select_as_coordinates('df1', ['A>0', 'B>0'])
df1_result = store.select('df1', c)
df2_result = store.select('df2', c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(1000,2),index=date_range('20000101',periods=1000))
store.append('df',df)
c = store.select_column('df','index')
where = c[DatetimeIndex(c).month==5].index
expected = df.iloc[where]
# locations
result = store.select('df',where=where)
tm.assert_frame_equal(result,expected)
# boolean
result = store.select('df',where=where)
tm.assert_frame_equal(result,expected)
# invalid
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df),dtype='float64'))
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)+1))
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)),start=5)
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)),start=5,stop=10)
# selection with filter
selection = date_range('20000101',periods=500)
result = store.select('df', where='index in selection')
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result,expected)
# list
df = DataFrame(np.random.randn(10,2))
store.append('df2',df)
result = store.select('df2',where=[0,3,5])
expected = df.iloc[[0,3,5]]
tm.assert_frame_equal(result,expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select('df2',where=where)
expected = df.loc[where]
tm.assert_frame_equal(result,expected)
# start/stop
result = store.select('df2', start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result,expected)
def test_append_to_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# exceptions
self.assertRaises(ValueError, store.append_to_multiple,
{'df1': ['A', 'B'], 'df2': None}, df, selector='df3')
self.assertRaises(ValueError, store.append_to_multiple,
{'df1': None, 'df2': None}, df, selector='df3')
self.assertRaises(
ValueError, store.append_to_multiple, 'df1', df, 'df1')
# regular operation
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1')
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.ix[1, ['A', 'B']] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=True)
result = store.select_as_multiple(['df1', 'df2'])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select('df1').index,
store.select('df2').index)
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=False)
self.assertRaises(
ValueError, store.select_as_multiple, ['df1', 'df2'])
assert not store.select('df1').index.equals(
store.select('df2').index)
def test_select_as_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
with ensure_clean_store(self.path) as store:
# no tables stored
self.assertRaises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
# exceptions
self.assertRaises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
self.assertRaises(Exception, store.select_as_multiple,
[None], where=['A>0', 'B>0'], selector='df1')
self.assertRaises(KeyError, store.select_as_multiple,
['df1','df3'], where=['A>0', 'B>0'], selector='df1')
self.assertRaises(KeyError, store.select_as_multiple,
['df3'], where=['A>0', 'B>0'], selector='df1')
self.assertRaises(KeyError, store.select_as_multiple,
['df1','df2'], where=['A>0', 'B>0'], selector='df4')
# default select
result = store.select('df1', ['A>0', 'B>0'])
expected = store.select_as_multiple(
['df1'], where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
'df1', where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(['df1', 'df2'], where=[Term(
'index>df2.index[4]')], selector='df2')
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
            # test exception for diff rows
store.append('df3', tm.makeTimeDataFrame(nper=50))
self.assertRaises(ValueError, store.select_as_multiple,
['df1','df3'], where=['A>0', 'B>0'], selector='df1')
def test_nan_selection_bug_4858(self):
# GH 4858; nan selection bug, only works for pytables >= 3.1
if LooseVersion(tables.__version__) < '3.1.0':
raise nose.SkipTest('tables version does not support fix for nan selection bug: GH 4858')
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(cols = range(6), values = range(6)), dtype='float64')
df['cols'] = (df['cols']+10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(dict(cols = ['13.0','14.0','15.0'], values = [3.,4.,5.]), index=[3,4,5])
# write w/o the index on that particular column
store.append('df',df, data_columns=True,index=['cols'])
result = store.select('df',where='values>2.0')
assert_frame_equal(result,expected)
def test_start_stop(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append('df', df)
result = store.select(
'df', [Term("columns=['A']")], start=0, stop=5)
expected = df.ix[0:4, ['A']]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', [Term("columns=['A']")], start=30, stop=40)
assert(len(result) == 0)
assert(type(result) == DataFrame)
def test_select_filter_corner(self):
df = DataFrame(np.random.randn(50, 100))
df.index = ['%.3d' % c for c in df.index]
df.columns = ['%.3d' % c for c in df.columns]
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
crit = Term('columns=df.columns[:75]')
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.ix[:, df.columns[:75]])
crit = Term('columns=df.columns[:75:2]')
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.ix[:, df.columns[:75:2]])
def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(self, obj, comparator, compression=False,
**kwargs):
options = {}
if compression:
options['complib'] = compression or _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
store['obj'] = retrieved
again = store['obj']
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, compression=False):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store.put('obj', obj, format='table')
retrieved = store['obj']
# sorted_obj = _test_sort(obj)
comparator(retrieved, obj)
def test_multiple_open_close(self):
# GH 4409, open & close multiple times
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path,'df',mode='w',format='table')
# single
store = HDFStore(path)
self.assertNotIn('CLOSED', str(store))
self.assertTrue(store.is_open)
store.close()
self.assertIn('CLOSED', str(store))
self.assertFalse(store.is_open)
with ensure_clean_path(self.path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
def f():
HDFStore(path)
self.assertRaises(ValueError, f)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
self.assertNotIn('CLOSED', str(store1))
self.assertNotIn('CLOSED', str(store2))
self.assertTrue(store1.is_open)
self.assertTrue(store2.is_open)
store1.close()
self.assertIn('CLOSED', str(store1))
self.assertFalse(store1.is_open)
self.assertNotIn('CLOSED', str(store2))
self.assertTrue(store2.is_open)
store2.close()
self.assertIn('CLOSED', str(store1))
self.assertIn('CLOSED', str(store2))
self.assertFalse(store1.is_open)
self.assertFalse(store2.is_open)
# nested close
store = HDFStore(path,mode='w')
store.append('df',df)
store2 = HDFStore(path)
store2.append('df2',df)
store2.close()
self.assertIn('CLOSED', str(store2))
self.assertFalse(store2.is_open)
store.close()
self.assertIn('CLOSED', str(store))
self.assertFalse(store.is_open)
# double closing
store = HDFStore(path,mode='w')
store.append('df', df)
store2 = HDFStore(path)
store.close()
self.assertIn('CLOSED', str(store))
self.assertFalse(store.is_open)
store2.close()
self.assertIn('CLOSED', str(store2))
self.assertFalse(store2.is_open)
# ops on a closed store
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path,'df',mode='w',format='table')
store = HDFStore(path)
store.close()
self.assertRaises(ClosedFileError, store.keys)
self.assertRaises(ClosedFileError, lambda : 'df' in store)
self.assertRaises(ClosedFileError, lambda : len(store))
self.assertRaises(ClosedFileError, lambda : store['df'])
self.assertRaises(ClosedFileError, lambda : store.df)
self.assertRaises(ClosedFileError, store.select, 'df')
self.assertRaises(ClosedFileError, store.get, 'df')
self.assertRaises(ClosedFileError, store.append, 'df2', df)
self.assertRaises(ClosedFileError, store.put, 'df3', df)
self.assertRaises(ClosedFileError, store.get_storer, 'df2')
self.assertRaises(ClosedFileError, store.remove, 'df2')
def f():
store.select('df')
tm.assertRaisesRegexp(ClosedFileError, 'file is not open', f)
def test_pytables_native_read(self):
try:
store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native.h5'), 'r')
d2 = store['detector/readout']
assert isinstance(d2, DataFrame)
finally:
safe_close(store)
try:
store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native2.h5'), 'r')
str(store)
d1 = store['detector']
assert isinstance(d1, DataFrame)
finally:
safe_close(store)
def test_legacy_read(self):
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy.h5'), 'r')
store['a']
store['b']
store['c']
store['d']
finally:
safe_close(store)
def test_legacy_table_read(self):
# legacy table types
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table.h5'), 'r')
store.select('df1')
store.select('df2')
store.select('wp1')
# force the frame
store.select('df2', typ='legacy_frame')
# old version warning
with tm.assert_produces_warning(expected_warning=IncompatibilityWarning):
self.assertRaises(
Exception, store.select, 'wp1', Term('minor_axis=B'))
df2 = store.select('df2')
result = store.select('df2', Term('index>df2.index[2]'))
expected = df2[df2.index > df2.index[2]]
assert_frame_equal(expected, result)
finally:
safe_close(store)
def test_legacy_0_10_read(self):
# legacy from 0.10
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_0.10.h5'), 'r')
str(store)
for k in store.keys():
store.select(k)
finally:
safe_close(store)
def test_legacy_0_11_read(self):
# legacy from 0.11
try:
path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5')
store = HDFStore(tm.get_data_path(path), 'r')
str(store)
assert 'df' in store
assert 'df1' in store
assert 'mi' in store
df = store.select('df')
df1 = store.select('df1')
mi = store.select('mi')
assert isinstance(df, DataFrame)
assert isinstance(df1, DataFrame)
assert isinstance(mi, DataFrame)
finally:
safe_close(store)
def test_copy(self):
def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs):
try:
if f is None:
f = tm.get_data_path(os.path.join('legacy_hdf',
'legacy_0.10.h5'))
store = HDFStore(f, 'r')
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(new_f, keys = keys, propindexes = propindexes, **kwargs)
# check keys
if keys is None:
keys = store.keys()
self.assertEqual(set(keys), set(tstore.keys()))
                # check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
self.assertEqual(orig_t.nrows, new_t.nrows)
                        # check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
self.assertTrue(new_t[a.name].is_indexed)
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except:
pass
safe_remove(new_f)
do_copy()
do_copy(keys = ['/a','/b','/df1_mixed'])
do_copy(propindexes = False)
# new table
df = tm.makeDataFrame()
try:
st = HDFStore(self.path)
st.append('df', df, data_columns = ['A'])
st.close()
do_copy(f = self.path)
do_copy(f = self.path, propindexes = False)
finally:
safe_remove(self.path)
def test_legacy_table_write(self):
raise nose.SkipTest("cannot write legacy tables")
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table_%s.h5' % pandas.__version__), 'a')
df = tm.makeDataFrame()
wp = tm.makePanel()
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
store.append('mi', df)
df = DataFrame(dict(A = 'foo', B = 'bar'),index=lrange(10))
store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 })
store.append('wp', wp)
store.close()
def test_store_datetime_fractional_secs(self):
with ensure_clean_store(self.path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store['a'] = series
self.assertEqual(store['a'].index[0], dt)
def test_tseries_indices_series(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
assert_series_equal(result, ser)
self.assertEqual(type(result.index), type(ser.index))
self.assertEqual(result.index.freq, ser.index.freq)
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
assert_series_equal(result, ser)
self.assertEqual(type(result.index), type(ser.index))
self.assertEqual(result.index.freq, ser.index.freq)
def test_tseries_indices_frame(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
self.assertEqual(type(result.index), type(df.index))
self.assertEqual(result.index.freq, df.index.freq)
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
self.assertEqual(type(result.index), type(df.index))
self.assertEqual(result.index.freq, df.index.freq)
def test_tseries_select_index_column(self):
# GH7777
# selecting a UTC datetimeindex column did
# not preserve UTC tzinfo set before storing
# check that no tz still works
rng = date_range('1/1/2000', '1/30/2000')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
# check utc
rng = date_range('1/1/2000', '1/30/2000', tz='UTC')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
# double check non-utc
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
def test_unicode_index(self):
unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
def f():
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal)
compat_assert_produces_warning(PerformanceWarning,f)
def test_store_datetime_mixed(self):
df = DataFrame(
{'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']})
ts = tm.makeTimeSeries()
df['d'] = ts.index[:3]
self._check_roundtrip(df, tm.assert_frame_equal)
# def test_cant_write_multiindex_table(self):
# # for now, #1848
# df = DataFrame(np.random.randn(10, 4),
# index=[np.arange(5).repeat(2),
# np.tile(np.arange(2), 5)])
# self.assertRaises(Exception, store.put, 'foo', df, format='table')
def test_append_with_diff_col_name_types_raises_value_error(self):
df = DataFrame(np.random.randn(10, 1))
df2 = DataFrame({'a': np.random.randn(10)})
df3 = DataFrame({(1, 2): np.random.randn(10)})
df4 = DataFrame({('1', 2): np.random.randn(10)})
df5 = DataFrame({('1', 2, object): np.random.randn(10)})
with ensure_clean_store(self.path) as store:
name = 'df_%s' % tm.rands(10)
store.append(name, df)
for d in (df2, df3, df4, df5):
with tm.assertRaises(ValueError):
store.append(name, d)
def test_query_with_nested_special_character(self):
df = DataFrame({'a': ['a', 'a', 'c', 'b', 'test & test', 'c' , 'b', 'e'],
'b': [1, 2, 3, 4, 5, 6, 7, 8]})
expected = df[df.a == 'test & test']
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
result = store.select('test', 'a = "test & test"')
tm.assert_frame_equal(expected, result)
def test_categorical(self):
with ensure_clean_store(self.path) as store:
# basic
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=['a','b','c','d'], ordered=False))
store.append('s', s, format='table')
result = store.select('s')
tm.assert_series_equal(s, result)
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=['a','b','c','d'], ordered=True))
store.append('s_ordered', s, format='table')
result = store.select('s_ordered')
tm.assert_series_equal(s, result)
df = DataFrame({"s":s, "vals":[1,2,3,4,5,6]})
store.append('df', df, format='table')
result = store.select('df')
tm.assert_frame_equal(result, df)
# dtypes
s = Series([1,1,2,2,3,4,5]).astype('category')
store.append('si',s)
result = store.select('si')
tm.assert_series_equal(result, s)
s = Series([1,1,np.nan,2,3,4,5]).astype('category')
store.append('si2',s)
result = store.select('si2')
tm.assert_series_equal(result, s)
# multiple
df2 = df.copy()
df2['s2'] = Series(list('abcdefg')).astype('category')
store.append('df2',df2)
result = store.select('df2')
tm.assert_frame_equal(result, df2)
# make sure the metadata is ok
self.assertTrue('/df2 ' in str(store))
self.assertTrue('/df2/meta/values_block_0/meta' in str(store))
self.assertTrue('/df2/meta/values_block_1/meta' in str(store))
# unordered
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=['a','b','c','d'],ordered=False))
store.append('s2', s, format='table')
result = store.select('s2')
tm.assert_series_equal(result, s)
# query
store.append('df3', df, data_columns=['s'])
expected = df[df.s.isin(['b','c'])]
result = store.select('df3', where = ['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['b','c'])]
result = store.select('df3', where = ['s = ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['d'])]
result = store.select('df3', where = ['s in ["d"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['f'])]
result = store.select('df3', where = ['s in ["f"]'])
tm.assert_frame_equal(result, expected)
# appending with same categories is ok
store.append('df3', df)
df = concat([df,df])
expected = df[df.s.isin(['b','c'])]
result = store.select('df3', where = ['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
# appending must have the same categories
df3 = df.copy()
df3['s'].cat.remove_unused_categories(inplace=True)
self.assertRaises(ValueError, lambda : store.append('df3', df3))
# remove
            # make sure meta data is removed (it's a recursive removal so should be)
result = store.select('df3/meta/s/meta')
self.assertIsNotNone(result)
store.remove('df3')
self.assertRaises(KeyError, lambda : store.select('df3/meta/s/meta'))
def test_duplicate_column_name(self):
df = DataFrame(columns=["a", "a"], data=[[0, 0]])
with ensure_clean_path(self.path) as path:
self.assertRaises(ValueError, df.to_hdf, path, 'df', format='fixed')
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
self.assertTrue(df.equals(other))
self.assertTrue(other.equals(df))
def test_round_trip_equals(self):
# GH 9330
df = DataFrame({"B": [1,2], "A": ["x","y"]})
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
self.assertTrue(df.equals(other))
self.assertTrue(other.equals(df))
def test_preserve_timedeltaindex_type(self):
# GH9635
# Storing TimedeltaIndexed DataFrames in fixed stores did not preserve
# the type of the index.
df = DataFrame(np.random.normal(size=(10,5)))
df.index = timedelta_range(start='0s',periods=10,freq='1s',name='example')
with ensure_clean_store(self.path) as store:
store['df'] = df
assert_frame_equal(store['df'], df)
def _test_sort(obj):
if isinstance(obj, DataFrame):
return obj.reindex(sorted(obj.index))
elif isinstance(obj, Panel):
return obj.reindex(major=sorted(obj.major_axis))
else:
raise ValueError('type not supported here')
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| [] | [] | ["TZ"] | [] | ["TZ"] | python | 1 | 0 | |
oscar/lib/python3.5/site-packages/pysolr.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import ast
import datetime
import logging
import os
import random
import re
import time
from xml.etree import ElementTree
from pkg_resources import DistributionNotFound, get_distribution, parse_version
import requests
try:
from kazoo.client import KazooClient, KazooState
except ImportError:
KazooClient = KazooState = None
try:
# Prefer simplejson, if installed.
import simplejson as json
except ImportError:
import json
try:
# Python 3.X
from urllib.parse import urlencode
except ImportError:
# Python 2.X
from urllib import urlencode
try:
# Python 3.X
from urllib.parse import quote
except ImportError:
# Python 2.X
from urllib import quote
try:
# Python 3.X
import html.entities as htmlentities
except ImportError:
# Python 2.X
import htmlentitydefs as htmlentities
try:
# Python 3.X
from http.client import HTTPException
except ImportError:
from httplib import HTTPException
try:
# Python 2.X
unicode_char = unichr
except NameError:
# Python 3.X
unicode_char = chr
# Ugh.
long = int
__author__ = 'Daniel Lindsley, Joseph Kocherhans, Jacob Kaplan-Moss'
__all__ = ['Solr']
try:
pkg_distribution = get_distribution(__name__)
__version__ = pkg_distribution.version
version_info = pkg_distribution.parsed_version
except DistributionNotFound:
__version__ = '0.0.dev0'
version_info = parse_version(__version__)
def get_version():
return __version__
DATETIME_REGEX = re.compile(r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})(\.\d+)?Z$')
# dict key used to add nested documents to a document
NESTED_DOC_KEY = '_childDocuments_'
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Add the ``NullHandler`` to avoid logging by default while still allowing
# others to attach their own handlers.
LOG = logging.getLogger('pysolr')
h = NullHandler()
LOG.addHandler(h)
# For debugging...
if os.environ.get("DEBUG_PYSOLR", "").lower() in ("true", "1"):
LOG.setLevel(logging.DEBUG)
stream = logging.StreamHandler()
LOG.addHandler(stream)
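# An illustrative sketch, not part of the original pysolr source: exporting
# DEBUG_PYSOLR=1 (or "true") in the environment before importing pysolr
# switches the 'pysolr' logger to DEBUG and attaches a StreamHandler, so every
# request and response summary is printed to stderr.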
def is_py3():
try:
basestring
return False
except NameError:
return True
IS_PY3 = is_py3()
def force_unicode(value):
"""
Forces a bytestring to become a Unicode string.
"""
if IS_PY3:
# Python 3.X
if isinstance(value, bytes):
value = value.decode('utf-8', errors='replace')
elif not isinstance(value, str):
value = str(value)
else:
# Python 2.X
if isinstance(value, str):
value = value.decode('utf-8', 'replace')
elif not isinstance(value, basestring):
value = unicode(value)
return value
def force_bytes(value):
"""
Forces a Unicode string to become a bytestring.
"""
if IS_PY3:
if isinstance(value, str):
value = value.encode('utf-8', 'backslashreplace')
else:
if isinstance(value, unicode):
value = value.encode('utf-8')
return value
def unescape_html(text):
"""
Removes HTML or XML character references and entities from a text string.
@param text The HTML (or XML) source text.
@return The plain text, as a Unicode string, if necessary.
Source: http://effbot.org/zone/re-sub.htm#unescape-html
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unicode_char(int(text[3:-1], 16))
else:
return unicode_char(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unicode_char(htmlentities.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
    return re.sub(r"&#?\w+;", fixup, text)
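# An illustrative sketch, not part of the original pysolr source: how
# unescape_html behaves on text mixing a named entity with decimal and hex
# character references (the expected values follow from fixup() above).
#
#   >>> unescape_html('AT&T <b>bold</b> A')
#   'AT&T <b>bold</b> A'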
def safe_urlencode(params, doseq=0):
"""
    UTF-8-safe version of urlencode.
    The stdlib urlencode prior to Python 3.x chokes on UTF-8 values
    which can't fall back to ascii.
"""
if IS_PY3:
return urlencode(params, doseq)
if hasattr(params, "items"):
params = params.items()
new_params = list()
for k, v in params:
k = k.encode("utf-8")
if isinstance(v, (list, tuple)):
new_params.append((k, [force_bytes(i) for i in v]))
else:
new_params.append((k, force_bytes(v)))
return urlencode(new_params, doseq)
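# An illustrative sketch, not part of the original pysolr source: on Python 3
# safe_urlencode simply delegates to urllib.parse.urlencode, while on Python 2
# it first forces keys and values to UTF-8 bytes so non-ASCII input does not
# blow up with UnicodeEncodeError.
#
#   >>> safe_urlencode({'q': 'Dublin Café'})
#   'q=Dublin+Caf%C3%A9'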
def is_valid_xml_char_ordinal(i):
"""
    Defines whether a character is valid to use in an XML document.
XML standard defines a valid char as::
Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
"""
# conditions ordered by presumed frequency
return (
0x20 <= i <= 0xD7FF
or i in (0x9, 0xA, 0xD)
or 0xE000 <= i <= 0xFFFD
or 0x10000 <= i <= 0x10FFFF
)
def clean_xml_string(s):
"""
    Cleans a string of invalid XML characters.
    Solution was found here::
http://stackoverflow.com/questions/8733233/filtering-out-certain-bytes-in-python
"""
return ''.join(c for c in s if is_valid_xml_char_ordinal(ord(c)))
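# An illustrative sketch, not part of the original pysolr source: characters
# outside the XML 1.0 range are dropped while tabs, newlines and ordinary text
# survive.
#
#   >>> clean_xml_string('ok\x00\x08\tvalue')
#   'ok\tvalue'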
class SolrError(Exception):
pass
class Results(object):
"""
Default results class for wrapping decoded (from JSON) solr responses.
Required ``decoded`` argument must be a Solr response dictionary.
Individual documents can be retrieved either through ``docs`` attribute
or by iterating over results instance.
Example::
results = Results({
'response': {
'docs': [{'id': 1}, {'id': 2}, {'id': 3}],
'numFound': 3,
}
})
# this:
for doc in results:
print doc
# ... is equivalent to:
for doc in results.docs:
print doc
# also:
list(results) == results.docs
Note that ``Results`` object does not support indexing and slicing. If you
need to retrieve documents by index just use ``docs`` attribute.
Other common response metadata (debug, highlighting, qtime, etc.) are available as attributes.
The full response from Solr is provided as the `raw_response` dictionary for use with features which
change the response format.
"""
def __init__(self, decoded):
self.raw_response = decoded
# main response part of decoded Solr response
response_part = decoded.get('response') or {}
self.docs = response_part.get('docs', ())
self.hits = response_part.get('numFound', 0)
# other response metadata
self.debug = decoded.get('debug', {})
self.highlighting = decoded.get('highlighting', {})
self.facets = decoded.get('facet_counts', {})
self.spellcheck = decoded.get('spellcheck', {})
self.stats = decoded.get('stats', {})
self.qtime = decoded.get('responseHeader', {}).get('QTime', None)
self.grouped = decoded.get('grouped', {})
self.nextCursorMark = decoded.get('nextCursorMark', None)
def __len__(self):
return len(self.docs)
def __iter__(self):
return iter(self.docs)
class Solr(object):
"""
The main object for working with Solr.
Optionally accepts ``decoder`` for an alternate JSON decoder instance.
Default is ``json.JSONDecoder()``.
Optionally accepts ``timeout`` for wait seconds until giving up on a
request. Default is ``60`` seconds.
Optionally accepts ``results_cls`` that specifies class of results object
returned by ``.search()`` and ``.more_like_this()`` methods.
Default is ``pysolr.Results``.
Usage::
solr = pysolr.Solr('http://localhost:8983/solr')
# With a 10 second timeout.
solr = pysolr.Solr('http://localhost:8983/solr', timeout=10)
# with a dict as a default results class instead of pysolr.Results
solr = pysolr.Solr('http://localhost:8983/solr', results_cls=dict)
"""
def __init__(self, url, decoder=None, timeout=60, results_cls=Results, search_handler='select', use_qt_param=False, always_commit=False,
auth=None, verify=True):
self.decoder = decoder or json.JSONDecoder()
self.url = url
self.timeout = timeout
self.log = self._get_log()
self.session = None
self.results_cls = results_cls
self.search_handler = search_handler
self.use_qt_param = use_qt_param
self.auth = auth
self.verify = verify
self.always_commit = always_commit
def get_session(self):
if self.session is None:
self.session = requests.Session()
self.session.stream = False
self.session.verify = self.verify
return self.session
def _get_log(self):
return LOG
def _create_full_url(self, path=''):
if len(path):
return '/'.join([self.url.rstrip('/'), path.lstrip('/')])
# No path? No problem.
return self.url
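    # An illustrative sketch, not part of the original pysolr source: with
    # Solr('http://localhost:8983/solr'), _create_full_url('update/') joins the
    # pieces with exactly one slash, giving
    # 'http://localhost:8983/solr/update/'; an empty path returns the base URL
    # unchanged.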
def _send_request(self, method, path='', body=None, headers=None, files=None):
url = self._create_full_url(path)
method = method.lower()
log_body = body
if headers is None:
headers = {}
if log_body is None:
log_body = ''
elif not isinstance(log_body, str):
log_body = repr(body)
self.log.debug("Starting request to '%s' (%s) with body '%s'...",
url, method, log_body[:10])
start_time = time.time()
session = self.get_session()
try:
requests_method = getattr(session, method)
except AttributeError as err:
            raise SolrError("Unable to use unknown HTTP method '{0}'.".format(method))
# Everything except the body can be Unicode. The body must be
# encoded to bytes to work properly on Py3.
bytes_body = body
if bytes_body is not None:
bytes_body = force_bytes(body)
try:
resp = requests_method(url, data=bytes_body, headers=headers, files=files,
timeout=self.timeout, auth=self.auth)
except requests.exceptions.Timeout as err:
error_message = "Connection to server '%s' timed out: %s"
self.log.error(error_message, url, err, exc_info=True)
raise SolrError(error_message % (url, err))
except requests.exceptions.ConnectionError as err:
error_message = "Failed to connect to server at '%s', are you sure that URL is correct? Checking it in a browser might help: %s"
params = (url, err)
self.log.error(error_message, *params, exc_info=True)
raise SolrError(error_message % params)
except HTTPException as err:
error_message = "Unhandled error: %s %s: %s"
self.log.error(error_message, method, url, err, exc_info=True)
raise SolrError(error_message % (method, url, err))
end_time = time.time()
self.log.info("Finished '%s' (%s) with body '%s' in %0.3f seconds, with status %s",
url, method, log_body[:10], end_time - start_time, resp.status_code)
if int(resp.status_code) != 200:
error_message = "Solr responded with an error (HTTP %s): %s"
solr_message = self._extract_error(resp)
self.log.error(error_message, resp.status_code, solr_message,
extra={'data': {'headers': resp.headers,
'response': resp.content,
'request_body': bytes_body,
'request_headers': headers}})
raise SolrError(error_message % (resp.status_code, solr_message))
return force_unicode(resp.content)
def _select(self, params, handler=None):
"""
:param params:
:param handler: defaults to self.search_handler (fallback to 'select')
:return:
"""
# specify json encoding of results
params['wt'] = 'json'
custom_handler = handler or self.search_handler
handler = 'select'
if custom_handler:
if self.use_qt_param:
params['qt'] = custom_handler
else:
handler = custom_handler
params_encoded = safe_urlencode(params, True)
if len(params_encoded) < 1024:
# Typical case.
path = '%s/?%s' % (handler, params_encoded)
return self._send_request('get', path)
else:
# Handles very long queries by submitting as a POST.
path = '%s/' % handler
headers = {
'Content-type': 'application/x-www-form-urlencoded; charset=utf-8',
}
return self._send_request('post', path, body=params_encoded, headers=headers)
def _mlt(self, params, handler='mlt'):
return self._select(params, handler)
def _suggest_terms(self, params, handler='terms'):
return self._select(params, handler)
def _update(self, message, clean_ctrl_chars=True, commit=None, softCommit=False, waitFlush=None, waitSearcher=None,
overwrite=None, handler='update'):
"""
Posts the given xml message to http://<self.url>/update and
returns the result.
Passing `clean_ctrl_chars` as False will prevent the message from being cleaned
of control characters (default True). This is done by default because
these characters would cause Solr to fail to parse the XML. Only pass
False if you're positive your data is clean.
"""
# Per http://wiki.apache.org/solr/UpdateXmlMessages, we can append a
# ``commit=true`` to the URL and have the commit happen without a
# second request.
query_vars = []
path_handler = handler
if self.use_qt_param:
path_handler = 'select'
query_vars.append('qt=%s' % safe_urlencode(handler, True))
path = '%s/' % path_handler
if commit is None:
commit = self.always_commit
if commit:
query_vars.append('commit=%s' % str(bool(commit)).lower())
elif softCommit:
query_vars.append('softCommit=%s' % str(bool(softCommit)).lower())
if waitFlush is not None:
query_vars.append('waitFlush=%s' % str(bool(waitFlush)).lower())
if overwrite is not None:
query_vars.append('overwrite=%s' % str(bool(overwrite)).lower())
if waitSearcher is not None:
query_vars.append('waitSearcher=%s' % str(bool(waitSearcher)).lower())
if query_vars:
path = '%s?%s' % (path, '&'.join(query_vars))
# Clean the message of ctrl characters.
if clean_ctrl_chars:
message = sanitize(message)
return self._send_request('post', path, message, {'Content-type': 'text/xml; charset=utf-8'})
def _extract_error(self, resp):
"""
Extract the actual error message from a solr response.
"""
reason = resp.headers.get('reason', None)
full_response = None
if reason is None:
try:
# if response is in json format
reason = resp.json()['error']['msg']
except KeyError:
# if json response has unexpected structure
full_response = resp.content
except ValueError:
# otherwise we assume it's html
reason, full_html = self._scrape_response(resp.headers, resp.content)
full_response = unescape_html(full_html)
msg = "[Reason: %s]" % reason
if reason is None:
msg += "\n%s" % full_response
return msg
def _scrape_response(self, headers, response):
"""
Scrape the html response.
"""
# identify the responding server
server_type = None
server_string = headers.get('server', '')
if server_string and 'jetty' in server_string.lower():
server_type = 'jetty'
if server_string and 'coyote' in server_string.lower():
server_type = 'tomcat'
reason = None
full_html = ''
dom_tree = None
# In Python3, response can be made of bytes
if IS_PY3 and hasattr(response, 'decode'):
response = response.decode()
if response.startswith('<?xml'):
# Try a strict XML parse
try:
soup = ElementTree.fromstring(response)
reason_node = soup.find('lst[@name="error"]/str[@name="msg"]')
tb_node = soup.find('lst[@name="error"]/str[@name="trace"]')
if reason_node is not None:
full_html = reason = reason_node.text.strip()
if tb_node is not None:
full_html = tb_node.text.strip()
if reason is None:
reason = full_html
# Since we had a precise match, we'll return the results now:
if reason and full_html:
return reason, full_html
except ElementTree.ParseError:
# XML parsing error, so we'll let the more liberal code handle it.
pass
if server_type == 'tomcat':
# Tomcat doesn't produce a valid XML response or consistent HTML:
m = re.search(r'<(h1)[^>]*>\s*(.+?)\s*</\1>', response, re.IGNORECASE)
if m:
reason = m.group(2)
else:
full_html = "%s" % response
else:
# Let's assume others do produce a valid XML response
try:
dom_tree = ElementTree.fromstring(response)
reason_node = None
# html page might be different for every server
if server_type == 'jetty':
reason_node = dom_tree.find('body/pre')
else:
reason_node = dom_tree.find('head/title')
if reason_node is not None:
reason = reason_node.text
if reason is None:
full_html = ElementTree.tostring(dom_tree)
except SyntaxError as err:
LOG.warning('Unable to extract error message from invalid XML: %s', err,
extra={'data': {'response': response}})
full_html = "%s" % response
full_html = force_unicode(full_html)
full_html = full_html.replace('\n', '')
full_html = full_html.replace('\r', '')
full_html = full_html.replace('<br/>', '')
full_html = full_html.replace('<br />', '')
full_html = full_html.strip()
return reason, full_html
# Conversion #############################################################
def _from_python(self, value):
"""
Converts python values to a form suitable for insertion into the xml
we send to solr.
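For example (illustrative values, not taken from the library's docs), a
``datetime.date(2021, 5, 1)`` becomes ``'2021-05-01T00:00:00Z'`` and
``True`` becomes the string ``'true'``.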
"""
if hasattr(value, 'strftime'):
if hasattr(value, 'hour'):
offset = value.utcoffset()
if offset:
value = value - offset
value = value.replace(tzinfo=None).isoformat() + 'Z'
else:
value = "%sT00:00:00Z" % value.isoformat()
elif isinstance(value, bool):
if value:
value = 'true'
else:
value = 'false'
else:
if IS_PY3:
# Python 3.X
if isinstance(value, bytes):
value = str(value, errors='replace')
else:
# Python 2.X
if isinstance(value, str):
value = unicode(value, errors='replace')
value = "{0}".format(value)
return clean_xml_string(value)
def _to_python(self, value):
"""
Converts values from Solr to native Python values.
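For example (illustrative values), the string ``'true'`` becomes ``True``
and a Solr date string such as ``'2021-05-01T00:00:00Z'`` becomes a
``datetime.datetime``.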
"""
if isinstance(value, (int, float, long, complex)):
return value
if isinstance(value, (list, tuple)):
value = value[0]
if value == 'true':
return True
elif value == 'false':
return False
is_string = False
if IS_PY3:
if isinstance(value, bytes):
value = force_unicode(value)
if isinstance(value, str):
is_string = True
else:
if isinstance(value, str):
value = force_unicode(value)
if isinstance(value, basestring):
is_string = True
if is_string:
possible_datetime = DATETIME_REGEX.search(value)
if possible_datetime:
date_values = possible_datetime.groupdict()
for dk, dv in date_values.items():
date_values[dk] = int(dv)
return datetime.datetime(date_values['year'], date_values['month'], date_values['day'], date_values['hour'], date_values['minute'], date_values['second'])
try:
# This is slightly gross but it's hard to tell otherwise what the
# string's original type might have been.
return ast.literal_eval(value)
except (ValueError, SyntaxError):
# If it fails, continue on.
pass
return value
def _is_null_value(self, value):
"""
Check if a given value is ``null``.
Criteria for this is based on values that shouldn't be included
in the Solr ``add`` request at all.
"""
if value is None:
return True
if IS_PY3:
# Python 3.X
if isinstance(value, str) and len(value) == 0:
return True
else:
# Python 2.X
if isinstance(value, basestring) and len(value) == 0:
return True
# TODO: This should probably be removed when solved in core Solr level?
return False
# API Methods ############################################################
def search(self, q, search_handler=None, **kwargs):
"""
Performs a search and returns the results.
Requires a ``q`` for a string version of the query to run.
Optionally accepts ``**kwargs`` for additional options to be passed
through the Solr URL.
Returns ``self.results_cls`` class object (defaults to
``pysolr.Results``)
Usage::
# All docs.
results = solr.search('*:*')
# Search with highlighting.
results = solr.search('ponies', **{
'hl': 'true',
'hl.fragsize': 10,
})
"""
params = {'q': q}
params.update(kwargs)
response = self._select(params, handler=search_handler)
decoded = self.decoder.decode(response)
self.log.debug(
"Found '%s' search results.",
# cover both cases: there is no response key or value is None
(decoded.get('response', {}) or {}).get('numFound', 0)
)
return self.results_cls(decoded)
def more_like_this(self, q, mltfl, handler='mlt', **kwargs):
"""
Finds and returns results similar to the provided query.
Returns ``self.results_cls`` class object (defaults to
``pysolr.Results``)
Requires Solr 1.3+.
Usage::
similar = solr.more_like_this('id:doc_234', 'text')
"""
params = {
'q': q,
'mlt.fl': mltfl,
}
params.update(kwargs)
response = self._mlt(params, handler=handler)
decoded = self.decoder.decode(response)
self.log.debug(
"Found '%s' MLT results.",
# cover both cases: there is no response key or value is None
(decoded.get('response', {}) or {}).get('numFound', 0)
)
return self.results_cls(decoded)
def suggest_terms(self, fields, prefix, handler='terms', **kwargs):
"""
Accepts a list of field names and a prefix
Returns a dictionary keyed on field name containing a list of
``(term, count)`` pairs
Requires Solr 1.4+.
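Usage (an illustrative sketch; the field name and prefix are assumptions)::
    suggestions = solr.suggest_terms(['title'], 'do')
    # e.g. {'title': [('dog', 23), ('door', 10)]}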
"""
params = {
'terms.fl': fields,
'terms.prefix': prefix,
}
params.update(kwargs)
response = self._suggest_terms(params, handler=handler)
result = self.decoder.decode(response)
terms = result.get("terms", {})
res = {}
# in Solr 1.x the value of terms is a flat list:
# ["field_name", ["dance",23,"dancers",10,"dancing",8,"dancer",6]]
#
# in Solr 3.x the value of terms is a dict:
# {"field_name": ["dance",23,"dancers",10,"dancing",8,"dancer",6]}
if isinstance(terms, (list, tuple)):
terms = dict(zip(terms[0::2], terms[1::2]))
for field, values in terms.items():
tmp = list()
while values:
tmp.append((values.pop(0), values.pop(0)))
res[field] = tmp
self.log.debug("Found '%d' Term suggestions results.", sum(len(j) for i, j in res.items()))
return res
def _build_doc(self, doc, boost=None, fieldUpdates=None):
doc_elem = ElementTree.Element('doc')
for key, value in doc.items():
if key == NESTED_DOC_KEY:
for child in value:
doc_elem.append(self._build_doc(child, boost, fieldUpdates))
continue
if key == 'boost':
doc_elem.set('boost', force_unicode(value))
continue
# To avoid multiple code-paths we'd like to treat all of our values as iterables:
if isinstance(value, (list, tuple, set)):
values = value
else:
values = (value, )
for bit in values:
if self._is_null_value(bit):
continue
if key == '_doc':
child = self._build_doc(bit, boost)
doc_elem.append(child)
continue
attrs = {'name': key}
if fieldUpdates and key in fieldUpdates:
attrs['update'] = fieldUpdates[key]
if boost and key in boost:
attrs['boost'] = force_unicode(boost[key])
field = ElementTree.Element('field', **attrs)
field.text = self._from_python(bit)
doc_elem.append(field)
return doc_elem
def add(self, docs, boost=None, fieldUpdates=None, commit=None, softCommit=False, commitWithin=None, waitFlush=None,
waitSearcher=None, overwrite=None, handler='update'):
"""
Adds or updates documents.
Requires ``docs``, which is a list of dictionaries. Each key is the
field name and each value is the value to index.
Optionally accepts ``commit``. Default is ``None``; ``None`` falls back to the connection's ``always_commit`` setting.
Optionally accepts ``softCommit``. Default is ``False``.
Optionally accepts ``boost``. Default is ``None``.
Optionally accepts ``fieldUpdates``. Default is ``None``.
Optionally accepts ``commitWithin``. Default is ``None``.
Optionally accepts ``waitFlush``. Default is ``None``.
Optionally accepts ``waitSearcher``. Default is ``None``.
Optionally accepts ``overwrite``. Default is ``None``.
Usage::
solr.add([
{
"id": "doc_1",
"title": "A test document",
},
{
"id": "doc_2",
"title": "The Banana: Tasty or Dangerous?",
},
])
"""
start_time = time.time()
self.log.debug("Starting to build add request...")
message = ElementTree.Element('add')
if commitWithin:
message.set('commitWithin', str(commitWithin))
for doc in docs:
el = self._build_doc(doc, boost=boost, fieldUpdates=fieldUpdates)
message.append(el)
# This returns a bytestring. Ugh.
m = ElementTree.tostring(message, encoding='utf-8')
# Convert back to Unicode please.
m = force_unicode(m)
end_time = time.time()
self.log.debug("Built add request of %s docs in %0.2f seconds.", len(message), end_time - start_time)
return self._update(m, commit=commit, softCommit=softCommit, waitFlush=waitFlush, waitSearcher=waitSearcher,
overwrite=overwrite, handler=handler)
def delete(self, id=None, q=None, commit=None, softCommit=False, waitFlush=None, waitSearcher=None, handler='update'):
"""
Deletes documents.
Requires *either* ``id`` or ``q``. ``id`` is if you know the
specific document id to remove. Note that ``id`` can also be a list of
document ids to be deleted. ``q`` is a Lucene-style query
indicating a collection of documents to delete.
Optionally accepts ``commit``. Default is ``None``; ``None`` falls back to the connection's ``always_commit`` setting.
Optionally accepts ``softCommit``. Default is ``False``.
Optionally accepts ``waitFlush``. Default is ``None``.
Optionally accepts ``waitSearcher``. Default is ``None``.
Usage::
solr.delete(id='doc_12')
solr.delete(id=['doc_1', 'doc_3'])
solr.delete(q='*:*')
"""
if id is None and q is None:
raise ValueError('You must specify "id" or "q".')
elif id is not None and q is not None:
raise ValueError('You may only specify "id" OR "q", not both.')
elif id is not None:
if not isinstance(id, (list, set, tuple)):
id = [id]
else:
id = list(filter(None, id))
if id:
m = '<delete>%s</delete>' % ''.join('<id>%s</id>' % i for i in id)
else:
raise ValueError('The list of documents to delete was empty.')
elif q is not None:
m = '<delete><query>%s</query></delete>' % q
return self._update(m, commit=commit, softCommit=softCommit, waitFlush=waitFlush, waitSearcher=waitSearcher, handler=handler)
def commit(self, softCommit=False, waitFlush=None, waitSearcher=None, expungeDeletes=None, handler='update'):
"""
Forces Solr to write the index data to disk.
Optionally accepts ``expungeDeletes``. Default is ``None``.
Optionally accepts ``waitFlush``. Default is ``None``.
Optionally accepts ``waitSearcher``. Default is ``None``.
Optionally accepts ``softCommit``. Default is ``False``.
Usage::
solr.commit()
"""
if expungeDeletes is not None:
msg = '<commit expungeDeletes="%s" />' % str(bool(expungeDeletes)).lower()
else:
msg = '<commit />'
return self._update(msg, commit=not softCommit, softCommit=softCommit, waitFlush=waitFlush, waitSearcher=waitSearcher, handler=handler)
def optimize(self, commit=True, waitFlush=None, waitSearcher=None, maxSegments=None, handler='update'):
"""
Tells Solr to streamline the number of segments used, essentially a
defragmentation operation.
Optionally accepts ``maxSegments``. Default is ``None``.
Optionally accepts ``waitFlush``. Default is ``None``.
Optionally accepts ``waitSearcher``. Default is ``None``.
Usage::
solr.optimize()
"""
if maxSegments:
msg = '<optimize maxSegments="%d" />' % maxSegments
else:
msg = '<optimize />'
return self._update(msg, commit=commit, waitFlush=waitFlush, waitSearcher=waitSearcher, handler=handler)
def extract(self, file_obj, extractOnly=True, handler='update/extract', **kwargs):
"""
POSTs a file to the Solr ExtractingRequestHandler so rich content can
be processed using Apache Tika. See the Solr wiki for details:
http://wiki.apache.org/solr/ExtractingRequestHandler
The ExtractingRequestHandler has a very simple model: it extracts
contents and metadata from the uploaded file and inserts it directly
into the index. This is rarely useful as it allows no way to store
additional data or otherwise customize the record. Instead, by default
we'll use the extract-only mode to extract the data without indexing it
so the caller has the opportunity to process it as appropriate; call
with ``extractOnly=False`` if you want to insert with no additional
processing.
Returns None if metadata cannot be extracted; otherwise returns a
dictionary containing at least two keys:
:contents:
Extracted full-text content, if applicable
:metadata:
key:value pairs of text strings
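Usage (an illustrative sketch; the file name is an assumption)::
    with open('report.pdf', 'rb') as f:
        extracted = solr.extract(f)
    print(extracted['metadata'])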
"""
if not hasattr(file_obj, "name"):
raise ValueError("extract() requires file-like objects which have a defined name property")
params = {
"extractOnly": "true" if extractOnly else "false",
"lowernames": "true",
"wt": "json",
}
params.update(kwargs)
filename = quote(file_obj.name.encode('utf-8'))
try:
# We'll provide the file using its true name as Tika may use that
# as a file type hint:
resp = self._send_request('post', handler,
body=params,
files={'file': (filename, file_obj)})
except (IOError, SolrError) as err:
self.log.error("Failed to extract document metadata: %s", err,
exc_info=True)
raise
try:
data = json.loads(resp)
except ValueError as err:
self.log.error("Failed to load JSON response: %s", err,
exc_info=True)
raise
data['contents'] = data.pop(filename, None)
data['metadata'] = metadata = {}
raw_metadata = data.pop("%s_metadata" % filename, None)
if raw_metadata:
# The raw format is somewhat annoying: it's a flat list of
# alternating keys and value lists
while raw_metadata:
metadata[raw_metadata.pop()] = raw_metadata.pop()
return data
class SolrCoreAdmin(object):
"""
Handles core admin operations: see http://wiki.apache.org/solr/CoreAdmin
This must be initialized with the full admin cores URL::
solr_admin = SolrCoreAdmin('http://localhost:8983/solr/admin/cores')
status = solr_admin.status()
Operations offered by Solr are:
1. STATUS
2. CREATE
3. RELOAD
4. RENAME
5. ALIAS
6. SWAP
7. UNLOAD
8. LOAD (not currently implemented)
"""
def __init__(self, url, *args, **kwargs):
super(SolrCoreAdmin, self).__init__(*args, **kwargs)
self.url = url
def _get_url(self, url, params={}, headers={}):
resp = requests.get(url, params=params, headers=headers)
return force_unicode(resp.content)
def status(self, core=None):
"""http://wiki.apache.org/solr/CoreAdmin#head-9be76f5a459882c5c093a7a1456e98bea7723953"""
params = {
'action': 'STATUS',
}
if core is not None:
params.update(core=core)
return self._get_url(self.url, params=params)
def create(self, name, instance_dir=None, config='solrconfig.xml', schema='schema.xml'):
"""http://wiki.apache.org/solr/CoreAdmin#head-7ca1b98a9df8b8ca0dcfbfc49940ed5ac98c4a08"""
params = {
'action': 'CREATE',
'name': name,
'config': config,
'schema': schema,
}
if instance_dir is None:
params.update(instanceDir=name)
else:
params.update(instanceDir=instance_dir)
return self._get_url(self.url, params=params)
def reload(self, core):
"""http://wiki.apache.org/solr/CoreAdmin#head-3f125034c6a64611779442539812067b8b430930"""
params = {
'action': 'RELOAD',
'core': core,
}
return self._get_url(self.url, params=params)
def rename(self, core, other):
"""http://wiki.apache.org/solr/CoreAdmin#head-9473bee1abed39e8583ba45ef993bebb468e3afe"""
params = {
'action': 'RENAME',
'core': core,
'other': other,
}
return self._get_url(self.url, params=params)
def swap(self, core, other):
"""http://wiki.apache.org/solr/CoreAdmin#head-928b872300f1b66748c85cebb12a59bb574e501b"""
params = {
'action': 'SWAP',
'core': core,
'other': other,
}
return self._get_url(self.url, params=params)
def unload(self, core):
"""http://wiki.apache.org/solr/CoreAdmin#head-f5055a885932e2c25096a8856de840b06764d143"""
params = {
'action': 'UNLOAD',
'core': core,
}
return self._get_url(self.url, params=params)
def load(self, core):
raise NotImplementedError('Solr 1.4 and below do not support this operation.')
# Using two-tuples to preserve order.
REPLACEMENTS = (
# Nuke nasty control characters.
(b'\x00', b''), # Start of heading
(b'\x01', b''), # Start of heading
(b'\x02', b''), # Start of text
(b'\x03', b''), # End of text
(b'\x04', b''), # End of transmission
(b'\x05', b''), # Enquiry
(b'\x06', b''), # Acknowledge
(b'\x07', b''), # Ring terminal bell
(b'\x08', b''), # Backspace
(b'\x0b', b''), # Vertical tab
(b'\x0c', b''), # Form feed
(b'\x0e', b''), # Shift out
(b'\x0f', b''), # Shift in
(b'\x10', b''), # Data link escape
(b'\x11', b''), # Device control 1
(b'\x12', b''), # Device control 2
(b'\x13', b''), # Device control 3
(b'\x14', b''), # Device control 4
(b'\x15', b''), # Negative acknowledge
(b'\x16', b''), # Synchronous idle
(b'\x17', b''), # End of transmission block
(b'\x18', b''), # Cancel
(b'\x19', b''), # End of medium
(b'\x1a', b''), # Substitute character
(b'\x1b', b''), # Escape
(b'\x1c', b''), # File separator
(b'\x1d', b''), # Group separator
(b'\x1e', b''), # Record separator
(b'\x1f', b''), # Unit separator
)
def sanitize(data):
fixed_string = force_bytes(data)
for bad, good in REPLACEMENTS:
fixed_string = fixed_string.replace(bad, good)
return force_unicode(fixed_string)
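# Illustrative example of sanitize() (the input string is hypothetical):
#   sanitize(u'bad\x00value')  ->  u'badvalue'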
class SolrCloud(Solr):
def __init__(self, zookeeper, collection, decoder=None, timeout=60, retry_timeout=0.2, auth=None, verify=True,
*args, **kwargs):
url = zookeeper.getRandomURL(collection)
self.auth = auth
self.verify = verify
super(SolrCloud, self).__init__(url, decoder=decoder, timeout=timeout, auth=self.auth, verify = self.verify,
*args, **kwargs)
self.zookeeper = zookeeper
self.collection = collection
self.retry_timeout = retry_timeout
def _randomized_request(self, method, path, body, headers, files):
self.url = self.zookeeper.getRandomURL(self.collection)
LOG.debug('Using random URL: %s', self.url)
return Solr._send_request(self, method, path, body, headers, files)
def _send_request(self, method, path='', body=None, headers=None, files=None):
# FIXME: this needs to have a maximum retry counter rather than waiting endlessly
try:
return self._randomized_request(method, path, body, headers, files)
except requests.exceptions.RequestException:
LOG.warning('RequestException, retrying after %fs', self.retry_timeout, exc_info=True)
time.sleep(self.retry_timeout) # give zookeeper time to notice
return self._randomized_request(method, path, body, headers, files)
except SolrError:
LOG.warning('SolrException, retrying after %fs', self.retry_timeout, exc_info=True)
time.sleep(self.retry_timeout) # give zookeeper time to notice
return self._randomized_request(method, path, body, headers, files)
def _update(self, *args, **kwargs):
self.url = self.zookeeper.getLeaderURL(self.collection)
LOG.debug('Using random leader URL: %s', self.url)
return Solr._update(self, *args, **kwargs)
class ZooKeeper(object):
# Constants used by the REST API:
LIVE_NODES_ZKNODE = '/live_nodes'
ALIASES = '/aliases.json'
CLUSTER_STATE = '/clusterstate.json'
SHARDS = 'shards'
REPLICAS = 'replicas'
STATE = 'state'
ACTIVE = 'active'
LEADER = 'leader'
BASE_URL = 'base_url'
TRUE = 'true'
FALSE = 'false'
COLLECTION = 'collection'
def __init__(self, zkServerAddress, timeout=15, max_retries=-1, kazoo_client=None):
if KazooClient is None:
logging.error('ZooKeeper requires the `kazoo` library to be installed')
raise RuntimeError
self.collections = {}
self.liveNodes = {}
self.aliases = {}
self.state = None
if kazoo_client is None:
self.zk = KazooClient(zkServerAddress, read_only=True, timeout=timeout,
command_retry={'max_tries': max_retries},
connection_retry={'max_tries': max_retries})
else:
self.zk = kazoo_client
self.zk.start()
def connectionListener(state):
if state == KazooState.LOST:
self.state = state
elif state == KazooState.SUSPENDED:
self.state = state
self.zk.add_listener(connectionListener)
@self.zk.DataWatch(ZooKeeper.CLUSTER_STATE)
def watchClusterState(data, *args, **kwargs):
if not data:
LOG.warning("No cluster state available: no collections defined?")
else:
self.collections = json.loads(data.decode('utf-8'))
LOG.info('Updated collections: %s', self.collections)
@self.zk.ChildrenWatch(ZooKeeper.LIVE_NODES_ZKNODE)
def watchLiveNodes(children):
self.liveNodes = children
LOG.info("Updated live nodes: %s", children)
@self.zk.DataWatch(ZooKeeper.ALIASES)
def watchAliases(data, stat):
if data:
json_data = json.loads(data.decode('utf-8'))
if ZooKeeper.COLLECTION in json_data:
self.aliases = json_data[ZooKeeper.COLLECTION]
else:
LOG.warning('Expected to find %s in alias update %s',
ZooKeeper.COLLECTION, json_data.keys())
else:
self.aliases = None
LOG.info("Updated aliases: %s", self.aliases)
def getHosts(self, collname, only_leader=False, seen_aliases=None):
if self.aliases and collname in self.aliases:
return self.getAliasHosts(collname, only_leader, seen_aliases)
hosts = []
if collname not in self.collections:
raise SolrError("Unknown collection: %s" % collname)
collection = self.collections[collname]
shards = collection[ZooKeeper.SHARDS]
for shardname in shards.keys():
shard = shards[shardname]
if shard[ZooKeeper.STATE] == ZooKeeper.ACTIVE:
replicas = shard[ZooKeeper.REPLICAS]
for replicaname in replicas.keys():
replica = replicas[replicaname]
if replica[ZooKeeper.STATE] == ZooKeeper.ACTIVE:
if not only_leader or (replica.get(ZooKeeper.LEADER, None) == ZooKeeper.TRUE):
base_url = replica[ZooKeeper.BASE_URL]
if base_url not in hosts:
hosts.append(base_url)
return hosts
def getAliasHosts(self, collname, only_leader, seen_aliases):
if seen_aliases:
if collname in seen_aliases:
LOG.warning("%s in circular alias definition - ignored", collname)
return []
else:
seen_aliases = []
seen_aliases.append(collname)
collections = self.aliases[collname].split(",")
hosts = []
for collection in collections:
for host in self.getHosts(collection, only_leader, seen_aliases):
if host not in hosts:
hosts.append(host)
return hosts
def getRandomURL(self, collname, only_leader=False):
hosts = self.getHosts(collname, only_leader=only_leader)
if not hosts:
raise SolrError('ZooKeeper returned no active shards!')
return '%s/%s' % (random.choice(hosts), collname)
def getLeaderURL(self, collname):
return self.getRandomURL(collname, only_leader=True)
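# Illustrative SolrCloud usage sketch (the ZooKeeper address and collection
# name below are assumptions, not values defined in this module):
#   zk = ZooKeeper('localhost:2181')
#   cloud = SolrCloud(zk, 'collection1')
#   results = cloud.search('*:*')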
|
[] |
[] |
[
"DEBUG_PYSOLR"
] |
[]
|
["DEBUG_PYSOLR"]
|
python
| 1 | 0 | |
tests/test_integration.py
|
import os
import re
import unittest
import html5lib
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core import signing
from django.core.checks import Warning, run_checks
from django.db import connection
from django.http import HttpResponse
from django.template.loader import get_template
from django.test import RequestFactory, SimpleTestCase
from django.test.utils import override_settings
from debug_toolbar.middleware import DebugToolbarMiddleware, show_toolbar
from debug_toolbar.toolbar import DebugToolbar
from .base import BaseTestCase, IntegrationTestCase
from .views import regular_view
try:
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.wait import WebDriverWait
except ImportError:
webdriver = None
rf = RequestFactory()
@override_settings(DEBUG=True)
class DebugToolbarTestCase(BaseTestCase):
def test_show_toolbar(self):
self.assertTrue(show_toolbar(self.request))
def test_show_toolbar_DEBUG(self):
with self.settings(DEBUG=False):
self.assertFalse(show_toolbar(self.request))
def test_show_toolbar_INTERNAL_IPS(self):
with self.settings(INTERNAL_IPS=[]):
self.assertFalse(show_toolbar(self.request))
def _resolve_stats(self, path):
# takes stats from Request panel
self.request.path = path
panel = self.toolbar.get_panel_by_id("RequestPanel")
response = panel.process_request(self.request)
panel.generate_stats(self.request, response)
return panel.get_stats()
def test_url_resolving_positional(self):
stats = self._resolve_stats("/resolving1/a/b/")
self.assertEqual(stats["view_urlname"], "positional-resolving")
self.assertEqual(stats["view_func"], "tests.views.resolving_view")
self.assertEqual(stats["view_args"], ("a", "b"))
self.assertEqual(stats["view_kwargs"], {})
def test_url_resolving_named(self):
stats = self._resolve_stats("/resolving2/a/b/")
self.assertEqual(stats["view_args"], ())
self.assertEqual(stats["view_kwargs"], {"arg1": "a", "arg2": "b"})
def test_url_resolving_mixed(self):
stats = self._resolve_stats("/resolving3/a/")
self.assertEqual(stats["view_args"], ("a",))
self.assertEqual(stats["view_kwargs"], {"arg2": "default"})
def test_url_resolving_bad(self):
stats = self._resolve_stats("/non-existing-url/")
self.assertEqual(stats["view_urlname"], "None")
self.assertEqual(stats["view_args"], "None")
self.assertEqual(stats["view_kwargs"], "None")
self.assertEqual(stats["view_func"], "<no view>")
def test_middleware_response_insertion(self):
def get_response(request):
return regular_view(request, "İ")
response = DebugToolbarMiddleware(get_response)(self.request)
# check toolbar insertion before "</body>"
self.assertContains(response, "</div>\n</body>")
def test_cache_page(self):
self.client.get("/cached_view/")
self.assertEqual(len(self.toolbar.get_panel_by_id("CachePanel").calls), 3)
self.client.get("/cached_view/")
self.assertEqual(len(self.toolbar.get_panel_by_id("CachePanel").calls), 5)
@override_settings(DEBUG=True)
class DebugToolbarIntegrationTestCase(IntegrationTestCase):
def test_middleware(self):
response = self.client.get("/execute_sql/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "djDebug")
@override_settings(DEFAULT_CHARSET="iso-8859-1")
def test_non_utf8_charset(self):
response = self.client.get("/regular/ASCII/")
self.assertContains(response, "ASCII") # template
self.assertContains(response, "djDebug") # toolbar
response = self.client.get("/regular/LÀTÍN/")
self.assertContains(response, "LÀTÍN") # template
self.assertContains(response, "djDebug") # toolbar
def test_html5_validation(self):
response = self.client.get("/regular/HTML5/")
parser = html5lib.HTMLParser()
content = response.content
parser.parse(content)
if parser.errors:
default_msg = ["Content is invalid HTML:"]
lines = content.split(b"\n")
for position, errorcode, datavars in parser.errors:
default_msg.append(" %s" % html5lib.constants.E[errorcode] % datavars)
default_msg.append(" %r" % lines[position[0] - 1])
msg = self._formatMessage(None, "\n".join(default_msg))
raise self.failureException(msg)
def test_render_panel_checks_show_toolbar(self):
def get_response(request):
return HttpResponse()
toolbar = DebugToolbar(rf.get("/"), get_response)
toolbar.store()
url = "/__debug__/render_panel/"
data = {"store_id": toolbar.store_id, "panel_id": "VersionsPanel"}
response = self.client.get(url, data)
self.assertEqual(response.status_code, 200)
response = self.client.get(url, data, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
self.assertEqual(response.status_code, 200)
with self.settings(INTERNAL_IPS=[]):
response = self.client.get(url, data)
self.assertEqual(response.status_code, 404)
response = self.client.get(
url, data, HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 404)
def test_middleware_render_toolbar_json(self):
"""Verify the toolbar is rendered and data is stored for a json request."""
self.assertEqual(len(DebugToolbar._store), 0)
data = {"foo": "bar"}
response = self.client.get("/json_view/", data, content_type="application/json")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.decode("utf-8"), '{"foo": "bar"}')
# Check the history panel's stats to verify the toolbar rendered properly.
self.assertEqual(len(DebugToolbar._store), 1)
toolbar = list(DebugToolbar._store.values())[0]
self.assertEqual(
toolbar.get_panel_by_id("HistoryPanel").get_stats()["data"],
{"foo": ["bar"]},
)
def test_template_source_checks_show_toolbar(self):
template = get_template("basic.html")
url = "/__debug__/template_source/"
data = {
"template": template.template.name,
"template_origin": signing.dumps(template.template.origin.name),
}
response = self.client.get(url, data)
self.assertEqual(response.status_code, 200)
response = self.client.get(url, data, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
self.assertEqual(response.status_code, 200)
with self.settings(INTERNAL_IPS=[]):
response = self.client.get(url, data)
self.assertEqual(response.status_code, 404)
response = self.client.get(
url, data, HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 404)
def test_sql_select_checks_show_toolbar(self):
url = "/__debug__/sql_select/"
data = {
"sql": "SELECT * FROM auth_user",
"raw_sql": "SELECT * FROM auth_user",
"params": "{}",
"alias": "default",
"duration": "0",
"hash": "6e12daa636b8c9a8be993307135458f90a877606",
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, data, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
self.assertEqual(response.status_code, 200)
with self.settings(INTERNAL_IPS=[]):
response = self.client.post(url, data)
self.assertEqual(response.status_code, 404)
response = self.client.post(
url, data, HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 404)
def test_sql_explain_checks_show_toolbar(self):
url = "/__debug__/sql_explain/"
data = {
"sql": "SELECT * FROM auth_user",
"raw_sql": "SELECT * FROM auth_user",
"params": "{}",
"alias": "default",
"duration": "0",
"hash": "6e12daa636b8c9a8be993307135458f90a877606",
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, data, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
self.assertEqual(response.status_code, 200)
with self.settings(INTERNAL_IPS=[]):
response = self.client.post(url, data)
self.assertEqual(response.status_code, 404)
response = self.client.post(
url, data, HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 404)
@unittest.skipUnless(
connection.vendor == "postgresql", "Test valid only on PostgreSQL"
)
def test_sql_explain_postgres_json_field(self):
url = "/__debug__/sql_explain/"
base_query = (
'SELECT * FROM "tests_postgresjson" WHERE "tests_postgresjson"."field" @>'
)
query = base_query + """ '{"foo": "bar"}'"""
data = {
"sql": query,
"raw_sql": base_query + " %s",
"params": '["{\\"foo\\": \\"bar\\"}"]',
"alias": "default",
"duration": "0",
"hash": "2b7172eb2ac8e2a8d6f742f8a28342046e0d00ba",
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, data, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
self.assertEqual(response.status_code, 200)
with self.settings(INTERNAL_IPS=[]):
response = self.client.post(url, data)
self.assertEqual(response.status_code, 404)
response = self.client.post(
url, data, HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 404)
def test_sql_profile_checks_show_toolbar(self):
url = "/__debug__/sql_profile/"
data = {
"sql": "SELECT * FROM auth_user",
"raw_sql": "SELECT * FROM auth_user",
"params": "{}",
"alias": "default",
"duration": "0",
"hash": "6e12daa636b8c9a8be993307135458f90a877606",
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, data, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
self.assertEqual(response.status_code, 200)
with self.settings(INTERNAL_IPS=[]):
response = self.client.post(url, data)
self.assertEqual(response.status_code, 404)
response = self.client.post(
url, data, HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 404)
@override_settings(DEBUG_TOOLBAR_CONFIG={"RENDER_PANELS": True})
def test_data_store_id_not_rendered_when_none(self):
url = "/regular/basic/"
response = self.client.get(url)
self.assertIn(b'id="djDebug"', response.content)
self.assertNotIn(b"data-store-id", response.content)
def test_view_returns_template_response(self):
response = self.client.get("/template_response/basic/")
self.assertEqual(response.status_code, 200)
@override_settings(DEBUG_TOOLBAR_CONFIG={"DISABLE_PANELS": set()})
def test_intercept_redirects(self):
response = self.client.get("/redirect/")
self.assertEqual(response.status_code, 200)
# Link to LOCATION header.
self.assertIn(b'href="/regular/redirect/"', response.content)
def test_server_timing_headers(self):
response = self.client.get("/execute_sql/")
server_timing = response["Server-Timing"]
expected_partials = [
r'TimerPanel_utime;dur=(\d)*(\.(\d)*)?;desc="User CPU time", ',
r'TimerPanel_stime;dur=(\d)*(\.(\d)*)?;desc="System CPU time", ',
r'TimerPanel_total;dur=(\d)*(\.(\d)*)?;desc="Total CPU time", ',
r'TimerPanel_total_time;dur=(\d)*(\.(\d)*)?;desc="Elapsed time", ',
r'SQLPanel_sql_time;dur=(\d)*(\.(\d)*)?;desc="SQL 1 queries", ',
r'CachePanel_total_time;dur=0;desc="Cache 0 Calls"',
]
for expected in expected_partials:
self.assertTrue(re.compile(expected).search(server_timing))
@unittest.skipIf(webdriver is None, "selenium isn't installed")
@unittest.skipUnless(
"DJANGO_SELENIUM_TESTS" in os.environ, "selenium tests not requested"
)
@override_settings(DEBUG=True)
class DebugToolbarLiveTestCase(StaticLiveServerTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.selenium = webdriver.Firefox()
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super().tearDownClass()
def test_basic(self):
self.selenium.get(self.live_server_url + "/regular/basic/")
version_panel = self.selenium.find_element_by_id("VersionsPanel")
# Versions panel isn't loaded
with self.assertRaises(NoSuchElementException):
version_panel.find_element_by_tag_name("table")
# Click to show the versions panel
self.selenium.find_element_by_class_name("VersionsPanel").click()
# Version panel loads
table = WebDriverWait(self.selenium, timeout=10).until(
lambda selenium: version_panel.find_element_by_tag_name("table")
)
self.assertIn("Name", table.text)
self.assertIn("Version", table.text)
@override_settings(
DEBUG_TOOLBAR_CONFIG={
"DISABLE_PANELS": {"debug_toolbar.panels.redirects.RedirectsPanel"}
}
)
def test_basic_jinja(self):
self.selenium.get(self.live_server_url + "/regular_jinja/basic")
template_panel = self.selenium.find_element_by_id("TemplatesPanel")
# Click to show the template panel
self.selenium.find_element_by_class_name("TemplatesPanel").click()
self.assertIn("Templates (1 rendered)", template_panel.text)
self.assertIn("jinja2/basic.jinja", template_panel.text)
@override_settings(
DEBUG_TOOLBAR_CONFIG={
"DISABLE_PANELS": {"debug_toolbar.panels.redirects.RedirectsPanel"}
}
)
def test_rerender_on_history_switch(self):
self.selenium.get(self.live_server_url + "/regular_jinja/basic")
# Make a new request so the history panel has more than one option.
self.selenium.get(self.live_server_url + "/execute_sql/")
template_panel = self.selenium.find_element_by_id("HistoryPanel")
# Record the current side panel of buttons for later comparison.
previous_button_panel = self.selenium.find_element_by_id(
"djDebugPanelList"
).text
# Click to show the history panel
self.selenium.find_element_by_class_name("HistoryPanel").click()
# Click to switch back to the jinja page view snapshot
list(template_panel.find_elements_by_css_selector("button"))[-1].click()
current_button_panel = self.selenium.find_element_by_id("djDebugPanelList").text
# Verify the button side panels have updated.
self.assertNotEqual(previous_button_panel, current_button_panel)
self.assertNotIn("1 query", current_button_panel)
self.assertIn("1 query", previous_button_panel)
@override_settings(DEBUG_TOOLBAR_CONFIG={"RESULTS_CACHE_SIZE": 0})
def test_expired_store(self):
self.selenium.get(self.live_server_url + "/regular/basic/")
version_panel = self.selenium.find_element_by_id("VersionsPanel")
# Click to show the version panel
self.selenium.find_element_by_class_name("VersionsPanel").click()
# Version panel doesn't load
error = WebDriverWait(self.selenium, timeout=10).until(
lambda selenium: version_panel.find_element_by_tag_name("p")
)
self.assertIn("Data for this panel isn't available anymore.", error.text)
@override_settings(
DEBUG=True,
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"OPTIONS": {
"loaders": [
(
"django.template.loaders.cached.Loader",
(
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
),
)
]
},
}
],
)
def test_django_cached_template_loader(self):
self.selenium.get(self.live_server_url + "/regular/basic/")
version_panel = self.selenium.find_element_by_id("TemplatesPanel")
# Click to show the templates panel
self.selenium.find_element_by_class_name("TemplatesPanel").click()
# Templates panel loads
trigger = WebDriverWait(self.selenium, timeout=10).until(
lambda selenium: version_panel.find_element_by_css_selector(".remoteCall")
)
trigger.click()
# Verify the code is displayed
WebDriverWait(self.selenium, timeout=10).until(
lambda selenium: self.selenium.find_element_by_css_selector(
"#djDebugWindow code"
)
)
@override_settings(DEBUG=True)
class DebugToolbarSystemChecksTestCase(SimpleTestCase):
@override_settings(
MIDDLEWARE=[
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.gzip.GZipMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
)
def test_check_good_configuration(self):
messages = run_checks()
self.assertEqual(messages, [])
@override_settings(
MIDDLEWARE=[
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
]
)
def test_check_missing_middleware_error(self):
messages = run_checks()
self.assertEqual(
messages,
[
Warning(
"debug_toolbar.middleware.DebugToolbarMiddleware is "
"missing from MIDDLEWARE.",
hint="Add debug_toolbar.middleware.DebugToolbarMiddleware "
"to MIDDLEWARE.",
id="debug_toolbar.W001",
)
],
)
@override_settings(
MIDDLEWARE=[
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
"django.middleware.gzip.GZipMiddleware",
]
)
def test_check_gzip_middleware_error(self):
messages = run_checks()
self.assertEqual(
messages,
[
Warning(
"debug_toolbar.middleware.DebugToolbarMiddleware occurs "
"before django.middleware.gzip.GZipMiddleware in "
"MIDDLEWARE.",
hint="Move debug_toolbar.middleware.DebugToolbarMiddleware "
"to after django.middleware.gzip.GZipMiddleware in "
"MIDDLEWARE.",
id="debug_toolbar.W003",
)
],
)
@override_settings(
MIDDLEWARE_CLASSES=[
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.gzip.GZipMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
)
def test_check_middleware_classes_error(self):
messages = run_checks()
self.assertIn(
Warning(
"debug_toolbar is incompatible with MIDDLEWARE_CLASSES setting.",
hint="Use MIDDLEWARE instead of MIDDLEWARE_CLASSES",
id="debug_toolbar.W004",
),
messages,
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
glintv2/celery_app.py
|
from __future__ import absolute_import, unicode_literals
from celery import Celery
from celery.utils.log import get_task_logger
from django.conf import settings
from .utils import (jsonify_image_list, update_pending_transactions, get_images_for_group,
    set_images_for_group, process_pending_transactions, process_state_changes,
    queue_state_change, find_image_by_name, check_delete_restrictions,
    decrement_transactions, get_num_transactions, repo_proccesed,
    check_for_repo_changes, set_collection_task, check_for_image_conflicts,
    set_conflicts_for_group, check_cached_images, add_cached_image, do_cache_cleanup)
from glintwebui.glint_api import repo_connector
import glintv2.config as config
logger = get_task_logger(__name__)
import os
import time
import redis
import subprocess
# Indicate Celery to use the default Django settings module
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'glintv2.settings')
import django
django.setup()
app = Celery('glintv2', broker=config.celery_url, backend=config.celery_backend)
app.config_from_object('django.conf:settings')
@app.task(bind=True)
def debug_task(self):
logger.debug('Request: {0!r}'.format(self.request))
@app.task(bind=True)
def image_collection(self):
from glintwebui.models import Group_Resources, User_Group, Group
from glintwebui.glint_api import repo_connector
wait_period = 0
term_signal = False
num_tx = get_num_transactions()
# permanent loop to monitor image states and to queue up tasks
while(True):
# First check for term signal
logger.debug("Term signal: %s" % term_signal)
if term_signal is True:
#term signal detected, break while loop
logger.info("Term signal detected, shutting down")
set_collection_task(False)
return
logger.info("Start Image collection")
group_list = Group.objects.all()
#if there are no active transactions clean up the cache folders
if(num_tx == 0):
do_cache_cleanup()
for group in group_list:
repo_list = Group_Resources.objects.filter(group_name=group.group_name)
image_list = ()
for repo in repo_list:
try:
rcon = repo_connector(auth_url=repo.auth_url, project=repo.tenant, username=repo.username, password=repo.password, user_domain_name=repo.user_domain_name, project_domain_name=repo.project_domain_name, alias=repo.cloud_name)
image_list = image_list + rcon.image_list
except Exception as e:
logger.error(e)
logger.error("Could not connet to repo: %s at %s", (repo.tenant, repo.auth_url))
# take the new json and compare it to the previous one
# and merge the differences, generally the new one will be used but if there are any images awaiting
# transfer or deletion they must be added to the list
updated_img_list = update_pending_transactions(get_images_for_group(group.group_name), jsonify_image_list(image_list=image_list, repo_list=repo_list))
# now we have the most current version of the image matrix for this group
# The last thing that needs to be done here is to process the PROJECTX_pending_transactions
logger.info("Processing pending Transactions for group: %s" % group.group_name)
updated_img_list = process_pending_transactions(group_name=group.group_name, json_img_dict=updated_img_list)
logger.info("Proccessing state changes for group: %s" % group.group_name)
updated_img_list = process_state_changes(group_name=group.group_name, json_img_dict=updated_img_list)
set_images_for_group(group_name=group.group_name, json_img_dict=updated_img_list)
# Need to build conflict dictionary to be displayed on matrix page.
# check for image conflicts function returns a dictionary of conflicts, keyed by the repos
conflict_dict = check_for_image_conflicts(json_img_dict=updated_img_list)
set_conflicts_for_group(group_name=group.group_name, conflict_dict=conflict_dict)
logger.info("Image collection complete, entering downtime")#, sleeping for 1 second")
loop_counter = 0
if(num_tx == 0):
wait_period = config.image_collection_interval
else:
wait_period = 0
while(loop_counter<wait_period):
time.sleep(5)
num_tx = get_num_transactions()
#check for new transactions
if(num_tx>0):
break
#check if repos have been added or deleted
if(check_for_repo_changes()):
repo_proccesed()
break
#check if httpd is running
output = subprocess.check_output(['ps', '-A']).decode()
if 'httpd' not in output:
# apache has shut down, time for image collection to do the same
logger.info("httpd offline, terminating")
term_signal = True
break
loop_counter = loop_counter+1
num_tx = get_num_transactions()
# Accepts Image info, project name, and a repo object
# Must find and download the appropriate image (by name) and then upload it
# to the given image ID
@app.task(bind=True)
def transfer_image(self, image_name, image_id, group_name, auth_url, project_tenant, username, password, requesting_user, cloud_name, project_domain_name="Default", user_domain_name="Default"):
logger.info("User %s attempting to transfer %s - %s to repo '%s'" % (requesting_user, image_name, image_id, project_tenant))
# Find image by name in another repo where the state=present
# returns tuple: (auth_url, tenant, username, password, img_id, checksum)
src_img_info = find_image_by_name(group_name=group_name, image_name=image_name)
if src_img_info is False:
logger.error("Could not find suitable source image for transfer, cancelling transfer")
decrement_transactions()
return False
#check if this image is cached locally
image_path = check_cached_images(image_name, src_img_info[5])
if image_path is not None:
logger.info("Found cached copy at: %s uploading image" % image_path)
#upload cached image
image_path = image_path.rsplit('/', 1)[0]+ "/"
logger.info("Uploading Image to %s" % project_tenant)
dest_rcon = repo_connector(auth_url=auth_url, project=project_tenant, username=username, password=password, project_domain_name=project_domain_name, user_domain_name=user_domain_name, alias=cloud_name)
dest_rcon.upload_image(image_id=image_id, image_name=image_name, scratch_dir=image_path)
queue_state_change(group_name=group_name, cloud_name=cloud_name, img_id=image_id, state='Present', hidden=None)
logger.info("Image transfer finished")
decrement_transactions()
return True
else:
logger.info("No cached copy found, downloading image")
# Download img to the cache folder
# First check if a file by this name exists in the cache folder
image_path = "/var/www/glintv2/scratch/" + image_name
valid_path = True
if(os.path.exists(image_path)):
valid_path = False
# Filename exists locally, we need to use a temp folder
for x in range(0,10):
#first check if the temp folder exists
image_path = "/var/www/glintv2/scratch/" + str(x)
if not os.path.exists(image_path):
#create temp folder and break since it is definitly empty
os.makedirs(image_path)
image_path = "/var/www/glintv2/scratch/" + str(x) + "/" + image_file.name
valid_path = True
break
#then check if the file is in that folder
image_path = "/var/www/glintv2/scratch/" + str(x) + "/" + image_file.name
if not os.path.exists(image_path):
valid_path = True
break
# remove file name from path
image_path = image_path.rsplit('/', 1)[0]
image_path = image_path + "/"
logger.info("Downloading Image from %s" % src_img_info[1])
src_rcon = repo_connector(auth_url=src_img_info[0], project=src_img_info[1], username=src_img_info[2], password=src_img_info[3], project_domain_name=src_img_info[6], user_domain_name=src_img_info[7])
src_rcon.download_image(image_name=image_name, image_id=src_img_info[4], scratch_dir=image_path)
logger.info("Image transfer finished")
# Upload said image to the new repo
logger.info("Uploading Image to %s" % project_tenant)
dest_rcon = repo_connector(auth_url=auth_url, project=project_tenant, username=username, password=password, project_domain_name=project_domain_name, user_domain_name=user_domain_name)
dest_rcon.upload_image(image_id=image_id, image_name=image_name, scratch_dir=image_path)
queue_state_change(group_name=group_name, cloud_name=cloud_name, img_id=image_id, state='Present', hidden=None)
image_path = image_path + image_name
add_cached_image(image_name, src_img_info[5], image_path)
decrement_transactions()
return True
# Accepts Image info (name, local path, and format), project name, repo object info, and the requesting user
# Uploads the given image to the target cloud (repo object)
#
@app.task(bind=True)
def upload_image(self, image_name, image_path, group_name, auth_url, project_tenant, username, password, requesting_user, cloud_name, disk_format, container_format, project_domain_name="Default", user_domain_name="Default"):
# Upload said image to the new repo
logger.info("Attempting to upload Image to %s for user: %s" % (project_tenant, requesting_user))
dest_rcon = repo_connector(auth_url=auth_url, project=project_tenant, username=username, password=password, project_domain_name=project_domain_name, user_domain_name=user_domain_name)
image_id = dest_rcon.upload_image(image_id=None, image_name=image_name, scratch_dir=image_path, disk_format=disk_format, container_format=container_format)
img_checksum = dest_rcon.get_checksum(image_id)
if(check_cached_images(image_name, img_checksum) is None):
#image isn't in cache and it's unique add it to the cache list
add_cached_image(image_name, img_checksum, image_path)
logger.info("Image upload finished")
decrement_transactions()
return True
# Accepts image id, project name, and repo object to delete image ID from.
@app.task(bind=True)
def delete_image(self, image_id, image_name, group_name, auth_url, project_tenant, username, password, requesting_user, cloud_name, project_domain_name="Default", user_domain_name="Default"):
logger.info("User %s attempting to delete %s - %s from cloud '%s'" % (requesting_user, image_name, image_id, project_tenant))
if check_delete_restrictions(image_id=image_id, group_name=group_name, cloud_name=cloud_name):
rcon = repo_connector(auth_url=auth_url, project=project_tenant, username=username, password=password, project_domain_name=project_domain_name, user_domain_name=user_domain_name)
result = rcon.delete_image(image_id)
if result:
queue_state_change(group_name=group_name, cloud_name=cloud_name, img_id=image_id, state='deleted', hidden=None)
logger.info("Image Delete finished")
decrement_transactions()
return True
logger.error("Unknown error deleting %s (result = %s)" % (image_id, result))
decrement_transactions()
return False
else:
logger.error("Delete request violates delete rules, image either shared or the last copy.")
queue_state_change(group_name=group_name, cloud_name=cloud_name, img_id=image_id, state='present', hidden=None)
decrement_transactions()
return False
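# Illustrative invocation sketch (argument values are assumptions): the tasks
# above are normally queued asynchronously with Celery's ``delay``, e.g.
#   delete_image.delay(image_id, image_name, group_name, auth_url,
#                      project_tenant, username, password, requesting_user,
#                      cloud_name)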
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_windows.py
|
#!/usr/bin/python2
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.script.script import Script
from status_params import *
# server configurations
config = Script.get_config()
# This is expected to be of the form #.#.#.#
stack_version_unformatted = config['clusterLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
stack_root = None
hive_conf_dir = None
hive_home = None
hive_lib_dir = None
hive_log_dir = None
hive_opts = None
hcat_home = None
hcat_config_dir = None
hive_bin = None
try:
stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
hive_conf_dir = os.environ["HIVE_CONF_DIR"]
hive_home = os.environ["HIVE_HOME"]
hive_lib_dir = os.environ["HIVE_LIB_DIR"]
hive_log_dir = os.environ["HIVE_LOG_DIR"]
hive_opts = os.environ["HIVE_OPTS"]
hcat_home = os.environ["HCAT_HOME"]
hcat_config_dir = os.environ["WEBHCAT_CONF_DIR"]
hive_bin = os.path.join(hive_home, "bin")
except:
pass
hive_env_sh_template = config['configurations']['hive-env']['content']
hive_warehouse_dir = config['configurations']['hive-site']['hive.metastore.warehouse.dir']
hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
hive_user = hadoop_user
hcat_user = hadoop_user
hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
hive_execution_engine = config["configurations"]["hive-site"]["hive.execution.engine"]
######## Metastore Schema
init_metastore_schema = not config['configurations']['hive-site']['datanucleus.autoCreateSchema']
service_map = {
"metastore" : hive_metastore_win_service_name,
"client" : hive_client_win_service_name,
"hiveserver2" : hive_server_win_service_name,
"templeton" : webhcat_server_win_service_name
}
|
[] |
[] |
[
"HIVE_LOG_DIR",
"HIVE_CONF_DIR",
"HIVE_HOME",
"WEBHCAT_CONF_DIR",
"HIVE_OPTS",
"HADOOP_HOME",
"HIVE_LIB_DIR",
"HCAT_HOME"
] |
[]
|
["HIVE_LOG_DIR", "HIVE_CONF_DIR", "HIVE_HOME", "WEBHCAT_CONF_DIR", "HIVE_OPTS", "HADOOP_HOME", "HIVE_LIB_DIR", "HCAT_HOME"]
|
python
| 8 | 0 | |
libpod/healthcheck.go
|
package libpod
import (
"bufio"
"bytes"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/containers/podman/v2/libpod/define"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
// MaxHealthCheckNumberLogs is the maximum number of attempts we keep
// in the healthcheck history file
MaxHealthCheckNumberLogs int = 5
// MaxHealthCheckLogLength in characters
MaxHealthCheckLogLength = 500
)
// hcWriteCloser allows us to use bufio as a WriteCloser
type hcWriteCloser struct {
*bufio.Writer
}
// Used to add a closer to bufio
func (hcwc hcWriteCloser) Close() error {
return nil
}
// HealthCheck verifies the state and validity of the healthcheck configuration
// on the container and then executes the healthcheck
func (r *Runtime) HealthCheck(name string) (define.HealthCheckStatus, error) {
container, err := r.LookupContainer(name)
if err != nil {
return define.HealthCheckContainerNotFound, errors.Wrapf(err, "unable to lookup %s to perform a health check", name)
}
hcStatus, err := checkHealthCheckCanBeRun(container)
if err == nil {
return container.runHealthCheck()
}
return hcStatus, err
}
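// Usage sketch (illustrative only; the runtime value and the container name "mycontainer" are hypothetical):
//   status, err := runtime.HealthCheck("mycontainer")
//   if err != nil { logrus.Errorf("healthcheck finished with status %v: %v", status, err) }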
// runHealthCheck runs the health check as defined by the container
func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
var (
newCommand []string
returnCode int
capture bytes.Buffer
inStartPeriod bool
)
hcCommand := c.HealthCheckConfig().Test
if len(hcCommand) < 1 {
return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
}
switch hcCommand[0] {
case "", "NONE":
return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
case "CMD":
newCommand = hcCommand[1:]
case "CMD-SHELL":
// TODO: SHELL command from image not available in Container - use Docker default
newCommand = []string{"/bin/sh", "-c", strings.Join(hcCommand[1:], " ")}
default:
// command supplied on command line - pass as-is
newCommand = hcCommand
}
if len(newCommand) < 1 || newCommand[0] == "" {
return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
}
captureBuffer := bufio.NewWriter(&capture)
hcw := hcWriteCloser{
captureBuffer,
}
streams := new(define.AttachStreams)
streams.OutputStream = hcw
streams.ErrorStream = hcw
streams.InputStream = bufio.NewReader(os.Stdin)
streams.AttachOutput = true
streams.AttachError = true
streams.AttachInput = true
logrus.Debugf("executing health check command %s for %s", strings.Join(newCommand, " "), c.ID())
timeStart := time.Now()
hcResult := define.HealthCheckSuccess
config := new(ExecConfig)
config.Command = newCommand
exitCode, hcErr := c.Exec(config, streams, nil)
if hcErr != nil {
errCause := errors.Cause(hcErr)
hcResult = define.HealthCheckFailure
if errCause == define.ErrOCIRuntimeNotFound ||
errCause == define.ErrOCIRuntimePermissionDenied ||
errCause == define.ErrOCIRuntime {
returnCode = 1
hcErr = nil
} else {
returnCode = 125
}
} else if exitCode != 0 {
hcResult = define.HealthCheckFailure
returnCode = 1
}
timeEnd := time.Now()
if c.HealthCheckConfig().StartPeriod > 0 {
// there is a start-period we need to honor; we add startPeriod to container start time
startPeriodTime := c.state.StartedTime.Add(c.HealthCheckConfig().StartPeriod)
if timeStart.Before(startPeriodTime) {
// we are still in the start period, flip the inStartPeriod bool
inStartPeriod = true
logrus.Debugf("healthcheck for %s being run in start-period", c.ID())
}
}
eventLog := capture.String()
if len(eventLog) > MaxHealthCheckLogLength {
eventLog = eventLog[:MaxHealthCheckLogLength]
}
if timeEnd.Sub(timeStart) > c.HealthCheckConfig().Timeout {
returnCode = -1
hcResult = define.HealthCheckFailure
hcErr = errors.Errorf("healthcheck command exceeded timeout of %s", c.HealthCheckConfig().Timeout.String())
}
hcl := newHealthCheckLog(timeStart, timeEnd, returnCode, eventLog)
if err := c.updateHealthCheckLog(hcl, inStartPeriod); err != nil {
return hcResult, errors.Wrapf(err, "unable to update health check log %s for %s", c.healthCheckLogPath(), c.ID())
}
return hcResult, hcErr
}
func checkHealthCheckCanBeRun(c *Container) (define.HealthCheckStatus, error) {
cstate, err := c.State()
if err != nil {
return define.HealthCheckInternalError, err
}
if cstate != define.ContainerStateRunning {
return define.HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID())
}
if !c.HasHealthCheck() {
return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
}
return define.HealthCheckDefined, nil
}
func newHealthCheckLog(start, end time.Time, exitCode int, log string) define.HealthCheckLog {
return define.HealthCheckLog{
Start: start.Format(time.RFC3339Nano),
End: end.Format(time.RFC3339Nano),
ExitCode: exitCode,
Output: log,
}
}
// updateHealthStatus updates the health status of the container
// in the healthcheck log
func (c *Container) updateHealthStatus(status string) error {
healthCheck, err := c.GetHealthCheckLog()
if err != nil {
return err
}
healthCheck.Status = status
newResults, err := json.Marshal(healthCheck)
if err != nil {
return errors.Wrapf(err, "unable to marshall healthchecks for writing status")
}
return ioutil.WriteFile(c.healthCheckLogPath(), newResults, 0700)
}
// updateHealthCheckLog parses the health check results and writes the log
func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPeriod bool) error {
healthCheck, err := c.GetHealthCheckLog()
if err != nil {
return err
}
if hcl.ExitCode == 0 {
// set status to healthy, reset failing state to 0
healthCheck.Status = define.HealthCheckHealthy
healthCheck.FailingStreak = 0
} else {
if len(healthCheck.Status) < 1 {
healthCheck.Status = define.HealthCheckHealthy
}
if !inStartPeriod {
// increment failing streak
healthCheck.FailingStreak += 1
// if failing streak > retries, then status to unhealthy
if healthCheck.FailingStreak >= c.HealthCheckConfig().Retries {
healthCheck.Status = define.HealthCheckUnhealthy
}
}
}
healthCheck.Log = append(healthCheck.Log, hcl)
if len(healthCheck.Log) > MaxHealthCheckNumberLogs {
healthCheck.Log = healthCheck.Log[1:]
}
newResults, err := json.Marshal(healthCheck)
if err != nil {
return errors.Wrapf(err, "unable to marshall healthchecks for writing")
}
return ioutil.WriteFile(c.healthCheckLogPath(), newResults, 0700)
}
// healthCheckLogPath returns the path where the health check log is stored
func (c *Container) healthCheckLogPath() string {
return filepath.Join(filepath.Dir(c.state.RunDir), "healthcheck.log")
}
// GetHealthCheckLog returns HealthCheck results by reading the container's
// health check log file. If the health check log file does not exist, then
// an empty healthcheck struct is returned
func (c *Container) GetHealthCheckLog() (define.HealthCheckResults, error) {
var healthCheck define.HealthCheckResults
if _, err := os.Stat(c.healthCheckLogPath()); os.IsNotExist(err) {
return healthCheck, nil
}
b, err := ioutil.ReadFile(c.healthCheckLogPath())
if err != nil {
return healthCheck, errors.Wrap(err, "failed to read health check log file")
}
if err := json.Unmarshal(b, &healthCheck); err != nil {
return healthCheck, errors.Wrapf(err, "failed to unmarshal existing healthcheck results in %s", c.healthCheckLogPath())
}
return healthCheck, nil
}
// HealthCheckStatus returns the current state of a container with a healthcheck
func (c *Container) HealthCheckStatus() (string, error) {
if !c.HasHealthCheck() {
return "", errors.Errorf("container %s has no defined healthcheck", c.ID())
}
results, err := c.GetHealthCheckLog()
if err != nil {
return "", errors.Wrapf(err, "unable to get healthcheck log for %s", c.ID())
}
return results.Status, nil
}
func (c *Container) disableHealthCheckSystemd() bool {
if os.Getenv("DISABLE_HC_SYSTEMD") == "true" {
return true
}
if c.config.HealthCheckConfig.Interval == 0 {
return true
}
return false
}
|
[
"\"DISABLE_HC_SYSTEMD\""
] |
[] |
[
"DISABLE_HC_SYSTEMD"
] |
[]
|
["DISABLE_HC_SYSTEMD"]
|
go
| 1 | 0 | |
aiven/resource_elasticsearch_acl_rule_test.go
|
package aiven
import (
"fmt"
"os"
"testing"
"github.com/aiven/aiven-go-client"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
func TestAccAivenElasticsearchACLRule_basic(t *testing.T) {
resourceName := "aiven_elasticsearch_acl_rule.foo"
rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviderFactories,
CheckDestroy: testAccCheckAivenElasticsearchACLRuleResourceDestroy,
Steps: []resource.TestStep{
{
Config: testAccElasticsearchACLRuleResource(rName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")),
resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-sr-aclrule-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "index", "test-index"),
resource.TestCheckResourceAttr(resourceName, "username", fmt.Sprintf("user-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "permission", "readwrite"),
),
},
},
})
}
func testAccElasticsearchACLRuleResource(name string) string {
return fmt.Sprintf(`
data "aiven_project" "foo" {
project = "%s"
}
resource "aiven_elasticsearch" "bar" {
project = data.aiven_project.foo.project
cloud_name = "google-europe-west1"
plan = "startup-4"
service_name = "test-acc-sr-aclrule-%s"
maintenance_window_dow = "monday"
maintenance_window_time = "10:00:00"
}
resource "aiven_service_user" "foo" {
service_name = aiven_elasticsearch.bar.service_name
project = data.aiven_project.foo.project
username = "user-%s"
}
resource "aiven_elasticsearch_acl_config" "foo" {
project = data.aiven_project.foo.project
service_name = aiven_elasticsearch.bar.service_name
enabled = true
extended_acl = false
}
resource "aiven_elasticsearch_acl_rule" "foo" {
project = data.aiven_project.foo.project
service_name = aiven_elasticsearch.bar.service_name
username = aiven_service_user.foo.username
index = "test-index"
permission = "readwrite"
}
`, os.Getenv("AIVEN_PROJECT_NAME"), name, name)
}
func testAccCheckAivenElasticsearchACLRuleResourceDestroy(s *terraform.State) error {
c := testAccProvider.Meta().(*aiven.Client)
// loop through the resources in state, verifying each Elasticsearch ACL rule is destroyed
for _, rs := range s.RootModule().Resources {
if rs.Type != "aiven_elasticsearch_acl_rule" {
continue
}
projectName, serviceName, username, index := splitResourceID4(rs.Primary.ID)
r, err := c.ElasticsearchACLs.Get(projectName, serviceName)
if err != nil {
if err.(aiven.Error).Status != 404 {
return err
}
}
if r == nil {
return nil
}
for _, acl := range r.ElasticSearchACLConfig.ACLs {
if acl.Username != username {
continue
}
for _, rule := range acl.Rules {
if rule.Index == index {
return fmt.Errorf("opensearch acl (%s) still exists", rs.Primary.ID)
}
}
}
}
return nil
}
|
[
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\""
] |
[] |
[
"AIVEN_PROJECT_NAME"
] |
[]
|
["AIVEN_PROJECT_NAME"]
|
go
| 1 | 0 | |
Compete/Class of Code/formated_string.py
|
# Problem: https://docs.google.com/document/d/1alGBhMiAn4mX9wCjy7H6Z83arwKeGNG7-iJpjbiyboA/edit?usp=sharing
words_list = input().strip().split()
result = []
for word in words_list:
counting = len([char for char in word if char.isalpha()])
if counting > 0:
result += ['A'+'m'*counting]
result += [word]
print(*result)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
Section 4/source/yolo_detect.py
|
"""
Perform object detection using YOLO v3
on an image, print each detected object -
its class, probability score and bounding box.
"""
# Turn off Tensorflow debug info (must be set before TensorFlow is imported)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from yolo_simple import YOLOSimple
from sys import argv
if __name__ == '__main__':
# Create a new yolo object
myyo=YOLOSimple()
# Prepare an image to match the model.
img = myyo.prepare_image(argv[1])
# Detect objects.
boxes, scores, classes = myyo.detect(*img)
# Show results.
myyo.print_detected(boxes, scores, classes)
# Clean up the session after we're done.
myyo.close_session()
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
packages/at/at/__init__.py
|
#!/usr/bin/env python
# python 2 only, adjust path (or port, I'd love you) as needed
# run @ --help for (a little) more information
# MIT licensed.
"""\
primarily a python eval command. auto-formats the result of your expression:
(> indicates the line contains user input)
> $ @ '1'
1
> $ @ '1 + 1'
2
> $ @ '"hello world"'
hello world
has auto-import; looks through your string for module names.
if it finds them, it imports them. shouldn't cause any issues if you don't
mean to use the module:
> $ @ 'time.time()'
1397224233.76
if you pass -p, prints x for x in <your expression> on lines:
> $ @ '[1, 2, 3, 4]'
[1, 2, 3, 4]
> $ @ -p '[1, 2, 3, 4]'
1
2
3
4
if you pass -l, appends ' for line in lines' to your expression:
> $ @ -pl 'line[::-1]'
> hello world
dlrow olleh
> wheee!
!eeehw
> this is fun!
!nuf si siht
examples from my bash history (try them, they don't bite):
@ '"|".join(lines)'
@ -p 'range(10)'
@ -pl 'line[::-1]'
@ '"%s %s" % (datetime.datetime.now(), line)' -plu
@ 'math.sqrt(49012)'
@ 'math.sqrt(150)'
... | @ 'line.decode("utf-8").encode("unicode_escape")' -pl | ...
@ 'x % 5 for x in range(15)'
@ 'dir()' -p
@ 'random.SystemRandom().choice("abcdef0123456789") for x in range(30)' -j
@ 'pprint.pprint(json.loads(inp()))'
@ 'urllib2.urlopen("google.com").read()'
another great one:
@ 'variables'
"""
from __future__ import print_function
from datetime import datetime as dt
from datetime import timedelta as td
from random import *
r=random
import random
try:
import builtins
except ImportError:
import __builtin__ as builtins
import sys
import codecs
import ast
import tokenize
import contextlib
import six
_debug = any((x.startswith("-") and not x.startswith("--") and len(x) < 9 and "d" in x) for x in sys.argv)
def _debugp(*a, **kw):
if _debug:
sys.stdout.flush()
sys.stderr.flush()
kw["file"]=sys.stderr
print(*a, **kw)
sys.stderr.flush()
class _Unbuffered(object):
def __init__(self, stream):
self.stream = stream
self._unbuffered = True
def write(self, data):
if isinstance(data, six.text_type) and not getattr(self.stream, "buffer", None):
data = data.encode("utf-8")
self.stream.write(data)
if self._unbuffered:
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
def _debuffer():
if _debug:
_debugp("_debuffer()")
sys.stdout = _Unbuffered(sys.stdout)
sys.stdout._unbuffered = sys.stdout.isatty()
sys.stderr = _Unbuffered(sys.stderr)
sys.stderr._unbuffered = sys.stderr.isatty()
if _debug:
_debugp("done in _debuffer()")
def succeed():
"Function that exits with a success return code (0)."
sys.exit(0)
def fail():
"Function that exits with a failure return code (1)."
sys.exit(1)
long = getattr(builtins, 'long', int)
class lines(object):
def __init__(self):
self.__doc__ = "Iterable of lines coming in from standard in."
def __iter__(self):
return self
def __repr__(self):
return "<lines - iterable of lines coming from standard in>"
def __str__(self):
return "iterable"
def next(self):
line = sys.stdin.readline()
if type(line) == type(b""):
line = line.decode("utf-8")
if not line:
raise StopIteration
if line.endswith("\n"):
line = line[:-1]
return line
__next__ = next
def inp():
"Returns entire standard in as one string."
res = sys.stdin.read()
if type(res) == type(b""):
return res.decode("utf-8")
return res
def _hash(hasher, text):
instance = hasher()
instance.update(text)
return instance
def md5(text):
"md5(text) - compute hex md5 of input string."
import hashlib
return _hash(hashlib.md5, text).hexdigest()
def sha256(text):
"sha256(text) - compute hex sha256 of input string."
import hashlib
return _hash(hashlib.sha256, text).hexdigest()
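# Usage sketch: md5(b"abc") returns '900150983cd24fb0d6963f7d28e17f72' (the RFC 1321 test vector);
# sha256() works the same way, just with hashlib.sha256.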
def handle(func, *args, **kwargs):
res = kwargs.pop("onerror", None)
exc = kwargs.pop("exc", Exception)
try:
return func(*args, **kwargs)
except exc:
return res
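# Usage sketch: handle(int, "oops", onerror=-1, exc=ValueError) returns -1,
# because int("oops") raises ValueError, which is caught and replaced by the onerror value.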
def safejson(*args, **kwargs):
import json
kwargs["exc"] = json.JSONDecodeError
return handle(json.loads, *args, **kwargs)
def pairs(iterable, **kwargs):
"pairs(iterable) -> (s0, s1), (s1,s2), (s2, s3), etc"
from itertools import tee, chain
iterables = []
for key, value in kwargs.items():
if any(x in key for x in ["pre", "head", "begin", "start", "lead", "before", "open", "front", "first", "prepare", "embark", "launch", "create", "go", "push"]):
iterables.append([value])
break
iterables.append(iterable)
for key, value in kwargs.items():
if any(x in key for x in ["post", "tail", "end", "stop", "conclude", "after", "close", "back", "last", "destroy", "halt", "off", "termin", "pop"]):
iterables.append([value])
break
if len(iterables) > 1:
iterable = chain(*iterables)
a, b = tee(iterable)
next(b, None)
return zip(a, b)
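# Usage sketch: list(pairs([1, 2, 3])) -> [(1, 2), (2, 3)];
# list(pairs([1, 2], head=0, tail=9)) -> [(0, 1), (1, 2), (2, 9)]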
def write(contents, value):
sys.stdout.write(contents)
sys.stdout.flush()
return value
_chunks_guard = object()
def chunks(generator, size, pad=_chunks_guard):
"""
chunks(generator, size, pad=<no pad>) - Yield size chunks from generator.
fills the last one with pad if provided.
"""
import itertools
q = itertools.zip_longest(*[iter(generator)]*size, fillvalue=pad)
return ([a for a in b if a is not _chunks_guard] for b in q)
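# Usage sketch: list(chunks(range(7), 3)) -> [[0, 1, 2], [3, 4, 5], [6]];
# passing pad=0 fills the last chunk instead: [[0, 1, 2], [3, 4, 5], [6, 0, 0]]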
def lerp(x, y, v):
return x + (y - x) * v
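# Usage sketch: lerp(0.0, 10.0, 0.25) -> 2.5 (interpolate a fraction v of the way from x to y)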
def delays(delta, iterable=None):
"delays(secs, i=itertools.repeat(None)) - Wraps iterator with delays."
if iterable is None:
import itertools
iterable = itertools.repeat(None)
if not callable(delta):
if getattr(delta, "total_seconds", None) is not None:
delta = delta.total_seconds()
deltafunc = lambda: delta
else:
deltafunc = delta
for x in iterable:
import time
time.sleep(float(deltafunc()))
yield x
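# Usage sketch: for n in delays(0.5, range(3)): ...  yields 0, 1, 2 with a 0.5 second pause before each value.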
def noexc(lamb, *a, **kw):
default = kw.get("default", None)
try:
return lamb(*a)
except Exception as e:
res = default
if type(default) in [str, unicode]:
return default.format(e=e)
lines = lines()
class chars(object):
def __init__(self):
self.__doc__ = "Standard in, char by char, as an iterator"
self.iterator = self._go()
assert six.next(self.iterator) is None
def __iter__(self):
return self.iterator
def next(self):
return self.iterator.next()
def __repr__(self):
return "<chars - standard in, char by char, as an iterator>"
def __str__(self):
return "".join(self)
def _go(self):
yield None
if sys.stdout.isatty() and sys.stdout._unbuffered:
bufsize = 1
else:
bufsize = 512
while True:
s = sys.stdin.read(bufsize)
if not s:
break
for char in s:
yield char
chars = chars()
def _split_statements(string):
if string is None:
return [], ""
operations = [[]]
i = iter(string.split("\n"))
nextop = lambda: six.next(i)
for type, tokenstring, start, end, line in tokenize.generate_tokens(nextop):
if tokenstring.strip() == ";":
operations.append([])
continue
operations[-1].append((type, tokenstring))
strings = [tokenize.untokenize(o) for o in operations]
return strings[:-1], strings[-1]
def readall(file_like, max=None):
import select
read = b""
while select.select([file_like], [], [], 0)[0] != [] and (max is None or len(read) < max):
read += file_like.read(select.PIPE_BUF)
return read
def paste():
import subprocess
return subprocess.check_output(["pbpaste"])
def readfor(file_like, duration, use_select=True):
import select
import time
read = b""
start = time.time()
end = start + duration
if use_select:
while select.select([file_like], [], [], max(0, end - time.time()))[0] != []:
read += file_like.read(select.PIPE_BUF)
else:
while time.time() < end:
read += file_like.read(select.PIPE_BUF)
return read
read_for = readfor
read_until = readfor
def shell(name="detect"):
if _debug:
_debugp(" => shell(name={!r})".format(name))
if name == "detect":
try:
import bpython
name = "bpython"
except ImportError:
pass
if name == "detect":
try:
import IPython
name = "ipython"
except ImportError:
pass
if name == "detect":
try:
import ptpython
name = "ptpython"
except ImportError:
print("You should totally install ptpython, it's super awesome.")
print("Specify `-i builtin` to shut this suggestion up.")
if name == "detect":
name = "builtin"
if _debug:
_debugp(" => after detect: shell(name={!r})".format(name))
def passthrough(globs, string):
_add_modules(globs, [string])
return string
if name == "ipython":
if _debug:
_debugp(" => loading ipython")
from IPython import embed
from IPython.terminal.embed import InteractiveShellEmbed
def e(globbelses):
orig = InteractiveShellEmbed.run_cell.im_func
def run_cell(self, string, *a, **kw):
return orig(self, passthrough(globbelses, string), *a, **kw)
InteractiveShellEmbed.run_cell = run_cell
return embed(user_ns=globbelses)
# monkeypatch ipython.terminal.embed.[...].run_cell(self, string, ...)
# write with self.push(dict)
return e
elif name == "ptpython":
if _debug:
_debugp(" => loading ptpython")
from ptpython.repl import embed, PythonRepl
def wrap_embed(globs):
orig = PythonRepl._execute.im_func
def _execute(self, cli, string):
return orig(self, cli, passthrough(globs, string))
PythonRepl._execute = _execute
return embed(globs, vi_mode=True)
# monkeypatch ptpython.repl.PythonRepl._execute(self, cli, string)
# write to actual external globals dict
return wrap_embed
elif name == "bpython":
if _debug:
_debugp(" => loading bpython")
from bpython import embed
from bpython import repl
def wrap_embed(globs):
orig = repl.Interpreter.runsource.im_func
def runsource(self, source, *a, **kw):
return orig(self, passthrough(globs, source), *a, **kw)
repl.Interpreter.runsource = runsource
return embed(globs)
return wrap_embed
else:
if _debug:
_debugp(" => loading builtin")
if name != "builtin":
print("warning: don't have interpreter %s, using builtin" % name)
import code
try:
import readline
except ImportError:
pass
def interact(local):
code.interact("Welcome to the @ built-in console.",
readfunc=(lambda prompt: passthrough(local, raw_input(prompt))),
local=local)
return interact
def _parse_args():
global _debug
if _debug:
_debugp("in _parse_args()")
import argparse
class Thingy(argparse.RawDescriptionHelpFormatter,
argparse.ArgumentDefaultsHelpFormatter):
# argparse's api is weird here
pass
parser = argparse.ArgumentParser(epilog=__doc__,
description="Convenient python eval!",
formatter_class=Thingy)
parser.add_argument("-a", "--all", action="store_true",
help="wrap expression in all()")
parser.add_argument("-n", "--any", action="store_true",
help="wrap expression in any()")
parser.add_argument("-l", "--lines", action="store_true",
help="make expression iterable per line")
parser.add_argument("-c", "--chars", action="store_true",
help="make expression iterable per character")
parser.add_argument("-b", "--bool", action="store_true",
help="wrap expression in bool() - important if result will be a bool!")
parser.add_argument("-u", "--unbuffered", action="store_true",
help="shut off output buffering")
parser.add_argument("-p", "--print-each", action="store_true",
help="print each result from the iterable - good with -l pipelining")
parser.add_argument("-j", "--print-joined", action="store_true",
help="print each result from the iterable, no newlines - good for -c pipelining")
parser.add_argument("-v", "--variables", nargs="*",
help='use as -v x="hi" y="$there" to add variables to the expression')
parser.add_argument("string", nargs="*",
help="the expression, automatically joined by space if multiple specified")
parser.add_argument("-d", "--debug", action="store_true",
help='print debug info for type detection')
parser.add_argument("-q", "--quiet", action="store_true",
help='quiet, eg don\'t print on ctrl+c')
parser.add_argument("-i", "--interactive", action="store", default=None,
const="detect", nargs="?",
help='launch interactive mode')
args = parser.parse_args()
_debug = args.debug
if _debug:
_debugp("did initial parse, args:", args)
if args.unbuffered:
sys.stdout._unbuffered = True
sys.stderr._unbuffered = True
if _debug:
_debugp("=> mark unbuffered")
if not args.string and not args.interactive:
args.interactive = 'detect'
if args.interactive:
args.interactive = shell(args.interactive)
if _debug:
_debugp("=> args.interactive: ", args.interactive)
string = " ".join(args.string) if args.string else None
statements, string = _split_statements(string)
if _debug:
_debugp("=> statements: ", args.interactive)
for statement in statements:
_debugp(" ", statement)
_debugp(" <<", string)
if args.print_joined:
args.print_each = True
assert not (args.lines and args.chars), "only -c or -l please"
actions = len([x for x in [
args.all,
args.any,
args.bool,
args.print_each
] if x])
assert actions <= 1, (
"only one of ---all, --any, --bool, or --print_each please")
if args.lines:
if not string.strip():
string = "line"
string = "(%s) for line in lines" % string
if args.chars:
if not string.strip():
string = "char"
string = "(%s) for char in characters" % string
if args.bool:
string = "bool(%s)" % string
if _debug:
_debugp("=> processed final str:")
_debugp(" << {}".format(string))
if not args.variables:
args.variables = []
for var in args.variables:
name, equals, value = var.partition("=")
assert equals, "please put an equals sign in variable definitions"
if _debug:
_debugp("=> add var {!r} = {!r}".format(name, value))
globals()[name] = value
return (statements, string, args.interactive, args.print_each, args.debug,
sys.stdout.write if args.print_joined else print, args.quiet)
_available_names = set()
_available = []
class _Importable(object):
def __init__(self, name, kind):
self.name = name
self.kind = kind
def __repr__(self):
return "<importable %s: %s>" % (self.kind, self.name)
def __str__(self):
return "importable %s" % (self.kind)
_optional_modules = [
"six",
"abc",
"aifc",
"argparse",
"array",
"ast",
"base64",
"BaseHTTPServer",
"bdb",
"binascii",
"binhex",
"bisect",
"bsddb",
"bz2",
"cProfile",
"cStringIO",
"calendar",
"cmath",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
"ConfigParser",
"contextlib",
"Cookie",
"cookielib",
"copy",
"crypt",
"psutil",
"csv",
"ctypes",
"datetime",
"decimal",
"difflib",
"dis",
"distutils",
"doctest",
"email",
"email.charset",
"email.encoders",
"email.errors",
"email.generator",
"email.header",
"email.iterators",
"email.message",
"email.mime",
"email.parser",
"email.utils",
"encodings",
"encodings.idna",
"encodings.utf_8_sig",
"ensurepip",
"errno",
"exceptions",
"fcntl",
"fcntl",
"filecmp",
"fileinput",
"findertools",
"fnmatch",
"formatter",
"fpectl",
"fractions",
"ftplib",
"functools",
"gc",
"getopt",
"getpass",
"gettext",
"glob",
"grp",
"gzip",
"hashlib",
"heapq",
"hmac",
"httplib",
"imaplib",
"imghdr",
"imp",
"importlib",
"inspect",
"io",
"itertools",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"logging.config",
"logging.handlers",
"mailbox",
"mailcap",
"marshal",
"math",
"mimetypes",
"mmap",
"modulefinder",
"netrc",
"nntplib",
"numbers",
"operator",
"optparse",
"os",
"parser",
"pdb",
"pickle",
"pipes",
"pkgutil",
"platform",
"plistlib",
"poplib",
"pprint",
"profile",
"pstats",
"pty",
"pwd",
"py_compile",
"random",
"re",
"readline",
"resource",
"robotparser",
"runpy",
"sched",
"select",
"shlex",
"shutil",
"signal",
"SimpleHTTPServer",
"SimpleXMLRPCServer",
"site",
"smtpd",
"smtplib",
"sndhdr",
"socket",
"SocketServer",
"sqlite3",
"stat",
"string",
"StringIO",
"stringprep",
"struct",
"subprocess",
"symbol",
"symtable",
"sys",
"sysconfig",
"syslog",
"tabnanny",
"tarfile",
"telnetlib",
"tempfile",
"termios",
"textwrap",
"thread",
"threading",
"time",
"timeit",
"token",
"tokenize",
"trace",
"traceback",
"tty",
"types",
"unicodedata",
"urllib",
"urllib2",
#"urlparse",
"uu",
"uuid",
"warnings",
"wave",
"weakref",
"wsgiref",
"zipfile",
"zlib",
"cffi",
"chardet",
"colorama",
"cryptography",
"curtsies",
"dateutil",
"decorator",
"docopt",
"docutils",
"docx",
"enum",
"exifread",
"flask",
"humanize",
"husl",
"idna",
"jinja2",
"keras",
"klein",
"lxml",
"markdown",
"markupsafe",
"matplotlib",
"mutagen",
"numpy",
"pandas"
"pathtools",
"png",
"progressbar",
"prompt_toolkit",
"pudb",
"py",
"pygments",
"pytest",
"requests",
"scikits",
"scipy",
"setuptools",
"skimage",
"theano",
"treq",
"twisted",
"urwid",
"watchdog",
"yaml",
]
@contextlib.contextmanager
def _mute_stderr():
"Context manager that mutes file descriptor 2"
import os
fdnum = sys.stderr.fileno()
if not _debug:
#print("muting fd 2 (this was written to sys.stderr)", file=sys.stderr)
#os.write(2, "muting fd 2 (this was written to fd 2)\n")
p = os.dup(fdnum)
q = os.open("/dev/null", os.O_WRONLY)
os.dup2(q, fdnum)
sys.stderr = os.fdopen(p, "w")
#print("just muted fd 2 (this was written to sys.stderr)", file=sys.stderr)
#os.write(2, "just muted fd 2 (this was written to fd 2)\n")
yield
if not _debug:
#print("about to unmute fd 2 (this was written to sys.stderr)", file=sys.stderr)
#os.write(2, "about to unmute fd 2 (this was written to fd 2)\n")
os.dup2(p, fdnum)
os.close(q)
sys.stderr = os.fdopen(fdnum, "w")
#print("unmuting fd 2 (this was written to fd sys.stderr)", file=sys.stderr)
#os.write(2, "unmuting fd 2 (this was written to fd 2)\n")
@contextlib.contextmanager
def _mute_all():
"Context manager that mutes file descriptor 2"
import os
fdnum2 = sys.stderr.fileno()
fdnum1 = sys.stdout.fileno()
if not _debug:
#print("muting fd 2 (this was written to sys.stderr)", file=sys.stderr)
#os.write(2, "muting fd 2 (this was written to fd 2)\n")
p = os.dup(fdnum2)
q = os.open("/dev/null", os.O_WRONLY)
os.dup2(q, fdnum2)
sys.stderr = os.fdopen(p, "w")
if fdnum2 != fdnum1:
w = os.dup(fdnum1)
v = os.open("/dev/null", os.O_WRONLY)
os.dup2(v, fdnum1)
else:
sys.stdout = sys.stderr
#print("just muted fd 2 (this was written to sys.stderr)", file=sys.stderr)
#os.write(2, "just muted fd 2 (this was written to fd 2)\n")
yield
if not _debug:
#print("about to unmute fd 2 (this was written to sys.stderr)", file=sys.stderr)
#os.write(2, "about to unmute fd 2 (this was written to fd 2)\n")
os.dup2(p, fdnum2)
os.close(q)
sys.stderr = os.fdopen(fdnum2, "w")
if fdnum1 != fdnum2:
os.dup2(w, fdnum1)
os.close(v)
sys.stdout = os.fdopen(fdnum1, "w")
else:
sys.stdout = sys.stderr
#print("unmuting fd 2 (this was written to fd sys.stderr)", file=sys.stderr)
#os.write(2, "unmuting fd 2 (this was written to fd 2)\n")
def _add_modules(globbles, strings):
if _debug:
_debugp("=> in _add_modules()")
def _import(_mod):
try:
globbles[_mod] = __import__(_mod)
except ImportError as e:
print(e)
return None
_reset_vars()
return globbles[_mod]
def _reset_vars():
if "variables" in globbles:
globbles["variables"]._cache = None
def _wanted(_mod, kind=None):
if kind is not None and _mod not in _available_names:
_available_names.add(_mod)
_available.append(_Importable(_mod, kind))
# to find all mentions of a module name
# (?:.*(?<!\w)(MODULE_NAME)(?!\w))+(?:.(?!(?<!\w)(MODULE_NAME)(?!\w)))*
# not sure if I need the hack at the end to make sure there are no
# repetitions of the module name...
mod_re = r"(?<!\w)(MODULE_NAME)(?!\w)".replace("MODULE_NAME", _mod)
import re
return _mod not in globbles and any(re.search(mod_re, _s) for _s in strings)
if _debug:
_debugp("=> checking optional_modules")
for _mod in _optional_modules:
if _wanted(_mod, "module"):
if _debug:
_debugp("importing module found in code:", _mod)
_import(_mod)
if _wanted("terminal", "pre-initialized blessings instance") or _wanted("blessings", "blessings module") or _wanted("term", "same as `terminal`"):
if _debug:
_debugp("adding terminal/blessings")
blessings = _import("blessings")
globbles["blessings"] = blessings
if blessings and "terminal" not in globbles:
terminal = blessings.Terminal(force_styling=True)
globbles["terminal"] = terminal
globbles["term"] = terminal
# force non-short-circuit evaluation
#a = _wanted("session", "pre-initialized tensorflow session")
#a = _wanted("sess", "pre-initialized tensorflow session") or a
#a = _wanted("s", "pre-initialized tensorflow session") or a
#a = _wanted("tf", "tensorflow module short-name") or a
#a = _wanted("tensorflow", "tensorflow") or a
#if a:
# with _mute_all():
# tf = _import("tensorflow")
# if tf and "session" not in globbles:
# session = tf.InteractiveSession()
# globbles["session"] = session
# globbles["sess"] = session
# globbles["s"] = session
# globbles["tensorflow"] = tf
# globbles["tf"] = tf
# _reset_vars()
if _wanted("np", "numpy module short-name"):
if _debug:
_debugp("adding numpy as np")
numpy = _import("numpy")
if numpy:
globbles["np"] = numpy
for _itertool_func in _itertools_values:
if _wanted(_itertool_func):
if _debug:
_debugp("adding itertools func", _itertool_func)
_itertools = __import__("itertools")
globbles[_itertool_func] = getattr(_itertools, _itertool_func)
_reset_vars()
del _itertool_func
_available_itertools = []
_itertools_values = [
"accumulate",
"count",
"cycle",
"repeat",
"chain",
"compress",
"dropwhile",
"groupby",
"ifilter",
"ifilterfalse",
"islice",
"imap",
"starmap",
"tee",
"takewhile",
"izip",
"izip_longest",
"product",
"permutations",
"combinations",
"combinations_with_replacement",
]
for _itertool_func in _itertools_values:
_available_itertools.append(_Importable(_itertool_func, "itertools function"))
del _itertool_func
_blacklist = [
"_parse_args",
"_chunks_guard",
"_available_names",
"_mute_stderr",
"_mute_all",
"_debug",
"interactive",
"_format_var",
"run_globals",
"_add_environment_vars",
"_available",
"_LazyString",
"_avail",
"__doc__",
"__file__",
"_available_itertools",
"_Importable",
"_itertools_values",
"_wanted",
"_mod",
"_optional_modules",
"_blacklist",
"_run",
"_main",
"_debuffer",
"_string",
"_statements",
"_shouldprint",
"_hash",
"__package__",
"__name__",
"_add_modules",
"_Unbuffered",
"__builtins__",
"_statement",
"_variables",
"_result",
"_split_statements",
"_hasdoc",
"_old_globals",
]
class _LazyString(object):
"""Class for strings created by a function call.
The proxy implementation attempts to be as complete as possible, so that
the lazy objects should mostly work as expected, for example for sorting.
This is most of the implementation of the `speaklater` package on pypi,
copy and pasted. this class is BSD licensed, the rest of this file is MIT.
"""
__slots__ = ('_func', '_args', "_kwargs", "_cache")
def __init__(self, func, *args, **kwargs):
self._func = func
self._args = args
self._kwargs = kwargs
self._cache = None
@property
def value(self):
if self._cache is not None:
return self._cache
self._cache = self._func(*self._args, **self._kwargs)
return self._cache
def __contains__(self, key):
return key in self.value
def __nonzero__(self):
return bool(self.value)
def __dir__(self):
return dir(unicode)
def __iter__(self):
return iter(self.value)
def __len__(self):
return len(self.value)
def __str__(self):
return str(self.value)
def __unicode__(self):
return unicode(self.value)
def __add__(self, other):
return self.value + other
def __radd__(self, other):
return other + self.value
def __mod__(self, other):
return self.value % other
def __rmod__(self, other):
return other % self.value
def __mul__(self, other):
return self.value * other
def __rmul__(self, other):
return other * self.value
def __lt__(self, other):
return self.value < other
def __le__(self, other):
return self.value <= other
def __eq__(self, other):
return self.value == other
def __ne__(self, other):
return self.value != other
def __gt__(self, other):
return self.value > other
def __ge__(self, other):
return self.value >= other
def __getattr__(self, name):
if name == '__members__':
return self.__dir__()
return getattr(self.value, name)
def __getstate__(self):
return self._func, self._args, self._kwargs
def __setstate__(self, tup):
self._func, self._args, self._kwargs = tup
def __getitem__(self, key):
return self.value[key]
def __copy__(self):
return self
def __repr__(self):
print(self)
return ""
#try:
# return "LazyString(%r)" % self._func
#except Exception:
# return '<%s broken>' % self.__class__.__name__
def _hasdoc(value):
doc = getattr(value, "__doc__", None)
return doc != getattr(type(value), "__doc__", None) and doc
def _format_var(name, value, f=None):
def truncate_to_line(v, length=100):
s = v.split("\n")
if len(s) > 1 or len(s[0]) >= length:
v = s[0][:length//2 - 5] + " ... " + s[0][-(length//2 - 6):]
return v
if f is None:
f = "- {name}: {value}"
if type(value) == _LazyString:
simpledesc = "<Lazy String>"
elif type(value) == type(_format_var):
simpledesc = "function"
elif type(value) == type(print):
simpledesc = "builtin"
elif type(value) == type:
simpledesc = "class"
elif type(value) == type(contextlib):
simpledesc = "module"
elif type(value) == type(str.join):
simpledesc = "method"
elif type(value) == type("".join) or type(value) == type(object().__str__) or type(value) == type(chars.__str__):
simpledesc = "bound method"
elif isinstance(value, six.string_types):
simpledesc = repr(value)
else:
simpledesc = str(value)
if _hasdoc(value):
value = ("%s - %s" % (simpledesc, value.__doc__.split("\n")[0])).strip()
value = truncate_to_line(value)
else:
value = truncate_to_line(simpledesc)
return f.format(name=name, value=value)
def show(x, all=False):
def f(d, blist):
return "\n".join(
_format_var(name, value)
for name, value in sorted(d.items(), key=lambda x: x[0]))
m = dict((q, getattr(x, q)) for q in dir(x) if all or not q.startswith("_"))
l = 0
if m:
print("Attributes")
print("==============\n")
print(f(m, {}))
l = 1
if _hasdoc(x):
print()
print("Docstring")
print("==============\n")
print(x.__doc__)
l = 1
if l:
print()
print("Value")
print("==============\n")
print(_format_var(None, x, f="{value}"))
def _variables(g, oldglobals, quick=False):
import os
def f(d, blist):
return "\n".join(
_format_var(name, value)
for name, value in sorted(d.items(), key=lambda x: x[0])
if name not in blist)
blist = set(_blacklist)
shell_vars = f(os.environ, blist)
blist |= set(os.environ.keys())
modules = None
iterfuncs = None
if not quick:
modules = f(dict((x.name, g.get(x.name, x)) for x in _available), blist)
else:
modules = f(dict((x.name, g[x.name]) for x in _available if x.name in g), blist)
blist |= set([x.name for x in _available])
if not quick:
iterfuncs = f(dict((x.name, g.get(x.name, x)) for x in _available_itertools), blist)
else:
iterfuncs = f(dict((x.name, g[x.name]) for x in _available_itertools if x.name in g), blist)
blist |= set([x.name for x in _available_itertools])
remaining = f(oldglobals, blist)
blist |= set(oldglobals.keys())
uservars = f(g, blist)
tail = ""
result = ""
if shell_vars:
result += (
"Variables from shell environment\n"
"--------------------------------\n"
"\n"
"{shell_vars}\n"
"\n"
).format(shell_vars=shell_vars)
if modules:
result += (
"Imported and auto-importing modules\n"
"-----------------------------------\n"
"\n"
"'importable' modules will automatically import on use.\n"
"\n"
"{modules}\n"
"\n"
).format(modules=modules)
if iterfuncs:
result += (
"extra: these functions will be auto-imported from itertools:\n"
"\n"
"{iterfuncs}\n"
"\n"
).format(iterfuncs=iterfuncs)
if remaining:
result += (
"Utilities and globals specific to @\n"
"-----------------------------------\n"
"\n"
"{remaining}\n"
).format(remaining=remaining)
if uservars:
result += ("\n"
"Your variables\n"
"==============\n"
"\n"
"{}\n"
).format(uservars)
return result
def _add_environment_vars(glob, outer_dir):
import os
original_globals = dict(glob)
overlap = set(original_globals) & set(os.environ)
overlap2 = set(outer_dir + dir(__builtins__)) & set(os.environ)
if overlap and _debug:
_debugp("WARNING: variable overlap: %r" % overlap)
elif overlap2 and _debug:
_debugp("WARNING: builtin overlap: %r" % overlap2)
glob.update(os.environ)
glob.update(original_globals)
def run(statements, expression, run_globals, _shouldprint, _quiet):
if _debug:
_debugp("in run()")
try:
for statement in statements:
if _debug:
_debugp("exec statement:", statement)
six.exec_(statement, run_globals)
if not expression.strip():
if _debug:
_debugp("no expression to run")
_result = None
else:
if _debug:
_debugp("running expression:", expression)
_result = eval("(%s)" % expression, run_globals)
if _debug:
try:
_debugp("result: repr={!r} str={}".format(_result, _result))
except:
import traceback
_debugp("error printing result:")
traceback.print_exc()
_debugp("----------------")
if "tensorflow" in sys.modules:
import tensorflow
if _debug:
_debugp("tensorflow was imported, checking if tf result")
if isinstance(_result, tensorflow.python.framework.ops.Tensor):
if _debug:
_debugp("tensorflow result detected")
if "session" not in run_globals:
if _debug:
_debugp("no session detected, creating interactivesession as 'session'")
run_globals["session"] = tensorflow.InteractiveSession()
if _debug:
_debugp("run tf _result with", run_globals["session"])
_result = run_globals["session"].run(_result)
if _debug:
_debugp("tf result", _result)
except KeyboardInterrupt:
if not _quiet:
sys.stderr.write("@ killed (ctrl+d to close cleanly)")
return fail
except BrokenPipeError:
raise
except BaseException as e:
import traceback
if _debug:
traceback.print_exc()
else:
x = traceback.format_exc().split("\n")
y = "\n".join(x[4:])
sys.stderr.write(y)
sys.stderr.flush()
return fail
if _result is None:
if _debug:
_debugp("converting result of None to result of True")
_result = True
if not (isinstance(_result, six.string_types) or isinstance(_result, _LazyString)):
if _debug:
_debugp("result is not a string, attempting iteration")
try:
iterator = iter(_result)
except TypeError as e:
if getattr(_result, "__iter__", None):
print("Tried to run as iterator, but it failed with typeerror, despite having an __iter__:")
print(repr(_result.__iter__))
raise
if _debug:
_debugp("result doesn't seem iterable")
else:
if _shouldprint:
for x in iterator:
if _debug:
print("printed iterator:", x)
else:
print(x)
else:
result2 = list(iterator)
try:
# lol hax
is_repeatable_iterable = (
"numpy" in str(type(_result))
or (
iterator is not _result
and result2 == list(iter(_result))
)
)
except ValueError:
# assume yes, because annoying
is_repeatable_iterable = True
if is_repeatable_iterable: # check for repeatability
if _debug:
print("repeatable iterable:", _result, result2)
else:
print(_result)
elif any(x != None for x in result2):
if _debug:
print("listed iterable with at least one non-none:", result2)
else:
print(result2)
elif _debug:
_debugp("nothing to print")
return succeed
if not isinstance(_result, bool) or _shouldprint:
if _debug:
_debugp("hasdoc:", _hasdoc(_result), "repr and str equal:", repr(_result) == str(_result), "uses object str:", type(_result).__str__ == object.__str__)
if (_hasdoc(_result) and repr(_result) == str(_result)) or type(_result).__str__ == object.__str__:
if _debug:
print("printed docstring:", _result.__doc__)
else:
#print(_result.__doc__)
show(_result)
else:
if _debug:
print("primary print:", _result)
else:
print(_result)
if isinstance(_result, bool):
if _debug:
_debugp("bool result, returning exit code:", 0 if _result else 1, _result)
if _result:
return succeed
else:
return fail
else:
if _debug:
_debugp("non-bool result, returning exit code 0 (true)")
return succeed
def _run(_statements, _string, interactive, _shouldprint, _debug, print, _quiet):
import os
if _debug:
_debugp("in _run")
sys.path.append(os.path.abspath("."))
old_globals = dict(globals())
old_globals["all_variables"] = _LazyString(lambda: _variables(run_globals, old_globals))
old_globals["variables"] = _LazyString(lambda: _variables(run_globals, old_globals, quick=True))
_add_modules(old_globals, _statements + [_string])
if _debug:
_debugp("freezing globals")
run_globals = dict(old_globals)
if _debug:
_debugp("adding env vars")
_add_environment_vars(run_globals, list(old_globals.keys()))
if _string.strip() or _statements:
if _debug:
_debugp("main run...")
try:
result = run(_statements, _string, run_globals, _shouldprint, _quiet)
except SystemExit:
if not interactive:
raise
else:
if not interactive:
result()
if interactive:
if _debug:
_debugp("running interpreter with globals")
interactive(run_globals)
def _main():
global print
try:
_debuffer()
_statements, _string, interactive, _shouldprint, _debug, print, _quiet = _parse_args()
if _debug:
_debugp("_parse_args done. _shouldprint={}, _quiet={}".format(_shouldprint, _quiet))
_run(_statements, _string, interactive, _shouldprint, _debug, print, _quiet)
except BrokenPipeError:
pass
if _debug:
_debugp("before _main(); __name__ =", __name__)
if __name__ == "__main__":
_main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
molecule/default/tests/test_default.py
|
# -*- coding: utf-8 -*-
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hosts_file(host):
f = host.file('/etc/hosts')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
def test_packages(host):
assert host.package('hd-idle').is_installed
assert host.package('hd-idle').version.startswith('1.05')
def test_service(host):
assert host.service('hd-idle').is_running
assert host.service('hd-idle').is_enabled
|
[] |
[] |
[
"MOLECULE_INVENTORY_FILE"
] |
[]
|
["MOLECULE_INVENTORY_FILE"]
|
python
| 1 | 0 | |
src/java/com/vaklinov/zcashui/ZCashUI.java
|
/************************************************************************************************
* _________ _ ____ _ __ __ _ _ _ _ _ ___
* |__ / ___|__ _ ___| |__ / ___|_ _(_)_ __ __ \ \ / /_ _| | | ___| |_| | | |_ _|
* / / | / _` / __| '_ \\___ \ \ /\ / / | '_ \ / _` \ \ /\ / / _` | | |/ _ \ __| | | || |
* / /| |__| (_| \__ \ | | |___) \ V V /| | | | | (_| |\ V V / (_| | | | __/ |_| |_| || |
* /____\____\__,_|___/_| |_|____/ \_/\_/ |_|_| |_|\__, | \_/\_/ \__,_|_|_|\___|\__|\___/|___|
* |___/
*
* Copyright (c) 2016 Ivan Vaklinov <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
**********************************************************************************/
package com.vaklinov.zcashui;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.KeyEvent;
import java.awt.event.WindowAdapter;
import java.awt.event.WindowEvent;
import java.io.*;
import java.security.SecureRandom;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Date;
import java.util.Random;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.List;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JMenu;
import javax.swing.JMenuBar;
import javax.swing.JMenuItem;
import javax.swing.JOptionPane;
import javax.swing.JTabbedPane;
import javax.swing.KeyStroke;
import javax.swing.SwingUtilities;
import javax.swing.UIManager;
import javax.swing.UIManager.LookAndFeelInfo;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;
import com.vaklinov.zcashui.OSUtil.OS_TYPE;
import com.vaklinov.zcashui.ZCashClientCaller.NetworkAndBlockchainInfo;
import com.vaklinov.zcashui.ZCashClientCaller.WalletCallException;
import com.vaklinov.zcashui.ZCashInstallationObserver.DAEMON_STATUS;
import com.vaklinov.zcashui.ZCashInstallationObserver.DaemonInfo;
import com.vaklinov.zcashui.ZCashInstallationObserver.InstallationDetectionException;
import com.vaklinov.zcashui.msg.MessagingPanel;
/**
* Main ACASH Window.
*
* @author Ivan Vaklinov <[email protected]>
*/
public class ZCashUI
extends JFrame
{
private ZCashInstallationObserver installationObserver;
private ZCashClientCaller clientCaller;
private StatusUpdateErrorReporter errorReporter;
private WalletOperations walletOps;
private JMenuItem menuItemExit;
private JMenuItem menuItemAbout;
//private JMenuItem menuItemEncrypt;
private JMenuItem menuItemBackup;
private JMenuItem menuItemExportKeys;
private JMenuItem menuItemImportKeys;
private JMenuItem menuItemShowPrivateKey;
private JMenuItem menuItemImportOnePrivateKey;
private JMenuItem menuItemOwnIdentity;
private JMenuItem menuItemExportOwnIdentity;
private JMenuItem menuItemImportContactIdentity;
private JMenuItem menuItemAddMessagingGroup;
private JMenuItem menuItemRemoveContactIdentity;
private JMenuItem menuItemMessagingOptions;
private JMenuItem menuItemShareFileViaIPFS;
private DashboardPanel dashboard;
private AddressesPanel addresses;
private SendCashPanel sendPanel;
private AddressBookPanel addressBookPanel;
private MessagingPanel messagingPanel;
private List<Image> imageList;
JTabbedPane tabs;
public static final String CHARACTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789~`!@#$%^&*()-_=+[{]}\\|;:\'\",<.>/?";
public ZCashUI(StartupProgressDialog progressDialog)
throws IOException, InterruptedException, WalletCallException
{
super("ACASH Wallet 2.0.3");
if (progressDialog != null)
{
progressDialog.setProgressText("Starting GUI wallet...");
}
ClassLoader cl = this.getClass().getClassLoader();
imageList = new ArrayList<Image>();
imageList.add(new ImageIcon(cl.getResource("images/acash20x20.png")).getImage());
imageList.add(new ImageIcon(cl.getResource("images/acash30x30.png")).getImage());
imageList.add(new ImageIcon(cl.getResource("images/acash38x38.png")).getImage());
imageList.add(new ImageIcon(cl.getResource("images/acash40x40.png")).getImage());
imageList.add(new ImageIcon(cl.getResource("images/acash60x60.png")).getImage());
imageList.add(new ImageIcon(cl.getResource("images/acash80x80.png")).getImage());
imageList.add(new ImageIcon(cl.getResource("images/acash120x120.png")).getImage());
imageList.add(new ImageIcon(cl.getResource("images/ACASH.png")).getImage());
this.setIconImages(imageList);
Container contentPane = this.getContentPane();
errorReporter = new StatusUpdateErrorReporter(this);
installationObserver = new ZCashInstallationObserver(OSUtil.getProgramDirectory());
clientCaller = new ZCashClientCaller(OSUtil.getProgramDirectory());
if (installationObserver.isOnTestNet())
{
this.setTitle(this.getTitle() + " [using TESTNET]");
}
// Build content
tabs = new JTabbedPane();
Font oldTabFont = tabs.getFont();
Font newTabFont = new Font(oldTabFont.getName(), Font.BOLD | Font.ITALIC, oldTabFont.getSize() * 57 / 50);
tabs.setFont(newTabFont);
BackupTracker backupTracker = new BackupTracker(this);
tabs.addTab("Overview ",
new ImageIcon(cl.getResource("images/overview.png")),
dashboard = new DashboardPanel(this, installationObserver, clientCaller,
errorReporter, backupTracker));
tabs.addTab("Own addresses ",
new ImageIcon(cl.getResource("images/own-addresses.png")),
addresses = new AddressesPanel(this, clientCaller, errorReporter));
tabs.addTab("Send cash ",
new ImageIcon(cl.getResource("images/send.png")),
sendPanel = new SendCashPanel(clientCaller, errorReporter, installationObserver, backupTracker));
tabs.addTab("Address book ",
new ImageIcon(cl.getResource("images/address-book.png")),
addressBookPanel = new AddressBookPanel(sendPanel, tabs));
tabs.addTab("Messaging ",
new ImageIcon(cl.getResource("images/messaging.png")),
messagingPanel = new MessagingPanel(this, sendPanel, tabs, clientCaller, errorReporter));
contentPane.add(tabs);
this.walletOps = new WalletOperations(
this, tabs, dashboard, addresses, sendPanel,
installationObserver, clientCaller, errorReporter, backupTracker);
int width = 1024;
OS_TYPE os = OSUtil.getOSType();
// Window needs to be larger on Mac/Windows - typically
if ((os == OS_TYPE.WINDOWS) || (os == OS_TYPE.MAC_OS))
{
width = 1024;
}
this.setSize(new Dimension(width, 600));
// Build menu
JMenuBar mb = new JMenuBar();
JMenu file = new JMenu("Main");
file.setMnemonic(KeyEvent.VK_M);
int accelaratorKeyMask = Toolkit.getDefaultToolkit ().getMenuShortcutKeyMask();
file.add(menuItemAbout = new JMenuItem("About...", KeyEvent.VK_T));
menuItemAbout.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_T, accelaratorKeyMask));
file.addSeparator();
file.add(menuItemExit = new JMenuItem("Quit", KeyEvent.VK_Q));
menuItemExit.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_Q, accelaratorKeyMask));
mb.add(file);
JMenu wallet = new JMenu("Wallet");
wallet.setMnemonic(KeyEvent.VK_W);
wallet.add(menuItemBackup = new JMenuItem("Backup...", KeyEvent.VK_B));
menuItemBackup.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_B, accelaratorKeyMask));
//wallet.add(menuItemEncrypt = new JMenuItem("Encrypt...", KeyEvent.VK_E));
//menuItemEncrypt.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_E, accelaratorKeyMask));
wallet.add(menuItemExportKeys = new JMenuItem("Export private keys...", KeyEvent.VK_K));
menuItemExportKeys.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_K, accelaratorKeyMask));
wallet.add(menuItemImportKeys = new JMenuItem("Import private keys...", KeyEvent.VK_I));
menuItemImportKeys.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_I, accelaratorKeyMask));
wallet.add(menuItemShowPrivateKey = new JMenuItem("Show private key...", KeyEvent.VK_P));
menuItemShowPrivateKey.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_P, accelaratorKeyMask));
wallet.add(menuItemImportOnePrivateKey = new JMenuItem("Import one private key...", KeyEvent.VK_N));
menuItemImportOnePrivateKey.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_N, accelaratorKeyMask));
JMenuItem menuItemOpenWalletDirectory = new JMenuItem("Open wallet directory...");
wallet.add(menuItemOpenWalletDirectory);
mb.add(wallet);
JMenu messaging = new JMenu("Messaging");
messaging.setMnemonic(KeyEvent.VK_S);
messaging.add(menuItemOwnIdentity = new JMenuItem("Own identity...", KeyEvent.VK_D));
menuItemOwnIdentity.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_D, accelaratorKeyMask));
messaging.add(menuItemExportOwnIdentity = new JMenuItem("Export own identity...", KeyEvent.VK_X));
menuItemExportOwnIdentity.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_X, accelaratorKeyMask));
messaging.add(menuItemAddMessagingGroup = new JMenuItem("Add messaging group...", KeyEvent.VK_G));
menuItemAddMessagingGroup.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_G, accelaratorKeyMask));
messaging.add(menuItemImportContactIdentity = new JMenuItem("Import contact identity...", KeyEvent.VK_Y));
menuItemImportContactIdentity.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_Y, accelaratorKeyMask));
messaging.add(menuItemRemoveContactIdentity = new JMenuItem("Remove contact...", KeyEvent.VK_R));
menuItemRemoveContactIdentity.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_R, accelaratorKeyMask));
messaging.add(menuItemMessagingOptions = new JMenuItem("Options...", KeyEvent.VK_O));
menuItemMessagingOptions.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_O, accelaratorKeyMask));
JMenu shareFileVia = new JMenu("Share file via:");
shareFileVia.setMnemonic(KeyEvent.VK_V);
// TODO: uncomment this for IPFS integration
//messaging.add(shareFileVia);
shareFileVia.add(menuItemShareFileViaIPFS = new JMenuItem("IPFS", KeyEvent.VK_F));
menuItemShareFileViaIPFS.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_F, accelaratorKeyMask));
mb.add(messaging);
// TODO: Temporarily disable encryption until further notice - Oct 24 2016
//menuItemEncrypt.setEnabled(false);
this.setJMenuBar(mb);
// Add listeners etc.
menuItemExit.addActionListener(
new ActionListener()
{
@Override
public void actionPerformed(ActionEvent e)
{
ZCashUI.this.exitProgram();
}
}
);
menuItemAbout.addActionListener(
new ActionListener()
{
@Override
public void actionPerformed(ActionEvent e)
{
try
{
AboutDialog ad = new AboutDialog(ZCashUI.this);
ad.setVisible(true);
} catch (UnsupportedEncodingException uee)
{
Log.error("Unexpected error: ", uee);
ZCashUI.this.errorReporter.reportError(uee);
}
}
}
);
menuItemBackup.addActionListener(
new ActionListener()
{
@Override
public void actionPerformed(ActionEvent e)
{
ZCashUI.this.walletOps.backupWallet();
}
}
);
// menuItemEncrypt.addActionListener(
// new ActionListener()
// {
// @Override
// public void actionPerformed(ActionEvent e)
// {
// ZCashUI.this.walletOps.encryptWallet();
// }
// }
// );
menuItemExportKeys.addActionListener(
new ActionListener()
{
@Override
public void actionPerformed(ActionEvent e)
{
ZCashUI.this.walletOps.exportWalletPrivateKeys();
}
}
);
menuItemImportKeys.addActionListener(
new ActionListener()
{
@Override
public void actionPerformed(ActionEvent e)
{
ZCashUI.this.walletOps.importWalletPrivateKeys();
}
}
);
menuItemShowPrivateKey.addActionListener(
new ActionListener()
{
@Override
public void actionPerformed(ActionEvent e)
{
ZCashUI.this.walletOps.showPrivateKey();
}
}
);
menuItemImportOnePrivateKey.addActionListener(
new ActionListener()
{
@Override
public void actionPerformed(ActionEvent e)
{
ZCashUI.this.walletOps.importSinglePrivateKey();
}
}
);
menuItemOpenWalletDirectory.addActionListener(e -> {
try {
Desktop.getDesktop().open(new File(OSUtil.getBlockchainDirectory()));
} catch (IOException e1) {
Log.error("Open wallet directory exception.", e1);
}
}
);
menuItemOwnIdentity.addActionListener(
new ActionListener()
{
@Override
public void actionPerformed(ActionEvent e)
{
ZCashUI.this.messagingPanel.openOwnIdentityDialog();
}
}
);
menuItemExportOwnIdentity.addActionListener(
new ActionListener()
{
@Override
public void actionPerformed(ActionEvent e)
{
ZCashUI.this.messagingPanel.exportOwnIdentity();
}
}
);
menuItemImportContactIdentity.addActionListener(
new ActionListener()
{
@Override
public void actionPerformed(ActionEvent e)
{
ZCashUI.this.messagingPanel.importContactIdentity();
}
}
);
menuItemAddMessagingGroup.addActionListener(
new ActionListener()
{
@Override
public void actionPerformed(ActionEvent e)
{
ZCashUI.this.messagingPanel.addMessagingGroup();
}
}
);
menuItemRemoveContactIdentity.addActionListener(
new ActionListener()
{
@Override
public void actionPerformed(ActionEvent e)
{
ZCashUI.this.messagingPanel.removeSelectedContact();
}
}
);
menuItemMessagingOptions.addActionListener(
new ActionListener()
{
@Override
public void actionPerformed(ActionEvent e)
{
ZCashUI.this.messagingPanel.openOptionsDialog();
}
}
);
menuItemShareFileViaIPFS.addActionListener(
new ActionListener()
{
@Override
public void actionPerformed(ActionEvent e)
{
ZCashUI.this.messagingPanel.shareFileViaIPFS();
}
}
);
// Close operation
this.setDefaultCloseOperation(DO_NOTHING_ON_CLOSE);
this.addWindowListener(new WindowAdapter()
{
@Override
public void windowClosing(WindowEvent e)
{
ZCashUI.this.exitProgram();
}
});
// Show initial message
SwingUtilities.invokeLater(new Runnable()
{
public void run()
{
try
{
String userDir = OSUtil.getSettingsDirectory();
File warningFlagFile = new File(userDir + File.separator + "initialInfoShown_0.75.flag");
if (warningFlagFile.exists())
{
return;
} else
{
warningFlagFile.createNewFile();
}
} catch (IOException ioe)
{
/* TODO: report exceptions to the user */
Log.error("Unexpected error: ", ioe);
}
JOptionPane.showMessageDialog(
ZCashUI.this.getRootPane().getParent(),
"The ACASH GUI Wallet is currently considered experimental. Use of this software\n" +
"comes at your own risk! Be sure to read the list of known issues and limitations\n" +
"at this page: https://github.com/acashcrypto/acash-wallet\n\n" +
"THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n" +
"IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n" +
"FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n" +
"AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n" +
"LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n" +
"OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n" +
"THE SOFTWARE.\n\n" +
"(This message will be shown only once, per release)",
"Disclaimer", JOptionPane.INFORMATION_MESSAGE);
}
});
// Finally dispose of the progress dialog
if (progressDialog != null)
{
progressDialog.doDispose();
}
// Notify the messaging TAB that it is being selected - every time
tabs.addChangeListener(
new ChangeListener()
{
@Override
public void stateChanged(ChangeEvent e)
{
JTabbedPane tabs = (JTabbedPane)e.getSource();
if (tabs.getSelectedIndex() == 4)
{
ZCashUI.this.messagingPanel.tabSelected();
}
}
}
);
}
public void exitProgram()
{
Log.info("Exiting ...");
this.setCursor(Cursor.getPredefinedCursor(Cursor.WAIT_CURSOR));
this.dashboard.stopThreadsAndTimers();
this.addresses.stopThreadsAndTimers();
this.sendPanel.stopThreadsAndTimers();
this.messagingPanel.stopThreadsAndTimers();
ZCashUI.this.setVisible(false);
ZCashUI.this.dispose();
System.exit(0);
}
public static void main(String argv[])
throws IOException
{
try
{
OS_TYPE os = OSUtil.getOSType();
if ((os == OS_TYPE.WINDOWS) || (os == OS_TYPE.MAC_OS))
{
possiblyCreateZENConfigFile();
}
Log.info("Starting ACASH Wallet ...");
Log.info("OS: " + System.getProperty("os.name") + " = " + os);
Log.info("Current directory: " + new File(".").getCanonicalPath());
Log.info("Class path: " + System.getProperty("java.class.path"));
Log.info("Environment PATH: " + System.getenv("PATH"));
// Look and feel settings - a custom OS-look and feel is set for Windows
if (os == OS_TYPE.WINDOWS)
{
// Custom Windows L&F and font settings
UIManager.setLookAndFeel("com.sun.java.swing.plaf.windows.WindowsLookAndFeel");
// This font looks good but on Windows 7 it misses some chars like the stars...
//FontUIResource font = new FontUIResource("Lucida Sans Unicode", Font.PLAIN, 11);
//UIManager.put("Table.font", font);
} else if (os == OS_TYPE.MAC_OS)
{
// The MacOS L&F is active by default - the property sets the menu bar Mac style
System.setProperty("apple.laf.useScreenMenuBar", "true");
}
else
{
for (LookAndFeelInfo ui : UIManager.getInstalledLookAndFeels())
{
Log.info("Available look and feel: " + ui.getName() + " " + ui.getClassName());
if (ui.getName().equals("Nimbus"))
{
Log.info("Setting look and feel: {0}", ui.getClassName());
UIManager.setLookAndFeel(ui.getClassName());
break;
}
}
}
// If acashd is currently not running, do a startup of the daemon as a child process
// It may be started but not ready - then also show dialog
ZCashInstallationObserver initialInstallationObserver =
new ZCashInstallationObserver(OSUtil.getProgramDirectory());
DaemonInfo zcashdInfo = initialInstallationObserver.getDaemonInfo();
initialInstallationObserver = null;
ZCashClientCaller initialClientCaller = new ZCashClientCaller(OSUtil.getProgramDirectory());
boolean daemonStartInProgress = false;
try
{
if (zcashdInfo.status == DAEMON_STATUS.RUNNING)
{
NetworkAndBlockchainInfo info = initialClientCaller.getNetworkAndBlockchainInfo();
// If more than 20 minutes behind in the blockchain - startup in progress
if ((System.currentTimeMillis() - info.lastBlockDate.getTime()) > (20 * 60 * 1000))
{
Log.info("Current blockchain synchronization date is " +
new Date(info.lastBlockDate.getTime()));
daemonStartInProgress = true;
}
}
} catch (WalletCallException wce)
{
if ((wce.getMessage().indexOf("{\"code\":-28") != -1) || // Started but not ready
(wce.getMessage().indexOf("error code: -28") != -1))
{
Log.info("acashd is currently starting...");
daemonStartInProgress = true;
}
}
StartupProgressDialog startupBar = null;
if ((zcashdInfo.status != DAEMON_STATUS.RUNNING) || (daemonStartInProgress))
{
Log.info(
"acashd is not runing at the moment or has not started/synchronized 100% - showing splash...");
startupBar = new StartupProgressDialog(initialClientCaller);
startupBar.setVisible(true);
startupBar.waitForStartup();
}
initialClientCaller = null;
// Main GUI is created here
ZCashUI ui = new ZCashUI(startupBar);
ui.setVisible(true);
} catch (InstallationDetectionException ide)
{
Log.error("Unexpected error: ", ide);
JOptionPane.showMessageDialog(
null,
"This program was started in directory: " + OSUtil.getProgramDirectory() + "\n" +
ide.getMessage() + "\n" +
"See the console/logfile output for more detailed error information!",
"Installation error",
JOptionPane.ERROR_MESSAGE);
System.exit(1);
} catch (WalletCallException wce)
{
Log.error("Unexpected error: ", wce);
if ((wce.getMessage().indexOf("{\"code\":-28,\"message\"") != -1) ||
(wce.getMessage().indexOf("error code: -28") != -1))
{
JOptionPane.showMessageDialog(
null,
"It appears that acashd has been started but is not ready to accept wallet\n" +
"connections. It is still loading the wallet and blockchain. Please try to \n" +
"start the GUI wallet later...",
"Wallet communication error",
JOptionPane.ERROR_MESSAGE);
} else
{
JOptionPane.showMessageDialog(
null,
"There was a problem communicating with the ACASH daemon/wallet. \n" +
"Please ensure that the ACASH server acashd is started (e.g. via \n" +
"command \"acashd --daemon\"). Error message is: \n" +
wce.getMessage() + "\n" +
"See the console/logfile output for more detailed error information!",
"Wallet communication error",
JOptionPane.ERROR_MESSAGE);
}
System.exit(2);
} catch (Exception e)
{
Log.error("Unexpected error: ", e);
JOptionPane.showMessageDialog(
null,
"A general unexpected critical error has occurred: \n" + e.getMessage() + "\n" +
"See the console/logfile output for more detailed error information!",
"Error",
JOptionPane.ERROR_MESSAGE);
System.exit(3);
} catch (Error err)
{
// Last resort catch for unexpected problems - just to inform the user
err.printStackTrace();
JOptionPane.showMessageDialog(
null,
"A general unexpected critical/unrecoverable error has occurred: \n" + err.getMessage() + "\n" +
"See the console/logfile output for more detailed error information!",
"Error",
JOptionPane.ERROR_MESSAGE);
System.exit(4);
}
}
private static String getPassword(Random random) {
StringBuilder pass = new StringBuilder();
IntStream.range(0, 15)
.forEach(i -> pass.append(Character.toString(CHARACTERS.charAt(random.nextInt(CHARACTERS.length())))));
return pass.toString();
}
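// Note: getPassword() above is only used below to generate throwaway RPC credentials for the
// auto-created acash.conf. Each call yields 15 characters drawn from the CHARACTERS alphabet
// defined elsewhere, e.g. (illustrative output only): "k3Qf9ZpL0aWv27M".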
public static void possiblyCreateZENConfigFile()
throws IOException
{
String blockchainDir = OSUtil.getBlockchainDirectory();
File dir = new File(blockchainDir);
if (!dir.exists())
{
if (!dir.mkdirs())
{
Log.error("ERROR: Could not create settings directory: " + dir.getCanonicalPath());
throw new IOException("Could not create settings directory: " + dir.getCanonicalPath());
}
}
File zenConfigFile = new File(dir, "acash.conf");
if (!zenConfigFile.exists())
{
Log.info("ACASH configuration file " + zenConfigFile.getCanonicalPath() +
" does not exist. It will be created with default settings.");
Random random = new SecureRandom();
PrintStream configOut = new PrintStream(new FileOutputStream(zenConfigFile));
configOut.println("#############################################################################");
configOut.println("# ACASH configuration file #");
configOut.println("#############################################################################");
configOut.println("# This file has been automatically generated by the ACASH GUI wallet with #");
configOut.println("# default settings. It may be further cutsomized by hand only. #");
configOut.println("#############################################################################");
configOut.println("# Creation date: " + new Date().toString());
configOut.println("#############################################################################");
configOut.println("");
configOut.println("# The rpcuser/rpcpassword are used for the local call to acashd");
configOut.println("rpcuser=User" + getPassword(random));
configOut.println("rpcpassword=Pass" + getPassword(random));
configOut.println("");
configOut.close();
}
}
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
java
| 1 | 0 | |
cmd/analytics.go
|
package cmd
import (
"context"
"net/http"
"os"
"time"
)
func sendAnalytics() {
if os.Getenv("DECK_ANALYTICS") == "off" {
return
}
if len(os.Args) < 2 {
return
}
cmd := os.Args[1]
if cmd == "help" ||
cmd == "ping" ||
cmd == "version" {
return
}
// HTTP to avoid latency due to handshake
URL := "http://d.yolo42.com/" + cmd
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
req, _ := http.NewRequestWithContext(ctx, "GET", URL, nil)
req.Header["deck-version"] = []string{VERSION}
// Best-effort fire-and-forget: ignore failures, but close the body when a response arrives.
if resp, err := http.DefaultClient.Do(req); err == nil {
resp.Body.Close()
}
}
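// Opt-out sketch (hypothetical invocation; the binary name "deck" is assumed from the package):
//   DECK_ANALYTICS=off deck sync
// Only the literal value "off" disables the ping; help, ping and version never report.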
|
[
"\"DECK_ANALYTICS\""
] |
[] |
[
"DECK_ANALYTICS"
] |
[]
|
["DECK_ANALYTICS"]
|
go
| 1 | 0 | |
cli/config/configdir.go
|
package config // import "github.com/demonoid81/moby/cli/config"
import (
"os"
"path/filepath"
"github.com/demonoid81/moby/pkg/homedir"
)
var (
configDir = os.Getenv("DOCKER_CONFIG")
configFileDir = ".docker"
)
// Dir returns the path to the configuration directory as specified by the DOCKER_CONFIG environment variable.
// If DOCKER_CONFIG is unset, Dir returns ~/.docker .
// Dir ignores XDG_CONFIG_HOME (same as the docker client).
// TODO: this was copied from cli/config/configfile and should be removed once cmd/dockerd moves
func Dir() string {
return configDir
}
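// Minimal usage sketch (hypothetical caller; the file name below is illustrative):
//   cfgPath := filepath.Join(config.Dir(), "config.json")
// With DOCKER_CONFIG=/etc/docker-cli set at startup, cfgPath becomes /etc/docker-cli/config.json;
// with it unset, Dir() falls back to ~/.docker once init() has run.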
func init() {
if configDir == "" {
configDir = filepath.Join(homedir.Get(), configFileDir)
}
}
|
[
"\"DOCKER_CONFIG\""
] |
[] |
[
"DOCKER_CONFIG"
] |
[]
|
["DOCKER_CONFIG"]
|
go
| 1 | 0 | |
hack/toolbox/server/server.go
|
package main
import (
"fmt"
"math/rand"
"net"
"net/http"
"os"
"strconv"
"strings"
"time"
)
const (
httpport = 8080
tcp = "tcp"
tcpport = 8085
udp = "udp"
udpport = 8086
buffersize = 1024
)
func main() {
tcpPort, err := strconv.Atoi(os.Getenv("TCP_PORT"))
if err != nil {
tcpPort = tcpport
fmt.Printf("TCP_PORT not set, defaulting to port %d\n", tcpport)
}
udpPort, err := strconv.Atoi(os.Getenv("UDP_PORT"))
if err != nil {
udpPort = udpport
fmt.Printf("UDP_PORT not set, defaulting to port %d\n", udpport)
}
httpPort, err := strconv.Atoi(os.Getenv("HTTP_PORT"))
if err != nil {
httpPort = httpport
fmt.Printf("HTTP_PORT not set, defaulting to port %d\n", httpport)
}
go listenOnUDP(udpPort)
go listenOnTCP(tcpPort)
listenHTTP(httpPort)
}
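// Hypothetical invocation overriding the default ports (8080/8085/8086):
//   TCP_PORT=9085 UDP_PORT=9086 HTTP_PORT=9090 go run ./hack/toolbox/server
// Unset or non-numeric values fall back to the constants declared above.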
func listenHTTP(port int) {
http.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) {
fmt.Printf("[HTTP] Received Connection from %v\n", r.RemoteAddr)
_, err := rw.Write(getResponse(r.RemoteAddr, "http"))
if err != nil {
fmt.Println(err)
}
})
p := strconv.Itoa(port)
fmt.Printf("[HTTP] Listening on %+v\n", p)
if err := http.ListenAndServe(":"+p, nil); err != nil {
panic(err)
}
}
func listenOnTCP(port int) {
listener, err := net.ListenTCP(tcp, &net.TCPAddr{Port: port})
if err != nil {
fmt.Println(err)
return
}
defer listener.Close()
fmt.Printf("[TCP] Listening on %+v\n", listener.Addr().String())
rand.Seed(time.Now().Unix())
for {
connection, err := listener.Accept()
if err != nil {
fmt.Println(err)
return
}
go handleConnection(connection)
}
}
func handleConnection(connection net.Conn) {
addressString := fmt.Sprintf("%+v", connection.RemoteAddr())
fmt.Printf("[TCP] Received Connection from %s\n", addressString)
_, err := connection.Write(getResponse(addressString, tcp))
if err != nil {
fmt.Println(err)
}
err = connection.Close()
if err != nil {
fmt.Println(err)
}
}
func getResponse(addressString, protocol string) []byte {
hostname, _ := os.Hostname()
interfaces, _ := net.Interfaces()
var base string
for _, iface := range interfaces {
base += fmt.Sprintf("\t%+v\n", iface.Name)
addrs, _ := iface.Addrs()
for _, addr := range addrs {
base += fmt.Sprintf("\t\t%+v\n", addr)
}
}
return []byte(fmt.Sprintf("Connected To: %s via %s\nConnected From: %v\nRemote Interfaces:\n%v", hostname, protocol, addressString, base))
}
func listenOnUDP(port int) {
connection, err := net.ListenUDP(udp, &net.UDPAddr{Port: port})
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("[UDP] Listening on %+v\n", connection.LocalAddr().String())
defer connection.Close()
buffer := make([]byte, buffersize)
rand.Seed(time.Now().Unix())
for {
n, addr, err := connection.ReadFromUDP(buffer)
if err != nil {
fmt.Println(err)
continue
}
payload := strings.TrimSpace(string(buffer[:n]))
if payload == "STOP" {
fmt.Println("Exiting UDP server")
return
}
addressString := fmt.Sprintf("%+v", addr)
fmt.Printf("[UDP] Received Connection from %s\n", addressString)
_, err = connection.WriteToUDP(getResponse(addressString, udp), addr)
if err != nil {
fmt.Println(err)
return
}
}
}
|
[
"\"TCP_PORT\"",
"\"UDP_PORT\"",
"\"HTTP_PORT\""
] |
[] |
[
"HTTP_PORT",
"UDP_PORT",
"TCP_PORT"
] |
[]
|
["HTTP_PORT", "UDP_PORT", "TCP_PORT"]
|
go
| 3 | 0 | |
3rdParty/mpe2/mpe2-mpich2-1.5/build.py
|
import os
from askapdev.rbuild.builders import Autotools as Builder
import askapdev.rbuild.utils as utils
platform = utils.get_platform()
builder = Builder()
builder.remote_archive = "mpe2-mpich2-1.5.tar.gz"
# Use MPI compiler wrapper (except on Cray where cc and c++ are wrappers)
if not os.environ.has_key("CRAYOS_VERSION"):
builder.add_option("MPI_CC=mpicc")
builder.add_option("MPI_F77=mpif77")
# MacOSX MPI is not necessarily built with f77 support,
# and on Linux we use gfortran
if platform['system'] == 'Darwin':
builder.add_option("--disable-f77")
java_home = os.getenv('JAVA_HOME')
if java_home:
builder.add_option("--with-java=%s" % java_home)
elif os.environ.has_key("CRAYOS_VERSION"):
builder.add_option("F77=ftn")
else:
builder.add_option("F77=gfortran")
builder.add_option("--disable-checkMPI")
builder.add_option("--disable-graphics")
builder.add_option("--disable-wrappers")
builder.add_option("--disable-collchk")
builder.nowarnings = True
builder.build()
|
[] |
[] |
[
"JAVA_HOME"
] |
[]
|
["JAVA_HOME"]
|
python
| 1 | 0 | |
vm/interpreter_test.go
|
package vm_test
import (
"bytes"
"encoding/binary"
"io/ioutil"
"log"
"math"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"testing"
"github.com/vsariola/sointu"
"github.com/vsariola/sointu/vm"
"gopkg.in/yaml.v2"
)
func TestAllRegressionTests(t *testing.T) {
_, myname, _, _ := runtime.Caller(0)
files, err := filepath.Glob(path.Join(path.Dir(myname), "..", "tests", "*.yml"))
if err != nil {
t.Fatalf("cannot glob files in the test directory: %v", err)
}
for _, filename := range files {
basename := filepath.Base(filename)
testname := strings.TrimSuffix(basename, path.Ext(basename))
t.Run(testname, func(t *testing.T) {
if strings.Contains(testname, "sample") {
t.Skip("Samples (gm.dls) not available in the interpreter VM at the moment")
return
}
asmcode, err := ioutil.ReadFile(filename)
if err != nil {
t.Fatalf("cannot read the .asm file: %v", filename)
}
var song sointu.Song
err = yaml.Unmarshal(asmcode, &song)
if err != nil {
t.Fatalf("could not parse the .yml file: %v", err)
}
synth, err := vm.Synth(song.Patch)
if err != nil {
t.Fatalf("Compiling patch failed: %v", err)
}
buffer, syncBuffer, err := sointu.Play(synth, song)
if err != nil {
t.Fatalf("Play failed: %v", err)
}
buffer = buffer[:song.Score.LengthInRows()*song.SamplesPerRow()*2] // extend to the nominal length always.
if os.Getenv("SOINTU_TEST_SAVE_OUTPUT") == "YES" {
outputpath := path.Join(path.Dir(myname), "actual_output")
if _, err := os.Stat(outputpath); os.IsNotExist(err) {
os.Mkdir(outputpath, 0755)
}
outFileName := path.Join(path.Dir(myname), "actual_output", testname+".raw")
outfile, err := os.OpenFile(outFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755)
if err != nil {
t.Fatalf("Creating file failed: %v", err)
}
defer outfile.Close()
var createdbuf bytes.Buffer
err = binary.Write(&createdbuf, binary.LittleEndian, buffer)
if err != nil {
t.Fatalf("error converting buffer: %v", err)
}
_, err = outfile.Write(createdbuf.Bytes())
if err != nil {
log.Fatal(err)
}
}
compareToRawFloat32(t, buffer, testname+".raw")
if strings.Contains(testname, "sync") {
compareToRawFloat32(t, syncBuffer, testname+"_syncbuf.raw")
}
})
}
}
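// To keep the rendered buffers for inspection (hypothetical invocation from the repository root):
//   SOINTU_TEST_SAVE_OUTPUT=YES go test ./vm/...
// Output is written to vm/actual_output/<testname>.raw as little-endian float32 samples.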
func TestStackUnderflow(t *testing.T) {
patch := sointu.Patch{sointu.Instrument{NumVoices: 1, Units: []sointu.Unit{
sointu.Unit{Type: "pop", Parameters: map[string]int{}},
}}}
synth, err := vm.Synth(patch)
if err != nil {
t.Fatalf("bridge compile error: %v", err)
}
buffer := make([]float32, 2)
err = sointu.Render(synth, buffer)
if err == nil {
t.Fatalf("rendering should have failed due to stack underflow")
}
}
func TestStackBalancing(t *testing.T) {
patch := sointu.Patch{
sointu.Instrument{NumVoices: 1, Units: []sointu.Unit{
sointu.Unit{Type: "push", Parameters: map[string]int{}},
}}}
synth, err := vm.Synth(patch)
if err != nil {
t.Fatalf("bridge compile error: %v", err)
}
buffer := make([]float32, 2)
err = sointu.Render(synth, buffer)
if err == nil {
t.Fatalf("rendering should have failed due to unbalanced stack push/pop")
}
}
func compareToRawFloat32(t *testing.T, buffer []float32, rawname string) {
_, filename, _, _ := runtime.Caller(0)
expectedb, err := ioutil.ReadFile(path.Join(path.Dir(filename), "..", "tests", "expected_output", rawname))
if err != nil {
t.Fatalf("cannot read expected: %v", err)
}
expected := make([]float32, len(expectedb)/4)
buf := bytes.NewReader(expectedb)
err = binary.Read(buf, binary.LittleEndian, &expected)
if err != nil {
t.Fatalf("error converting expected buffer: %v", err)
}
if len(expected) != len(buffer) {
t.Fatalf("buffer length mismatch, got %v, expected %v", len(buffer), len(expected))
}
firsterr := -1
errs := 0
for i, v := range expected[1 : len(expected)-1] {
if math.IsNaN(float64(buffer[i])) || (math.Abs(float64(v-buffer[i])) > 1e-2 &&
math.Abs(float64(v-buffer[i+1])) > 1e-2 && math.Abs(float64(v-buffer[i+2])) > 1e-2) {
errs++
if firsterr == -1 {
firsterr = i
}
if errs > 200 { // we are again quite liberal with rounding errors, as different platforms have minor differences in floating point rounding
t.Fatalf("more than 200 errors bigger than 1e-2 detected, first at sample position %v", firsterr)
}
}
}
}
func compareToRawInt16(t *testing.T, buffer []int16, rawname string) {
_, filename, _, _ := runtime.Caller(0)
expectedb, err := ioutil.ReadFile(path.Join(path.Dir(filename), "..", "tests", "expected_output", rawname))
if err != nil {
t.Fatalf("cannot read expected: %v", err)
}
expected := make([]int16, len(expectedb)/2)
buf := bytes.NewReader(expectedb)
err = binary.Read(buf, binary.LittleEndian, &expected)
if err != nil {
t.Fatalf("error converting expected buffer: %v", err)
}
if len(expected) != len(buffer) {
t.Fatalf("buffer length mismatch, got %v, expected %v", len(buffer), len(expected))
}
for i, v := range expected {
if v != buffer[i] { // int16 samples cannot be NaN, so a direct comparison suffices
t.Fatalf("error at sample position %v", i)
}
}
}
func convertToInt16Buffer(buffer []float32) []int16 {
int16Buffer := make([]int16, len(buffer))
for i, v := range buffer {
int16Buffer[i] = int16(math.Round(math.Min(math.Max(float64(v), -1.0), 1.0) * 32767))
}
return int16Buffer
}
|
[
"\"SOINTU_TEST_SAVE_OUTPUT\""
] |
[] |
[
"SOINTU_TEST_SAVE_OUTPUT"
] |
[]
|
["SOINTU_TEST_SAVE_OUTPUT"]
|
go
| 1 | 0 | |
pkg/resource/plugin/host.go
|
// Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plugin
import (
"os"
"github.com/blang/semver"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/pkg/diag"
"github.com/pulumi/pulumi/pkg/resource"
"github.com/pulumi/pulumi/pkg/tokens"
"github.com/pulumi/pulumi/pkg/util/cmdutil"
"github.com/pulumi/pulumi/pkg/util/contract"
"github.com/pulumi/pulumi/pkg/util/logging"
"github.com/pulumi/pulumi/pkg/workspace"
)
// A Host hosts provider plugins and makes them easily accessible by package name.
type Host interface {
// ServerAddr returns the address at which the host's RPC interface may be found.
ServerAddr() string
// Log logs a message, including errors and warnings. Messages can have a resource URN
// associated with them. If no urn is provided, the message is global.
Log(sev diag.Severity, urn resource.URN, msg string, streamID int32)
// LogStatus logs a status message, including errors and warnings. Status messages show
// up in the `Info` column of the progress display, but not in the final output. Messages can
// have a resource URN associated with them. If no urn is provided, the message is global.
LogStatus(sev diag.Severity, urn resource.URN, msg string, streamID int32)
// Analyzer fetches the analyzer with a given name, possibly lazily allocating the plugins for it. If an analyzer
// could not be found, or an error occurred while creating it, a non-nil error is returned.
Analyzer(nm tokens.QName) (Analyzer, error)
// Provider loads a new copy of the provider for a given package. If a provider for this package could not be
// found, or an error occurs while creating it, a non-nil error is returned.
Provider(pkg tokens.Package, version *semver.Version) (Provider, error)
// CloseProvider closes the given provider plugin and deregisters it from this host.
CloseProvider(provider Provider) error
// LanguageRuntime fetches the language runtime plugin for a given language, lazily allocating if necessary. If
// an implementation of this language runtime wasn't found, or an error occurs, a non-nil error is returned.
LanguageRuntime(runtime string) (LanguageRuntime, error)
// ListPlugins lists all plugins that have been loaded, with version information.
ListPlugins() []workspace.PluginInfo
// EnsurePlugins ensures all plugins in the given array are loaded and ready to use. If any plugins are missing,
// and/or there are errors loading one or more plugins, a non-nil error is returned.
EnsurePlugins(plugins []workspace.PluginInfo, kinds Flags) error
// GetRequiredPlugins lists a full set of plugins that will be required by the given program.
GetRequiredPlugins(info ProgInfo, kinds Flags) ([]workspace.PluginInfo, error)
// SignalCancellation asks all resource providers to gracefully shut down and abort any ongoing
// operations. Operations aborted in this way will return an error (e.g., `Update` and `Create`
// will return either a creation error or an initialization error). SignalCancellation is advisory and
// non-blocking; it is up to the host to decide how long to wait after SignalCancellation is
// called before (e.g.) hard-closing any gRPC connection.
SignalCancellation() error
// Close reclaims any resources associated with the host.
Close() error
}
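// A minimal consumption sketch (assumed caller-side code, not part of this package):
//   host, err := plugin.NewDefaultHost(ctx, cfg, nil, nil)
//   if err != nil { return err }
//   defer host.Close()
//   prov, err := host.Provider(tokens.Package("aws"), nil) // lazily loads the aws resource plugin
//   ...
//   _ = host.CloseProvider(prov)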
// Events provides higher-level consumers of the plugin model to attach callbacks on
// plugin load events.
type Events interface {
// OnPluginLoad is fired by the plugin host whenever a new plugin is successfully loaded.
// newPlugin is the plugin that was loaded.
OnPluginLoad(newPlugin workspace.PluginInfo) error
}
// NewDefaultHost implements the standard plugin logic, using the standard installation root to find them.
func NewDefaultHost(ctx *Context, config ConfigSource, events Events,
runtimeOptions map[string]interface{}) (Host, error) {
host := &defaultHost{
ctx: ctx,
config: config,
events: events,
runtimeOptions: runtimeOptions,
analyzerPlugins: make(map[tokens.QName]*analyzerPlugin),
languagePlugins: make(map[string]*languagePlugin),
resourcePlugins: make(map[Provider]*resourcePlugin),
reportedResourcePlugins: make(map[string]struct{}),
loadRequests: make(chan pluginLoadRequest),
}
// Fire up a gRPC server to listen for requests. This acts as an RPC interface that plugins can use
// to "phone home" in case there are things the host must do on behalf of the plugins (like log, etc).
svr, err := newHostServer(host, ctx)
if err != nil {
return nil, err
}
host.server = svr
// Start a goroutine we'll use to satisfy load requests serially and avoid race conditions.
go func() {
for req := range host.loadRequests {
req.result <- req.load()
}
}()
return host, nil
}
type pluginLoadRequest struct {
load func() error
result chan<- error
}
type defaultHost struct {
ctx *Context // the shared context for this host.
config ConfigSource // the source for provider configuration parameters.
events Events // optional callbacks for plugin load events
runtimeOptions map[string]interface{} // options to pass to the language plugins.
analyzerPlugins map[tokens.QName]*analyzerPlugin // a cache of analyzer plugins and their processes.
languagePlugins map[string]*languagePlugin // a cache of language plugins and their processes.
resourcePlugins map[Provider]*resourcePlugin // the set of loaded resource plugins.
reportedResourcePlugins map[string]struct{} // the set of unique resource plugins we'll report.
plugins []workspace.PluginInfo // a list of plugins allocated by this host.
loadRequests chan pluginLoadRequest // a channel used to satisfy plugin load requests.
server *hostServer // the server's RPC machinery.
}
var _ Host = (*defaultHost)(nil)
type analyzerPlugin struct {
Plugin Analyzer
Info workspace.PluginInfo
}
type languagePlugin struct {
Plugin LanguageRuntime
Info workspace.PluginInfo
}
type resourcePlugin struct {
Plugin Provider
Info workspace.PluginInfo
}
func (host *defaultHost) ServerAddr() string {
return host.server.Address()
}
func (host *defaultHost) Log(sev diag.Severity, urn resource.URN, msg string, streamID int32) {
host.ctx.Diag.Logf(sev, diag.StreamMessage(urn, msg, streamID))
}
func (host *defaultHost) LogStatus(sev diag.Severity, urn resource.URN, msg string, streamID int32) {
host.ctx.StatusDiag.Logf(sev, diag.StreamMessage(urn, msg, streamID))
}
// loadPlugin sends an appropriate load request to the plugin loader and returns the loaded plugin (if any) and error.
func (host *defaultHost) loadPlugin(load func() (interface{}, error)) (interface{}, error) {
var plugin interface{}
result := make(chan error)
host.loadRequests <- pluginLoadRequest{
load: func() error {
p, err := load()
plugin = p
return err
},
result: result,
}
return plugin, <-result
}
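// Pattern note: every load and close below funnels through loadPlugin, so the single goroutine
// started in NewDefaultHost is the only writer of the plugin maps. The call shape is roughly:
//   p, err := host.loadPlugin(func() (interface{}, error) { return NewAnalyzer(host, host.ctx, name) })
// The closure runs on the loader goroutine; the caller blocks on the result channel.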
func (host *defaultHost) Analyzer(name tokens.QName) (Analyzer, error) {
plugin, err := host.loadPlugin(func() (interface{}, error) {
// First see if we already loaded this plugin.
if plug, has := host.analyzerPlugins[name]; has {
contract.Assert(plug != nil)
return plug.Plugin, nil
}
// If not, try to load and bind to a plugin.
plug, err := NewAnalyzer(host, host.ctx, name)
if err == nil && plug != nil {
info, infoerr := plug.GetPluginInfo()
if infoerr != nil {
return nil, infoerr
}
// Memoize the result.
host.plugins = append(host.plugins, info)
host.analyzerPlugins[name] = &analyzerPlugin{Plugin: plug, Info: info}
if host.events != nil {
if eventerr := host.events.OnPluginLoad(info); eventerr != nil {
return nil, errors.Wrapf(eventerr, "failed to perform plugin load callback")
}
}
}
return plug, err
})
if plugin == nil || err != nil {
return nil, err
}
return plugin.(Analyzer), nil
}
func (host *defaultHost) Provider(pkg tokens.Package, version *semver.Version) (Provider, error) {
plugin, err := host.loadPlugin(func() (interface{}, error) {
// Try to load and bind to a plugin.
plug, err := NewProvider(host, host.ctx, pkg, version)
if err == nil && plug != nil {
info, infoerr := plug.GetPluginInfo()
if infoerr != nil {
return nil, infoerr
}
// Warn if the plugin version was not what we expected
if version != nil && !cmdutil.IsTruthy(os.Getenv("PULUMI_DEV")) {
if info.Version == nil || !info.Version.GTE(*version) {
var v string
if info.Version != nil {
v = info.Version.String()
}
host.ctx.Diag.Warningf(
diag.Message("", /*urn*/
"resource plugin %s is expected to have version >=%s, but has %s; "+
"the wrong version may be on your path, or this may be a bug in the plugin"),
info.Name, version.String(), v)
}
}
// Record the result and add the plugin's info to our list of loaded plugins if it's the first copy of its
// kind.
key := info.Name
if info.Version != nil {
key += info.Version.String()
}
_, alreadyReported := host.reportedResourcePlugins[key]
if !alreadyReported {
host.reportedResourcePlugins[key] = struct{}{}
host.plugins = append(host.plugins, info)
}
host.resourcePlugins[plug] = &resourcePlugin{Plugin: plug, Info: info}
if host.events != nil && !alreadyReported {
if eventerr := host.events.OnPluginLoad(info); eventerr != nil {
return nil, errors.Wrapf(eventerr, "failed to perform plugin load callback")
}
}
}
return plug, err
})
if plugin == nil || err != nil {
return nil, err
}
return plugin.(Provider), nil
}
func (host *defaultHost) LanguageRuntime(runtime string) (LanguageRuntime, error) {
plugin, err := host.loadPlugin(func() (interface{}, error) {
// First see if we already loaded this plugin.
if plug, has := host.languagePlugins[runtime]; has {
contract.Assert(plug != nil)
return plug.Plugin, nil
}
// If not, allocate a new one.
plug, err := NewLanguageRuntime(host, host.ctx, runtime, host.runtimeOptions)
if err == nil && plug != nil {
info, infoerr := plug.GetPluginInfo()
if infoerr != nil {
return nil, infoerr
}
// Memoize the result.
host.plugins = append(host.plugins, info)
host.languagePlugins[runtime] = &languagePlugin{Plugin: plug, Info: info}
if host.events != nil {
if eventerr := host.events.OnPluginLoad(info); eventerr != nil {
return nil, errors.Wrapf(eventerr, "failed to perform plugin load callback")
}
}
}
return plug, err
})
if plugin == nil || err != nil {
return nil, err
}
return plugin.(LanguageRuntime), nil
}
func (host *defaultHost) ListPlugins() []workspace.PluginInfo {
return host.plugins
}
// EnsurePlugins ensures all plugins in the given array are loaded and ready to use. If any plugins are missing,
// and/or there are errors loading one or more plugins, a non-nil error is returned.
func (host *defaultHost) EnsurePlugins(plugins []workspace.PluginInfo, kinds Flags) error {
// Use a multierror to track failures so we can return one big list of all failures at the end.
var result error
for _, plugin := range plugins {
switch plugin.Kind {
case workspace.AnalyzerPlugin:
if kinds&AnalyzerPlugins != 0 {
if _, err := host.Analyzer(tokens.QName(plugin.Name)); err != nil {
result = multierror.Append(result,
errors.Wrapf(err, "failed to load analyzer plugin %s", plugin.Name))
}
}
case workspace.LanguagePlugin:
if kinds&LanguagePlugins != 0 {
if _, err := host.LanguageRuntime(plugin.Name); err != nil {
result = multierror.Append(result,
errors.Wrapf(err, "failed to load language plugin %s", plugin.Name))
}
}
case workspace.ResourcePlugin:
if kinds&ResourcePlugins != 0 {
if _, err := host.Provider(tokens.Package(plugin.Name), plugin.Version); err != nil {
result = multierror.Append(result,
errors.Wrapf(err, "failed to load resource plugin %s", plugin.Name))
}
}
default:
contract.Failf("unexpected plugin kind: %s", plugin.Kind)
}
}
return result
}
// GetRequiredPlugins lists a full set of plugins that will be required by the given program.
func (host *defaultHost) GetRequiredPlugins(info ProgInfo, kinds Flags) ([]workspace.PluginInfo, error) {
var plugins []workspace.PluginInfo
if kinds&LanguagePlugins != 0 {
// First make sure the language plugin is present. We need this to load the required resource plugins.
// TODO: we need to think about how best to version this. For now, it always picks the latest.
lang, err := host.LanguageRuntime(info.Proj.Runtime.Name())
if err != nil {
return nil, errors.Wrapf(err, "failed to load language plugin %s", info.Proj.Runtime.Name())
}
plugins = append(plugins, workspace.PluginInfo{
Name: info.Proj.Runtime.Name(),
Kind: workspace.LanguagePlugin,
})
if kinds&ResourcePlugins != 0 {
// Use the language plugin to compute this project's set of plugin dependencies.
// TODO: we want to support loading precisely what the project needs, rather than doing a static scan of resolved
// packages. Doing this requires that we change our RPC interface and figure out how to configure plugins
// later than we do (right now, we do it up front, but at that point we don't know the version).
deps, err := lang.GetRequiredPlugins(info)
if err != nil {
return nil, errors.Wrapf(err, "failed to discover plugin requirements")
}
plugins = append(plugins, deps...)
}
} else {
// If we can't load the language plugin, we can't discover the resource plugins.
contract.Assertf(kinds&ResourcePlugins != 0,
"cannot load resource plugins without also loading the language plugin")
}
// Next, if there are analyzers listed in the project file, use them too.
// TODO: these are currently not versioned. We probably need to let folks specify versions in Pulumi.yaml.
if info.Proj.Analyzers != nil && kinds&AnalyzerPlugins != 0 {
for _, analyzer := range *info.Proj.Analyzers {
plugins = append(plugins, workspace.PluginInfo{
Name: string(analyzer),
Kind: workspace.AnalyzerPlugin,
})
}
}
return plugins, nil
}
func (host *defaultHost) SignalCancellation() error {
// NOTE: we're abusing loadPlugin in order to ensure proper synchronization.
_, err := host.loadPlugin(func() (interface{}, error) {
var result error
for _, plug := range host.resourcePlugins {
if err := plug.Plugin.SignalCancellation(); err != nil {
result = multierror.Append(result, errors.Wrapf(err,
"Error signaling cancellation to resource provider '%s'", plug.Info.Name))
}
}
return nil, result
})
return err
}
func (host *defaultHost) CloseProvider(provider Provider) error {
// NOTE: we're abusing loadPlugin in order to ensure proper synchronization.
_, err := host.loadPlugin(func() (interface{}, error) {
if err := provider.Close(); err != nil {
return nil, err
}
delete(host.resourcePlugins, provider)
return nil, nil
})
return err
}
func (host *defaultHost) Close() error {
// Close all plugins.
for _, plug := range host.analyzerPlugins {
if err := plug.Plugin.Close(); err != nil {
logging.Infof("Error closing '%s' analyzer plugin during shutdown; ignoring: %v", plug.Info.Name, err)
}
}
for _, plug := range host.resourcePlugins {
if err := plug.Plugin.Close(); err != nil {
logging.Infof("Error closing '%s' resource plugin during shutdown; ignoring: %v", plug.Info.Name, err)
}
}
for _, plug := range host.languagePlugins {
if err := plug.Plugin.Close(); err != nil {
logging.Infof("Error closing '%s' language plugin during shutdown; ignoring: %v", plug.Info.Name, err)
}
}
// Empty out all maps.
host.analyzerPlugins = make(map[tokens.QName]*analyzerPlugin)
host.languagePlugins = make(map[string]*languagePlugin)
host.resourcePlugins = make(map[Provider]*resourcePlugin)
// Shut down the plugin loader.
close(host.loadRequests)
// Finally, shut down the host's gRPC server.
return host.server.Cancel()
}
// Flags can be used to filter out plugins during loading that aren't necessary.
type Flags int
const (
// AnalyzerPlugins is used to only load analyzers.
AnalyzerPlugins Flags = 1 << iota
// LanguagePlugins is used to only load language plugins.
LanguagePlugins
// ResourcePlugins is used to only load resource provider plugins.
ResourcePlugins
)
// AllPlugins uses flags to ensure that all plugin kinds are loaded.
var AllPlugins = AnalyzerPlugins | LanguagePlugins | ResourcePlugins
|
[
"\"PULUMI_DEV\""
] |
[] |
[
"PULUMI_DEV"
] |
[]
|
["PULUMI_DEV"]
|
go
| 1 | 0 | |
larch/wxmap/mapviewer.py
|
#!/usr/bin/env python
"""
GUI for displaying maps from HDF5 files
"""
import os
import platform
import sys
import time
import json
import socket
import datetime
from functools import partial
from threading import Thread
from collections import OrderedDict, namedtuple
import wx
from wx.adv import AboutBox, AboutDialogInfo
import wx.lib.scrolledpanel as scrolled
import wx.lib.mixins.inspection
import wx.dataview as dv
DVSTY = dv.DV_SINGLE|dv.DV_VERT_RULES|dv.DV_ROW_LINES
HAS_EPICS = False
try:
from epics import caput
HAS_EPICS = True
except:
pass
import numpy as np
import scipy.stats as stats
#from matplotlib.widgets import Slider, Button, RadioButtons
from wxmplot import PlotFrame
import larch
from larch.larchlib import read_workdir, save_workdir
from larch.wxlib import (LarchPanel, LarchFrame, EditableListBox, SimpleText,
FloatCtrl, Font, pack, Popup, Button, MenuItem,
Choice, Check, GridPanel, FileSave, HLine, flatnotebook,
HLine, OkCancel, LEFT, LarchUpdaterDialog, LarchWxApp)
from larch.utils.strutils import bytes2str, version_ge
from larch.io import nativepath
from larch.site_config import icondir
from ..xrd import lambda_from_E, xrd1d, save1D, calculate_xvalues
from ..xrmmap import GSEXRM_MapFile, GSEXRM_FileStatus, h5str, ensure_subgroup, DEFAULT_XRAY_ENERGY
from ..apps import check_larchversion, update_larch
from ..epics import pv_fullname
from ..wxlib.xrfdisplay import XRFDisplayFrame
from .mapimageframe import MapImageFrame, CorrelatedMapFrame
from .mapmathpanel import MapMathPanel
from .maptomopanel import TomographyPanel
from .mapxrfpanel import XRFAnalysisPanel
from ..wxxrd import XRD1DViewerFrame, XRD2DViewerFrame
def timestring():
return datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
FONTSIZE = 8
if platform.system() in ('Windows', 'Darwin'):
FONTSIZE = 10
CEN = wx.ALIGN_CENTER
LEFT = wx.ALIGN_LEFT
RIGHT = wx.ALIGN_RIGHT
ALL_CEN = wx.ALL|CEN
ALL_LEFT = wx.ALL|LEFT
ALL_RIGHT = wx.ALL|RIGHT
FILE_WILDCARDS = 'X-ray Maps (*.h5)|*.h5|All files (*.*)|*.*'
XRF_ICON_FILE = 'gse_xrfmap.ico'
NOT_OWNER_MSG = """The File
'%s'
appears to be open by another process. Having two
processes writing to the file can cause corruption.
Do you want to take ownership of the file?
"""
NOT_GSEXRM_FILE = """The File
'%s'
doesn't seem to be a Map File
"""
NOT_GSEXRM_FOLDER = """The Folder
'%s'
doesn't seem to be a Map Folder
"""
FILE_ALREADY_READ = """The File
'%s'
has already been read.
"""
FRAMESTYLE = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL
BEAMLINE = '13-ID-E'
FACILITY = 'APS'
PLOT_TYPES = ('Single ROI Map', 'Three ROI Map', 'Correlation Plot')
PROCROWS_CHOICES = ('All', '500', '200', '100', '50', '20', '10')
PLOT_OPERS = ('/', '*', '-', '+')
ESCAN_CRED = os.environ.get('ESCAN_CREDENTIALS', None)
if ESCAN_CRED is not None:
try:
from ..epics.larchscan import connect_scandb
except ImportError:
ESCAN_CRED = None
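# ESCAN_CREDENTIALS, when set, is expected to carry the scan-database credentials consumed by
# connect_scandb(); leaving it unset (or lacking the epics extras) simply disables that integration.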
CWID = 150
WWID = 100 + CWID*4
class MapPanel(GridPanel):
'''Panel of Controls for viewing maps'''
label = 'ROI Map'
def __init__(self, parent, owner=None, **kws):
self.owner = owner
self.cfile, self.xrmmap = None,None
self.last_process_time = 0
self.detectors_set = False
GridPanel.__init__(self, parent, nrows=8, ncols=6, **kws)
self.plot_choice = Choice(self, choices=PLOT_TYPES, size=(CWID, -1))
self.plot_choice.Bind(wx.EVT_CHOICE, self.plotSELECT)
self.det_choice = [Choice(self, size=(CWID, -1)),
Choice(self, size=(CWID, -1)),
Choice(self, size=(CWID, -1)),
Choice(self, size=(CWID, -1))]
self.roi_choice = [Choice(self, size=(CWID, -1)),
Choice(self, size=(CWID, -1)),
Choice(self, size=(CWID, -1)),
Choice(self, size=(CWID, -1))]
for i,det_chc in enumerate(self.det_choice):
det_chc.Bind(wx.EVT_CHOICE, partial(self.detSELECT,i))
for i,roi_chc in enumerate(self.roi_choice):
roi_chc.Bind(wx.EVT_CHOICE, partial(self.roiSELECT,i))
self.det_label = [SimpleText(self,'Intensity'),
SimpleText(self,''),
SimpleText(self,''),
SimpleText(self, 'Normalization')]
self.roi_label = [SimpleText(self,''),
SimpleText(self,''),
SimpleText(self,''),
SimpleText(self,'')]
fopts = dict(minval=-50000, precision=0, size=(70, -1))
self.lims = [FloatCtrl(self, value= 0, **fopts),
FloatCtrl(self, value=-1, **fopts),
FloatCtrl(self, value= 0, **fopts),
FloatCtrl(self, value=-1, **fopts)]
self.zigoff = FloatCtrl(self, value=0, minval=-15, maxval=15,
precision=0, size=(70, -1))
for wid in self.lims:
wid.Disable()
self.use_dtcorr = Check(self, default=True,
label='Correct for Detector Deadtime',
action=self.onDTCorrect)
self.use_hotcols = Check(self, default=False,
label='Remove First and Last columns',
action=self.onHotCols)
self.use_zigzag = Check(self, default=False, label='Fix ZigZag',
action=self.onZigZag)
self.limrange = Check(self, default=False,
label=' Limit Map Range to Pixel Range:',
action=self.onLimitRange)
map_shownew = Button(self, 'Show New Map', size=(CWID, -1),
action=partial(self.onROIMap, new=True))
map_update = Button(self, 'Replace Last Map', size=(CWID, -1),
action=partial(self.onROIMap, new=False))
self.mapproc_btn = Button(self, 'Add More Rows', size=(CWID, -1),
action=self.onProcessMap)
self.mapproc_nrows = Choice(self, choices=PROCROWS_CHOICES, size=(CWID, -1))
self.mapproc_nrows.SetStringSelection('100')
self.Add(SimpleText(self, 'Build Map From Raw Data Folder:'),
dcol=2, style=LEFT, newrow=True)
self.Add(self.mapproc_btn, dcol=1, style=LEFT)
self.Add(SimpleText(self, 'Max # Rows to Add:'), dcol=1,
style=LEFT, newrow=False)
self.Add(self.mapproc_nrows, dcol=1, style=LEFT)
self.Add(HLine(self, size=(WWID, 5)), dcol=8, style=LEFT, newrow=True)
self.Add((5, 5), newrow=True)
self.Add(SimpleText(self, 'Display ROI Maps: Plot Type:'), dcol=2,
style=LEFT, newrow=True)
self.Add(self.plot_choice, dcol=1, style=LEFT)
self.AddMany((SimpleText(self,''), self.det_label[0],
self.det_label[1], self.det_label[2], self.det_label[3]),
style=LEFT, newrow=True)
self.AddMany((SimpleText(self,'Detector:'), self.det_choice[0],
self.det_choice[1], self.det_choice[2], self.det_choice[3]),
style=LEFT, newrow=True)
self.AddMany((SimpleText(self,'ROI:'),self.roi_choice[0],
self.roi_choice[1],self.roi_choice[2], self.roi_choice[3]),
style=LEFT, newrow=True)
self.AddMany((SimpleText(self,''),self.roi_label[0],
self.roi_label[1],self.roi_label[2], self.roi_label[3]),
style=LEFT, newrow=True)
self.Add((5, 5), dcol=1, style=LEFT, newrow=True)
self.Add(map_shownew, dcol=1, style=LEFT)
self.Add(map_update, dcol=1, style=LEFT)
self.Add(HLine(self, size=(WWID, 5)), dcol=8, style=LEFT, newrow=True)
self.Add(SimpleText(self,'Options:'), dcol=1, style=LEFT, newrow=True)
self.Add(self.use_dtcorr, dcol=2, style=LEFT)
self.Add((5, 5), dcol=1, style=LEFT, newrow=True)
self.Add(self.use_hotcols, dcol=2, style=LEFT)
self.Add((5, 5), dcol=1, style=LEFT, newrow=True)
self.Add(self.use_zigzag, dcol=1, style=LEFT)
self.Add(self.zigoff, dcol=1, style=LEFT)
self.Add((5, 5), dcol=1, style=LEFT, newrow=True)
self.Add(self.limrange, dcol=2, style=LEFT)
self.Add((5, 5), dcol=1, style=LEFT, newrow=True)
self.Add(SimpleText(self, 'X Range:'), dcol=1, style=LEFT)
self.Add(self.lims[0], dcol=1, style=LEFT)
self.Add(self.lims[1], dcol=1, style=LEFT)
self.Add((5, 5), dcol=1, style=LEFT, newrow=True)
self.Add(SimpleText(self, 'Y Range:'), dcol=1, style=LEFT)
self.Add(self.lims[2], dcol=1, style=LEFT)
self.Add(self.lims[3], dcol=1, style=LEFT)
self.Add(HLine(self, size=(WWID, 5)), dcol=8, style=LEFT, newrow=True)
self.pack()
def onDTCorrect(self, event=None):
xrmfile = self.owner.current_file
if xrmfile is not None:
xrmfile.dtcorrect = self.use_dtcorr.IsChecked()
def onHotCols(self, event=None):
xrmfile = self.owner.current_file
if xrmfile is not None:
xrmfile.hotcols = self.use_hotcols.IsChecked()
def onZigZag(self, event=None):
xrmfile = self.owner.current_file
if xrmfile is not None:
zigzag = 0
if self.use_zigzag.IsChecked():
zigzag = int(self.zigoff.GetValue())
xrmfile.zigzag = zigzag
def update_xrmmap(self, xrmfile=None, set_detectors=False):
if xrmfile is None:
xrmfile = self.owner.current_file
self.cfile = xrmfile
self.xrmmap = self.cfile.xrmmap
if set_detectors or (not self.detectors_set):
self.set_det_choices()
self.plotSELECT()
def onLimitRange(self, event=None):
if self.limrange.IsChecked():
for wid in self.lims:
wid.Enable()
else:
for wid in self.lims:
wid.Disable()
def detSELECT(self,idet,event=None):
self.set_roi_choices(idet=idet)
def roiSELECT(self,iroi,event=None):
detname = self.det_choice[iroi].GetStringSelection()
roiname = self.roi_choice[iroi].GetStringSelection()
if version_ge(self.cfile.version, '2.0.0'):
try:
roi = self.cfile.xrmmap['roimap'][detname][roiname]
limits = roi['limits'][:]
units = bytes2str(roi['limits'].attrs.get('units',''))
roistr = '[%0.1f to %0.1f %s]' % (limits[0],limits[1],units)
except:
roistr = ''
else:
try:
roi = self.cfile.xrmmap[detname]
en = list(roi['energy'][:])
index = list(roi['roi_name'][:]).index(roiname)
limits = list(roi['roi_limits'][:][index])
roistr = '[%0.1f to %0.1f keV]' % (en[limits[0]],en[limits[1]])
except:
roistr = ''
self.roi_label[iroi].SetLabel(roistr)
def plotSELECT(self,event=None):
if len(self.owner.filemap) > 0:
plot_type = self.plot_choice.GetStringSelection().lower()
if 'single' in plot_type:
for i in (1, 2):
self.det_choice[i].Disable()
self.roi_choice[i].Disable()
self.roi_label[i].SetLabel('')
for i, label in enumerate(['Intensity', ' ', ' ']):
self.det_label[i].SetLabel(label)
elif 'three' in plot_type:
for i in (1, 2):
self.det_choice[i].Enable()
self.roi_choice[i].Enable()
for i, label in enumerate(['Red', 'Green', 'Blue']):
self.det_label[i].SetLabel(label)
self.set_roi_choices()
elif 'correl' in plot_type:
self.det_choice[1].Enable()
self.roi_choice[1].Enable()
self.det_choice[2].Disable()
self.roi_choice[2].Disable()
for i, label in enumerate([' X ',' Y ', '']):
self.det_label[i].SetLabel(label)
self.set_roi_choices()
def onClose(self):
for p in self.plotframes:
try:
p.Destroy()
except:
pass
def ShowMap(self, xrmfile=None, new=True):
subtitles = None
plt3 = 'three' in self.plot_choice.GetStringSelection().lower()
if xrmfile is None:
xrmfile = self.owner.current_file
self.onZigZag()
args={'hotcols' : xrmfile.hotcols,
'dtcorrect' : xrmfile.dtcorrect}
det_name, roi_name, plt_name = [], [], []
for det, roi in zip(self.det_choice, self.roi_choice):
det_name += [det.GetStringSelection()]
roi_name += [roi.GetStringSelection()]
if det_name[-1] == 'scalars':
plt_name += ['%s' % roi_name[-1]]
else:
plt_name += ['%s(%s)' % (roi_name[-1],det_name[-1])]
mapx = 1.0
if roi_name[-1] != '1':
mapx = xrmfile.get_roimap(roi_name[-1], det=det_name[-1], **args)
mapx[np.where(mapx==0)] = 1.
r_map = xrmfile.get_roimap(roi_name[0], det=det_name[0], **args)
if plt3:
g_map = xrmfile.get_roimap(roi_name[1], det=det_name[1], **args)
b_map = xrmfile.get_roimap(roi_name[2], det=det_name[2], **args)
x = xrmfile.get_pos(0, mean=True)
y = xrmfile.get_pos(1, mean=True)
pref, fname = os.path.split(xrmfile.filename)
if plt3:
map = np.array([r_map/mapx, g_map/mapx, b_map/mapx])
map = np.einsum('kij->ijk', map)
title = fname
info = ''
if roi_name[-1] == '1':
subtitles = {'red': 'Red: %s' % plt_name[0],
'green': 'Green: %s' % plt_name[1],
'blue': 'Blue: %s' % plt_name[2]}
else:
subtitles = {'red': 'Red: %s / %s' % (plt_name[0], plt_name[-1]),
'green': 'Green: %s / %s' % (plt_name[1], plt_name[-1]),
'blue': 'Blue: %s / %s' % (plt_name[2], plt_name[-1])}
else:
map = r_map/mapx
if roi_name[-1] == '1':
title = plt_name[0]
else:
title = '%s / %s' % (plt_name[0], plt_name[-1])
title = '%s: %s' % (fname, title)
info = 'Intensity: [%g, %g]' %(map.min(), map.max())
subtitle = None
det = None
if (plt3 and det_name[0]==det_name[1] and det_name[0]==det_name[2]) or not plt3:
for s in det_name[0]:
if s.isdigit(): det = int(s)
if len(self.owner.im_displays) == 0 or new:
iframe = self.owner.add_imdisplay(title, det=det)
xoff, yoff = 0, 0
if self.limrange.IsChecked():
lims = [wid.GetValue() for wid in self.lims]
map = map[lims[2]:lims[3], lims[0]:lims[1]]
xoff, yoff = lims[0], lims[2]
self.owner.display_map(map, title=title, info=info, x=x, y=y, det=det,
xoff=xoff, yoff=yoff, subtitles=subtitles,
xrmfile=self.cfile)
def onLasso(self, selected=None, mask=None, data=None, xrmfile=None, **kws):
if xrmfile is None:
xrmfile = self.owner.current_file
ny, nx = xrmfile.get_shape()
indices = []
for idx in selected:
iy, ix = divmod(idx, ny)
indices.append((ix, iy))
def ShowCorrel(self, xrmfile=None, new=True):
if xrmfile is None:
xrmfile = self.owner.current_file
self.onZigZag()
args={'hotcols' : xrmfile.hotcols,
'dtcorrect' : xrmfile.dtcorrect}
det_name,roi_name = [],[]
plt_name = []
xdet = self.det_choice[0].GetStringSelection()
xroi = self.roi_choice[0].GetStringSelection()
xlab = "%s(%s)" % (xroi, xdet)
if 'scalar' in xdet.lower():
xlab = xroi
ydet = self.det_choice[1].GetStringSelection()
yroi = self.roi_choice[1].GetStringSelection()
ylab = "%s(%s)" % (yroi, ydet)
if 'scalar' in ydet.lower():
ylab = yroi
map1 = xrmfile.get_roimap(xroi, det=xdet, **args)
map2 = xrmfile.get_roimap(yroi, det=ydet, **args)
x = xrmfile.get_pos(0, mean=True)
y = xrmfile.get_pos(1, mean=True)
pref, fname = os.path.split(xrmfile.filename)
title ='%s: %s vs. %s' %(fname, ylab, xlab)
correl_plot = CorrelatedMapFrame(parent=self.owner, xrmfile=xrmfile)
correl_plot.display(map1, map2, name1=xlab, name2=ylab,
x=x, y=y, title=title)
correl_plot.Show()
correl_plot.Raise()
self.owner.plot_displays.append(correl_plot)
def onProcessMap(self, event=None, max_new_rows=None):
xrmfile = self.owner.current_file
if xrmfile is None:
return
pref, fname = os.path.split(xrmfile.filename)
if max_new_rows is None:
max_new_rows = self.mapproc_nrows.GetStringSelection().lower()
if max_new_rows.lower() == 'all':
max_new_rows = None
else:
max_new_rows = int(max_new_rows)
self.owner.process_file(fname, max_new_rows=max_new_rows)
self.update_xrmmap(xrmfile=self.owner.current_file, set_detectors=True)
def onROIMap(self, event=None, new=True):
plotcmd = partial(self.ShowMap, new=new)
if 'correlation' in self.plot_choice.GetStringSelection().lower():
plotcmd = partial(self.ShowCorrel, new=new)
plotcmd()
def set_det_choices(self):
det_list = self.cfile.get_detector_list()
for det_ch in self.det_choice:
det_ch.SetChoices(det_list)
if 'scalars' in det_list: ## should set 'denominator' to scalars as default
self.det_choice[-1].SetStringSelection('scalars')
self.set_roi_choices()
self.detectors_set = True
def set_roi_choices(self, idet=None):
force_rois = not self.detectors_set
if idet is None:
for idet, det_ch in enumerate(self.det_choice):
detname = self.det_choice[idet].GetStringSelection()
rois = self.cfile.get_roi_list(detname, force=force_rois)
cur = self.roi_choice[idet].GetStringSelection()
self.roi_choice[idet].SetChoices(rois)
if cur in rois:
self.roi_choice[idet].SetStringSelection(cur)
self.roiSELECT(idet)
else:
detname = self.det_choice[idet].GetStringSelection()
rois = self.cfile.get_roi_list(detname, force=force_rois)
cur = self.roi_choice[idet].GetStringSelection()
self.roi_choice[idet].SetChoices(rois)
if cur in rois:
self.roi_choice[idet].SetStringSelection(cur)
self.roiSELECT(idet)
def update_roi(self, detname):
force = not self.detectors_set
return self.cfile.get_roi_list(detname, force=force)
class MapInfoPanel(scrolled.ScrolledPanel):
"""Info Panel """
label = 'Map Info'
def __init__(self, parent, owner=None, **kws):
scrolled.ScrolledPanel.__init__(self, parent, -1,
style=wx.GROW|wx.TAB_TRAVERSAL, **kws)
self.owner = owner
sizer = wx.GridBagSizer(3, 3)
self.wids = {}
ir = 0
for label in ('Facility','Run Cycle','Proposal Number','User group',
'H5 Map Created',
'Scan Time','File Compression','Map Data',
'Ring Current', 'X-ray Energy', 'X-ray Intensity (I0)',
'Original data path', 'User Comments 1', 'User Comments 2',
'Scan Fast Motor', 'Scan Slow Motor', 'Dwell Time',
'Sample Fine Stages',
'Sample Stage X', 'Sample Stage Y',
'Sample Stage Z', 'Sample Stage Theta',
'XRD Calibration'):
ir += 1
thislabel = SimpleText(self, '%s:' % label, style=wx.LEFT, size=(125, -1))
self.wids[label] = SimpleText(self, ' ' , style=wx.LEFT, size=(350, -1))
sizer.Add(thislabel, (ir, 0), (1, 1), 1)
sizer.Add(self.wids[label], (ir, 1), (1, 1), 1)
pack(self, sizer)
self.SetupScrolling()
def update_xrmmap(self, xrmfile=None, set_detectors=None):
if xrmfile is None:
xrmfile = self.owner.current_file
xrmmap = xrmfile.xrmmap
def time_between(d1, d2):
d1 = datetime.datetime.strptime(d1, "%Y-%m-%d %H:%M:%S")
d2 = datetime.datetime.strptime(d2, "%Y-%m-%d %H:%M:%S")
diff = d2 - d1 if d2 > d1 else d1 - d2
return diff.days,diff.seconds
config_grp = ensure_subgroup('config',xrmmap)
notes_grp = ensure_subgroup('notes',config_grp)
time_str = bytes2str(notes_grp.attrs.get('h5_create_time',''))
self.wids['H5 Map Created'].SetLabel(time_str)
try:
d,s = time_between(bytes2str(notes_grp.attrs.get('scan_start_time','')),
bytes2str(notes_grp.attrs.get('scan_end_time','')))
time_str = str(datetime.timedelta(days=d,seconds=s))
except:
time_str = bytes2str(xrmmap.attrs.get('Start_Time',''))
self.wids['Scan Time'].SetLabel( time_str )
self.wids['File Compression'].SetLabel(bytes2str(xrmmap.attrs.get('Compression','')))
comments = h5str(xrmmap['config/scan/comments'][()]).split('\n', 2)
for i, comm in enumerate(comments):
self.wids['User Comments %i' %(i+1)].SetLabel(comm)
pos_addrs = [str(x) for x in xrmmap['config/positioners'].keys()]
pos_label = [h5str(x[()]) for x in xrmmap['config/positioners'].values()]
scan_pos1 = h5str(xrmmap['config/scan/pos1'][()])
scan_pos2 = h5str(xrmmap['config/scan/pos2'][()])
i1 = pos_addrs.index(scan_pos1)
i2 = pos_addrs.index(scan_pos2)
start1 = float(xrmmap['config/scan/start1'][()])
start2 = float(xrmmap['config/scan/start2'][()])
stop1 = float(xrmmap['config/scan/stop1'][()])
stop2 = float(xrmmap['config/scan/stop2'][()])
step1 = float(xrmmap['config/scan/step1'][()])
step2 = float(xrmmap['config/scan/step2'][()])
npts1 = int((abs(stop1 - start1) + 1.1*step1)/step1)
npts2 = int((abs(stop2 - start2) + 1.1*step2)/step2)
sfmt = '%s: [%.4f:%.4f], step=%.4f, %i pixels'
scan1 = sfmt % (pos_label[i1], start1, stop1, step1, npts1)
scan2 = sfmt % (pos_label[i2], start2, stop2, step2, npts2)
rowtime = float(xrmmap['config/scan/time1'][()])
self.wids['Scan Fast Motor'].SetLabel(scan1)
self.wids['Scan Slow Motor'].SetLabel(scan2)
pixtime = xrmfile.pixeltime
if pixtime is None:
pixtime = xrmfile.calc_pixeltime()
pixtime =int(round(1000.0*pixtime))
self.wids['Dwell Time'].SetLabel('%.1f ms per pixel' % pixtime)
env_names = list(xrmmap['config/environ/name'])
env_vals = list(xrmmap['config/environ/value'])
env_addrs = list(xrmmap['config/environ/address'])
fines = {'X': '?', 'Y': '?'}
i0vals = {'flux':'?', 'current':'?'}
en = xrmfile.get_incident_energy()
enmsg = '%0.1f eV (%0.3f \u00c5)' % (en, lambda_from_E(en, E_units='eV'))
if abs(en - DEFAULT_XRAY_ENERGY) < 1.0:
enmsg = "%s : PROBABLY NOT CORRECT" % enmsg
self.wids['X-ray Energy'].SetLabel(enmsg)
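        # scan the saved beamline environment PVs for ring current, incident flux, and sample stage positions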
for name, addr, val in zip(env_names, env_addrs, env_vals):
name = bytes2str(name).lower()
val = h5str(val)
if 'ring_current' in name or 'ring current' in name:
self.wids['Ring Current'].SetLabel('%s mA' % val)
elif 'beamline.fluxestimate' in name or 'transmitted flux' in name:
i0vals['flux'] = val
elif 'i0 current' in name:
i0vals['current'] = val
elif name.startswith('sample'):
name = name.replace('samplestage.', '')
if 'coarsex' in name or 'coarse x' in name:
self.wids['Sample Stage X'].SetLabel('%s mm' % val)
elif 'coarsey' in name or 'coarse y' in name:
self.wids['Sample Stage Y'].SetLabel('%s mm' % val)
elif 'coarsez' in name or 'coarse z' in name:
self.wids['Sample Stage Z'].SetLabel('%s mm' % val)
elif 'theta' in name:
self.wids['Sample Stage Theta'].SetLabel('%s deg' % val)
elif 'finex' in name or 'fine x' in name:
fines['X'] = val
elif 'finey' in name or 'fine y' in name:
fines['Y'] = val
if i0vals['current'] == '?':
i0val = 'Flux=%(flux)s Hz' % i0vals
else:
i0val = u'Flux=%(flux)s Hz, I0 Current=%(current)s \u03BCA' % i0vals
self.wids['X-ray Intensity (I0)'].SetLabel(i0val)
self.wids['Sample Fine Stages'].SetLabel('X, Y = %(X)s, %(Y)s mm' % (fines))
folderpath = bytes2str(xrmmap.attrs.get('Map_Folder',''))
if len(folderpath) > 35:
folderpath = '...'+folderpath[-35:]
self.wids['Original data path'].SetLabel(folderpath)
self.wids['XRD Calibration'].SetLabel('')
xrd_calibration = ''
if 'xrd1d' in xrmmap:
xrd_calibration = bytes2str(xrmmap['xrd1d'].attrs.get('calfile',''))
if not os.path.exists(xrd_calibration):
xrd_calibration = ''
self.wids['XRD Calibration'].SetLabel(os.path.split(xrd_calibration)[-1])
notes = {}
config_grp = ensure_subgroup('config',xrmmap)
notes_grp = ensure_subgroup('notes',config_grp)
for key in notes_grp.attrs.keys():
try:
notes[key] = bytes2str(notes_grp.attrs[key])
except:
pass
note_title = ['Facility','Run Cycle','Proposal Number','User group']
note_str = ['','','','']
if 'beamline' in notes and 'facility' in notes:
note_str[0] = '%s @ %s' % (notes['beamline'],notes['facility'])
if 'run' in notes:
note_str[1] = notes['run']
if 'proposal' in notes:
note_str[2] = notes['proposal']
if 'user' in notes:
note_str[3] = notes['user']
for title,note in zip(note_title,note_str):
self.wids[title].SetLabel(note)
xrmfile.reset_flags()
if xrmfile.has_xrf:
if xrmfile.has_xrd2d and xrmfile.has_xrd1d:
datastr = 'XRF, 2D- and 1D-XRD data'
elif xrmfile.has_xrd2d:
datastr = 'XRF, 2D-XRD data'
elif xrmfile.has_xrd1d:
datastr = 'XRF, 1D-XRD data'
else:
datastr = 'XRF data'
else:
if xrmfile.has_xrd2d and xrmfile.has_xrd1d:
datastr = '2D- and 1D-XRD data'
elif xrmfile.has_xrd2d:
datastr = '2D-XRD data'
elif xrmfile.has_xrd1d:
datastr = '1D-XRD data'
else:
datastr = ''
self.wids['Map Data'].SetLabel(datastr)
def onClose(self):
pass
class MapAreaPanel(scrolled.ScrolledPanel):
label = 'Map Areas'
delstr = """ Delete Area '%s'?
WARNING: This cannot be undone!
"""
def __init__(self, parent, owner=None, **kws):
scrolled.ScrolledPanel.__init__(self, parent, -1,
style=wx.GROW|wx.TAB_TRAVERSAL, **kws)
######################################
## GENERAL MAP AREAS
self.owner = owner
pane = wx.Panel(self)
sizer = wx.GridBagSizer(3, 3)
self.choices = {}
bsize = (CWID, -1)
self.choice = Choice(pane, size=(225, -1), action=self.onSelect)
self.desc = wx.TextCtrl(pane, -1, '', size=(225, -1))
self.info1 = wx.StaticText(pane, -1, '', size=(275, -1))
self.info2 = wx.StaticText(pane, -1, '', size=(275, -1))
self.onmap = Button(pane, 'Show on Map', size=bsize, action=self.onShow)
self.clear = Button(pane, 'Clear Map', size=bsize, action=self.onClear)
self.bdelete = Button(pane, 'Delete', size=bsize, action=self.onDelete)
self.update = Button(pane, 'Apply', size=bsize, action=self.onLabel)
self.bexport = Button(pane, 'Export Areas', size=bsize, action=self.onExport)
self.bimport = Button(pane, 'Import Areas', size=bsize, action=self.onImport)
self.bcopy = Button(pane, 'Copy to Other Maps', size=bsize, action=self.onCopy)
self.xrf = Button(pane, 'Show XRF (Fore)', size=bsize, action=self.onXRF)
self.xrf2 = Button(pane, 'Show XRF (Back)', size=bsize,
action=partial(self.onXRF, as_mca2=True))
self.onstats = Button(pane, 'Calculate XRF Stats', size=bsize,
action=self.onShowStats)
self.onreport = Button(pane, 'Save XRF Stats', size=bsize,
action=self.onReport)
self.xrd1d_plot = Button(pane, 'Show 1D XRD', size=bsize,
action=partial(self.onXRD, show=True, xrd1d=True))
self.xrd2d_plot = Button(pane, 'Show 2D XRD', size=bsize,
action=partial(self.onXRD, show=True, xrd2d=True))
legend = wx.StaticText(pane, -1, 'Values in Counts per second', size=(200, -1))
def txt(s):
return SimpleText(pane, s)
irow = 1
sizer.Add(txt('Map Areas and Saved Points'), ( 0, 0), (1, 5), ALL_CEN, 2)
sizer.Add(txt('Area: '), (irow, 0), (1, 1), ALL_LEFT, 2)
sizer.Add(self.choice, (irow, 1), (1, 2), ALL_LEFT, 2)
sizer.Add(self.bdelete, (irow, 3), (1, 1), ALL_LEFT, 2)
irow += 1
sizer.Add(txt('Info: '), (irow, 0), (1, 1), ALL_LEFT, 2)
sizer.Add(self.info1, (irow, 1), (1, 2), ALL_LEFT, 2)
sizer.Add(self.info2, (irow, 3), (1, 2), ALL_LEFT, 2)
irow += 1
sizer.Add(txt('Rename: '), (irow, 0), (1, 1), ALL_LEFT, 2)
sizer.Add(self.desc, (irow, 1), (1, 2), ALL_LEFT, 2)
sizer.Add(self.update, (irow, 3), (1, 1), ALL_LEFT, 2)
irow += 1
sizer.Add(txt('Show: '), (irow, 0), (1, 1), ALL_LEFT, 2)
sizer.Add(self.onmap, (irow, 1), (1, 1), ALL_LEFT, 2)
sizer.Add(self.clear, (irow, 2), (1, 1), ALL_LEFT, 2)
irow += 1
sizer.Add(txt('Save: '), (irow, 0), (1, 1), ALL_LEFT, 2)
sizer.Add(self.bexport, (irow, 1), (1, 1), ALL_LEFT, 2)
sizer.Add(self.bimport, (irow, 2), (1, 1), ALL_LEFT, 2)
sizer.Add(self.bcopy, (irow, 3), (1, 1), ALL_LEFT, 2)
irow += 1
sizer.Add(txt('XRF: '), (irow, 0), (1, 1), ALL_LEFT, 2)
sizer.Add(self.xrf, (irow, 1), (1, 1), ALL_LEFT, 2)
sizer.Add(self.xrf2, (irow, 2), (1, 1), ALL_LEFT, 2)
sizer.Add(self.onstats, (irow, 3), (1, 1), ALL_LEFT, 2)
sizer.Add(self.onreport, (irow, 4), (1, 1), ALL_LEFT, 2)
irow += 1
sizer.Add(txt('XRD: '), (irow, 0), (1, 1), ALL_LEFT, 2)
sizer.Add(self.xrd1d_plot, (irow, 1), (1, 1), ALL_LEFT, 2)
sizer.Add(self.xrd2d_plot, (irow, 2), (1, 1), ALL_LEFT, 2)
# sizer.Add(self.xrd1d_save, (irow, 0), (1, 2), ALL_LEFT, 2)
# sizer.Add(self.xrd2d_save, (irow, 2), (1, 2), ALL_LEFT, 2)
irow += 1
sizer.Add(legend, (irow, 1), (1, 2), ALL_LEFT, 2)
pack(pane, sizer)
for btn in (self.xrd1d_plot, self.xrd2d_plot):
btn.Disable()
# main sizer
msizer = wx.BoxSizer(wx.VERTICAL)
msizer.Add(pane, 0, wx.ALIGN_LEFT|wx.ALL, 1)
msizer.Add(wx.StaticLine(self, size=(375, 2), style=wx.LI_HORIZONTAL),
0, wx.EXPAND|wx.ALL, 1)
self.report = None
rep = self.report = dv.DataViewListCtrl(self, style=DVSTY)
rep.AppendTextColumn('ROI ', width=100)
rep.AppendTextColumn('Min', width=75)
rep.AppendTextColumn('Max', width=75)
rep.AppendTextColumn('Mean ', width=75)
rep.AppendTextColumn('Sigma', width=75)
rep.AppendTextColumn('Median', width=75)
rep.AppendTextColumn('Mode', width=75)
for col in range(7):
align = wx.ALIGN_RIGHT
if col == 0: align = wx.ALIGN_LEFT
rep.Columns[col].Sortable = False
rep.Columns[col].Renderer.Alignment = align
rep.Columns[col].Alignment = align
rep.SetMinSize((590, 300))
msizer.Add(rep, 1, wx.ALIGN_LEFT|wx.ALL, 1)
pack(self, msizer)
self.SetupScrolling()
def onCopy(self, event=None):
xrmfile = self.owner.current_file
xrmmap = xrmfile.xrmmap
print("Copy Area : shape", xrmfile, xrmmap.shape)
def show_stats(self):
# self.stats = self.xrmfile.get_area_stats(self.areaname)
if self.report is None:
return
self.report.DeleteAllItems()
self.report_data = []
def report_info(dname,d):
try:
                gmean, hmean = stats.gmean(d), stats.hmean(d)
skew, kurtosis = stats.skew(d), stats.kurtosis(d)
except ValueError:
hmean, gmean, skew, kurtosis = 0, 0, 0, 0
smode = '--'
fmt = '{:,.1f}'.format # use thousands commas, 1 decimal place
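            # stats.mode returns (modes, counts); index twice to pull out a single modal value, guarding against empty arrays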
mode = stats.mode(d)
if len(mode) > 0:
mode = mode[0]
if len(mode) > 0:
smode = fmt(mode[0])
dat = (dname, fmt(d.min()), fmt(d.max()), fmt(d.mean()),
fmt(d.std()), fmt(np.median(d)), smode)
self.report_data.append(dat)
self.report.AppendItem(dat)
areaname = self._getarea()
xrmfile = self.owner.current_file
xrmmap = xrmfile.xrmmap
ctime = xrmfile.pixeltime
area = xrmfile.get_area(name=areaname)
amask = area[()]
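        # helper: trim the detector array (dropped hot columns, short rows) so the saved area mask can index it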
def match_mask_shape(det, mask):
if mask.shape[1] == det.shape[1] - 2: # hotcols
det = det[:,1:-1]
if mask.shape[0] < det.shape[0]:
det = det[:mask.shape[0]]
return det[mask]
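        # reuse ROI statistics cached in the area's 'roistats' attribute, if a previous calculation stored them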
if 'roistats' in area.attrs:
for dat in json.loads(area.attrs.get('roistats','')):
dat = tuple(dat)
self.report_data.append(dat)
self.report.AppendItem(dat)
self.choice.Enable()
return
version = xrmmap.attrs.get('Version','1.0.0')
if version_ge(version, '2.0.0'):
d_pref = 'mca'
d_scas = [d for d in xrmmap['scalars']]
detnames = ["%s%i" % (d_pref, i) for i in range(1, xrmfile.nmca+1)]
d_rois = xrmfile.get_roi_list(detnames[0])
else:
d_addrs = [d.lower() for d in xrmmap['roimap/det_address']]
d_names = [d for d in xrmmap['roimap/det_name']]
d_pref = 'det'
for i in range(1, xrmfile.nmca+1):
tname = '%s%i/realtime' % (d_pref, i)
rtime = xrmmap[tname][()]
if amask.shape[1] == rtime.shape[1] - 2: # hotcols
rtime = rtime[:,1:-1]
if version_ge(version, '2.0.0'):
for scalar in d_scas:
d = xrmmap['scalars'][scalar][()]
d = match_mask_shape(d, amask)
report_info(scalar, d/ctime)
for roi in d_rois:
for det in detnames:
d = xrmfile.get_roimap(roi, det=det, dtcorrect=False)
d = match_mask_shape(d, amask)
report_info('%s (%s)' % (roi, det), d/ctime)
else:
for idet, dname in enumerate(d_names):
try:
daddr = h5str(d_addrs[idet])
except IndexError:
break
if 'mca' in daddr:
det = 1
words = daddr.split('mca')
if len(words) > 1:
det = int(words[1].split('.')[0])
d = xrmmap['roimap/det_raw'][:,:,idet]
d = match_mask_shape(d, amask)
report_info(dname, d/ctime)
if 'roistats' not in area.attrs:
area.attrs['roistats'] = json.dumps(self.report_data)
xrmfile.h5root.flush()
def update_xrmmap(self, xrmfile=None, set_detectors=None):
if xrmfile is None: xrmfile = self.owner.current_file
xrmmap = xrmfile.xrmmap
self.set_area_choices(xrmmap, show_last=True)
self.set_enabled_btns(xrmfile=xrmfile)
self.report.DeleteAllItems()
self.report_data = []
try:
self.onSelect()
except:
pass
def set_enabled_btns(self, xrmfile=None):
if xrmfile is None:
xrmfile = self.owner.current_file
xrmfile.reset_flags()
        self.xrd2d_plot.Enable(xrmfile.has_xrd2d)
self.xrd1d_plot.Enable(xrmfile.has_xrd1d)
def clear_area_choices(self):
self.info1.SetLabel('')
self.info2.SetLabel('')
self.desc.SetValue('')
self.choice.Clear()
def set_area_choices(self, xrmmap, show_last=False):
self.clear_area_choices()
areas = xrmmap['areas']
c = self.choice
c.Clear()
self.choices = {}
choice_labels = []
for a in areas:
desc = bytes2str(areas[a].attrs.get('description', a))
self.choices[desc] = a
choice_labels.append(desc)
c.AppendItems(choice_labels)
this_label = ''
if len(self.choices) > 0:
idx = 0
if show_last:
idx = len(self.choices)-1
try:
this_label = choice_labels[idx]
except:
return
c.SetStringSelection(this_label)
self.desc.SetValue(this_label)
def onReport(self, event=None):
aname = self._getarea()
path, fname = os.path.split(self.owner.current_file.filename)
deffile = '%s_%s' % (fname, aname)
deffile = deffile.replace('.', '_') + '.dat'
outfile = FileSave(self, 'Save Area XRF Statistics File',
default_file=deffile,
wildcard=FILE_WILDCARDS)
if outfile is None:
return
area = self.owner.current_file.xrmmap['areas/%s' % aname]
npix = area[()].sum()
pixtime = self.owner.current_file.pixeltime
mca = self.owner.current_file.get_mca_area(aname)
dtime = mca.real_time
info_fmt = '%i Pixels, %i ms/pixel, %.3f total seconds'
buff = ['# Map %s, Area %s' % (self.owner.current_file.filename, aname),
'# %i Pixels' % npix,
'# %i ms per pixel' % int(round(1000.0*pixtime)),
'# %.3f total seconds' % dtime,
'# Time (TSCALER) in ms',
'# All other values in counts per second',
'#----------------------------------',
'# ROI Min Max Mean Sigma Median Mode']
for dat in self.report_data:
buff.append(' '.join(dat))
buff.append('')
try:
fout = open(outfile, 'w')
fout.write('\n'.join(buff))
fout.close()
except IOError:
print('could not write %s' % outfile)
def _getarea(self):
return self.choices[self.choice.GetStringSelection()]
def onExport(self, event=None):
ofile = self.owner.current_file.export_areas()
self.owner.message('Exported Areas to %s' % ofile)
def onImport(self, event=None):
wildcards = 'Area Files (*_Areas.npz)|*_Areas.npz|All files (*.*)|*.*'
dlg = wx.FileDialog(self, message='Read Areas File',
defaultDir=os.getcwd(),
wildcard=wildcards, style=wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
fname = dlg.GetPath().replace('\\', '/')
self.owner.current_file.import_areas(fname)
self.owner.message('Imported Areas from %s' % fname)
self.set_area_choices(self.owner.current_file.xrmmap)
self.onSelect()
def onSelect(self, event=None):
try:
aname = self._getarea()
except:
return
area = self.owner.current_file.xrmmap['areas/%s' % aname]
npix = area[()].sum()
yvals, xvals = np.where(area[()])
pixtime = self.owner.current_file.pixeltime
dtime = npix*pixtime
info1_fmt = '%i Pixels, %.3f seconds'
info2_fmt = ' Range (pixels) X: [%i:%i], Y: [%i:%i] '
self.info1.SetLabel(info1_fmt % (npix, dtime))
self.info2.SetLabel(info2_fmt % (xvals.min(), xvals.max(),
yvals.min(), yvals.max()))
self.desc.SetValue(area.attrs.get('description', aname))
self.report.DeleteAllItems()
self.report_data = []
if 'roistats' in area.attrs:
self.show_stats()
def onShowStats(self, event=None):
if self.report is None:
return
self.show_stats()
def onLabel(self, event=None):
aname = self._getarea()
area = self.owner.current_file.xrmmap['areas/%s' % aname]
new_label = str(self.desc.GetValue())
area.attrs['description'] = new_label
self.owner.current_file.h5root.flush()
self.set_area_choices(self.owner.current_file.xrmmap)
self.choice.SetStringSelection(new_label)
self.desc.SetValue(new_label)
def onShow(self, event=None):
aname = self._getarea()
area = self.owner.current_file.xrmmap['areas'][aname]
label = bytes2str(area.attrs.get('description', aname))
if len(self.owner.tomo_displays) > 0:
imd = self.owner.tomo_displays[-1]
try:
imd.add_highlight_area(area[()], label=label)
except:
pass
if len(self.owner.im_displays) > 0:
imd = self.owner.im_displays[-1]
h, w = self.owner.current_file.get_shape()
highlight = np.zeros((h, w))
highlight[np.where(area[()])] = 1
imd.panel.add_highlight_area(highlight, label=label)
def onDelete(self, event=None):
aname = self._getarea()
erase = (wx.ID_YES == Popup(self.owner, self.delstr % aname,
'Delete Area?', style=wx.YES_NO))
if erase:
xrmmap = self.owner.current_file.xrmmap
del xrmmap['areas/%s' % aname]
self.set_area_choices(xrmmap)
self.onSelect()
def onClear(self, event=None):
if len(self.owner.im_displays) > 0:
imd = self.owner.im_displays[-1]
try:
for area in imd.panel.conf.highlight_areas:
for w in area.collections + area.labelTexts:
w.remove()
imd.panel.conf.highlight_areas = []
imd.panel.redraw()
except:
pass
if len(self.owner.tomo_displays) > 0:
imd = self.owner.tomo_displays[-1]
try:
imd.clear_highlight_area()
except:
pass
def onXRF(self, event=None, as_mca2=False):
aname = self._getarea()
xrmfile = self.owner.current_file
area = xrmfile.xrmmap['areas/%s' % aname]
label = bytes2str(area.attrs.get('description', aname))
self._mca = None
self.owner.message("Getting XRF Spectra for area '%s'..." % aname)
def _getmca_area(aname):
o = self.owner
self._mca = o.current_file.get_mca_area(aname,
dtcorrect=o.dtcor)
mca_thread = Thread(target=_getmca_area, args=(aname,))
mca_thread.start()
self.owner.show_XRFDisplay()
mca_thread.join()
pref, fname = os.path.split(self.owner.current_file.filename)
npix = area[()].sum()
self._mca.filename = fname
self._mca.title = label
self._mca.npixels = npix
self.owner.message("Plotting XRF Spectra for area '%s'..." % aname)
self.owner.xrfdisplay.add_mca(self._mca, label="%s:%s" % (fname, label),
plot=not as_mca2)
if as_mca2:
self.owner.xrfdisplay.swap_mcas()
def onXRD(self, event=None, save=False, show=False,
xrd1d=False, xrd2d=False, verbose=True):
try:
aname = self._getarea()
xrmfile = self.owner.current_file
area = xrmfile.xrmmap['areas/%s' % aname]
title = area.attrs.get('description', aname)
env_names = list(xrmfile.xrmmap['config/environ/name'])
env_vals = list(xrmfile.xrmmap['config/environ/value'])
for name, val in zip(env_names, env_vals):
if 'mono.energy' in str(name).lower():
energy = float(val)/1000.
except:
if verbose:
print('No map file and/or areas specified.')
return
xrmfile.reset_flags()
if not xrmfile.has_xrd1d and not xrmfile.has_xrd2d:
if verbose:
print('No XRD data in map file: %s' % self.owner.current_file.filename)
return
ponifile = bytes2str(xrmfile.xrmmap['xrd1d'].attrs.get('calfile',''))
ponifile = ponifile if os.path.exists(ponifile) else None
if show:
self.owner.message('Plotting XRD pattern for \'%s\'...' % title)
if save:
self.owner.message('Saving XRD pattern for \'%s\'...' % title)
path,stem = os.path.split(self.owner.current_file.filename)
stem = '%s_%s' % (stem,title)
kwargs = dict(filename=self.owner.current_file.filename,
npixels = area[()].sum(),
energy = 0.001*xrmfile.get_incident_energy(),
calfile = ponifile, title = title, xrd2d=False)
if xrd1d and xrmfile.has_xrd1d:
self._xrd = xrmfile.get_xrd1d_area(aname, **kwargs)
if show:
label = '%s: %s' % (os.path.split(self._xrd.filename)[-1], title)
self.owner.display_xrd1d(self._xrd.data1D, self._xrd.q,
self._xrd.energy, label=label)
if save:
wildcards = '1D XRD file (*.xy)|*.xy|All files (*.*)|*.*'
dlg = wx.FileDialog(self, 'Save file as...',
defaultDir=os.getcwd(),
defaultFile='%s.xy' % stem,
wildcard=wildcards,
style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath().replace('\\', '/')
dlg.Destroy()
print('\nSaving 1D XRD in file: %s' % (filename))
save1D(filename, self._xrd.data1D[0], self._xrd.data1D[1], calfile=ponifile)
## turns off flag since it has already been displayed/saved
xrd1d = False
if xrd2d:
print("Looking for 2D XRD Data")
try:
_xrd = xrmfile.get_xrd2d_area(aname, **kwargs)
except:
_xrd = None
if _xrd is None:
print("no 2D XRD Data")
return
label = '%s: %s' % (os.path.split(_xrd.filename)[-1], title)
self.owner.display_2Dxrd(_xrd.data2D, label=label, xrmfile=xrmfile)
wildcards = '2D XRD file (*.tiff)|*.tif;*.tiff;*.edf|All files (*.*)|*.*'
fname = xrmfile.filename + '_' + aname
dlg = wx.FileDialog(self, 'Save file as...',
defaultDir=os.getcwd(),
defaultFile='%s.tiff' % fname,
wildcard=wildcards,
style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_OK:
filename = os.path.abspath(dlg.GetPath().replace('\\', '/'))
_xrd.save_2D(file=filename, verbose=True)
dlg.Destroy()
class MapViewerFrame(wx.Frame):
cursor_menulabels = {'lasso': ('Select Points for XRF Spectra\tCtrl+X',
'Left-Drag to select points for XRF Spectra')}
def __init__(self, parent=None, filename=None, _larch=None,
use_scandb=False, version_info=None,
size=(925, 650), **kwds):
kwds['style'] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, parent, -1, size=size, **kwds)
self.data = None
self.use_scandb = use_scandb
self.filemap = {}
self.im_displays = []
self.tomo_displays = []
self.plot_displays = []
self.current_file = None
self.larch_buffer = parent
if not isinstance(parent, LarchFrame):
self.larch_buffer = LarchFrame(_larch=_larch, is_standalone=False, with_raise=False)
self.larch = self.larch_buffer.larchshell
self.xrfdisplay = None
self.xrddisplay1D = None
self.xrddisplay2D = None
self.watch_files = False
self.files_in_progress = []
# self.hotcols = False
self.dtcor = True
self.showxrd = False
self.SetTitle('GSE XRM MapViewer')
self.createMainPanel()
self.SetFont(Font(FONTSIZE))
self.createMenus()
self.statusbar = self.CreateStatusBar(2, 0)
self.statusbar.SetStatusWidths([-3, -1])
statusbar_fields = ['Initializing....', ' ']
for i in range(len(statusbar_fields)):
self.statusbar.SetStatusText(statusbar_fields[i], i)
self.htimer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.onTimer, self.htimer)
self.h5convert_done = True
self.h5convert_irow = 0
self.h5convert_nrow = 0
read_workdir('gsemap.dat')
w0, h0 = self.GetSize()
w1, h1 = self.GetBestSize()
self.SetSize((max(w0, w1)+5, max(h0, h1)+5))
self.SetMinSize((500, 300))
self.Show()
self.scandb = None
self.instdb = None
self.inst_name = None
self.move_callback = None
if version_info is not None:
if version_info.update_available:
self.onCheckforUpdates()
if filename is not None:
wx.CallAfter(self.onRead, filename)
def CloseFile(self, filename, event=None):
if filename in self.filemap:
self.filemap[filename].close()
self.filemap.pop(filename)
def createMainPanel(self):
splitter = wx.SplitterWindow(self, style=wx.SP_LIVE_UPDATE)
splitter.SetMinimumPaneSize(250)
self.filelist = EditableListBox(splitter, self.ShowFile,
remove_action=self.CloseFile,
size=(250, -1))
dpanel = self.detailspanel = wx.Panel(splitter)
self.createNBPanels(dpanel)
splitter.SplitVertically(self.filelist, self.detailspanel, 1)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(splitter, 1, wx.GROW|wx.ALL, 5)
pack(self, sizer)
fico = os.path.join(icondir, XRF_ICON_FILE)
try:
self.SetIcon(wx.Icon(fico, wx.BITMAP_TYPE_ICO))
except:
pass
self.Raise()
wx.CallAfter(self.init_larch)
def createNBPanels(self, parent):
self.title = SimpleText(parent, 'initializing...', size=(680, -1))
self.SetBackgroundColour('#F0F0E8')
nbpanels = OrderedDict()
for panel in (MapPanel, MapInfoPanel, MapAreaPanel, MapMathPanel,
TomographyPanel, XRFAnalysisPanel):
nbpanels[panel.label] = panel
self.nb = flatnotebook(parent, nbpanels, panelkws={'owner':self},
on_change=self.onNBChanged)
self.roimap_panel = self.nb.GetPage(0)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.title, 0, ALL_CEN)
sizer.Add(self.nb, 1, wx.ALL|wx.EXPAND)
parent.SetSize((700, 400))
pack(parent, sizer)
def onNBChanged(self, event=None):
cb = getattr(self.nb.GetCurrentPage(), 'update_xrmmap', None)
if callable(cb):
cb()
def get_mca_area(self, mask, xoff=0, yoff=0, det=None, xrmfile=None):
if xrmfile is None:
xrmfile = self.current_file
aname = xrmfile.add_area(mask)
self.sel_mca = xrmfile.get_mca_area(aname, det=det)
def lassoHandler(self, mask=None, xrmfile=None, xoff=0, yoff=0,
det=None, **kws):
if xrmfile is None:
xrmfile = self.current_file
ny, nx = xrmfile.get_shape()
if mask.sum() < 1:
return
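        # if the selection came from a cropped, offset, or transposed view, remap the mask onto the full (ny, nx) map grid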
if (xoff>0 or yoff>0) or mask.shape != (ny, nx):
if mask.shape == (nx, ny): ## sinogram
mask = np.swapaxes(mask,0,1)
# elif mask.shape == (ny, ny) or mask.shape == (nx, nx): ## tomograph
# tomo = True
else:
ym, xm = mask.shape
tmask = np.zeros((ny, nx)).astype(bool)
xmax = min(nx, xm+xoff)
for iy in range(ym):
if iy+yoff < ny:
tmask[iy+yoff, xoff:xmax] = mask[iy]
mask = tmask
kwargs = dict(xrmfile=xrmfile, xoff=xoff, yoff=yoff, det=det)
mca_thread = Thread(target=self.get_mca_area,
args=(mask,), kwargs=kwargs)
mca_thread.start()
self.show_XRFDisplay()
mca_thread.join()
if hasattr(self, 'sel_mca'):
path, fname = os.path.split(xrmfile.filename)
aname = self.sel_mca.areaname
area = xrmfile.xrmmap['areas/%s' % aname]
npix = area[()].sum()
self.sel_mca.filename = fname
self.sel_mca.title = aname
self.sel_mca.npixels = npix
self.xrfdisplay.add_mca(self.sel_mca, label='%s:%s'% (fname, aname),
plot=True)
update_xrmmap = getattr(self.nb.GetCurrentPage(), 'update_xrmmap', None)
if callable(update_xrmmap):
update_xrmmap(xrmfile=self.current_file)
if self.showxrd:
for page in self.nb.pagelist:
if hasattr(page, 'onXRD'):
page.onXRD(show=True, xrd1d=True,verbose=False)
def show_XRFDisplay(self, do_raise=True, clear=True, xrmfile=None):
'make sure XRF plot frame is enabled and visible'
if xrmfile is None:
xrmfile = self.current_file
if self.xrfdisplay is None:
self.xrfdisplay = XRFDisplayFrame(parent=self.larch_buffer,
_larch=self.larch)
try:
self.xrfdisplay.Show()
except:
self.xrfdisplay = XRFDisplayFrame(parent=self.larch_buffer,
_larch=self.larch)
self.xrfdisplay.Show()
if do_raise:
self.xrfdisplay.Raise()
if clear:
self.xrfdisplay.panel.clear()
self.xrfdisplay.panel.reset_config()
def onMoveToPixel(self, xval, yval):
if not HAS_EPICS:
return
xrmmap = self.current_file.xrmmap
pos_addrs = [str(x) for x in xrmmap['config/positioners'].keys()]
pos_label = [str(x[()]) for x in xrmmap['config/positioners'].values()]
pos1 = h5str(xrmmap['config/scan/pos1'][()])
pos2 = h5str(xrmmap['config/scan/pos2'][()])
i1 = pos_addrs.index(pos1)
i2 = pos_addrs.index(pos2)
msg = '%s(%s) = %.4f, %s(%s) = %.4f?' % (pos_label[i1], pos_addrs[i1], xval,
pos_label[i2], pos_addrs[i2], yval)
if (wx.ID_YES == Popup(self, 'Really move stages to\n %s?' % msg,
'move stages to pixel?', style=wx.YES_NO)):
caput(pos_addrs[i1], xval)
caput(pos_addrs[i2], yval)
def onSavePixel(self, name, ix, iy, x=None, y=None, title=None, xrmfile=None):
'save pixel as area, and perhaps to scandb'
if x is None:
x = float(xrmfile.get_pos(0, mean=True)[ix])
if y is None:
y = float(xrmfile.get_pos(1, mean=True)[iy])
if len(name) < 1:
return
if xrmfile is None:
xrmfile = self.current_file
# first, create 1-pixel mask for area, and save that
ny, nx = xrmfile.get_shape()
tmask = np.zeros((ny, nx)).astype(bool)
tmask[int(iy), int(ix)] = True
xrmfile.add_area(tmask, name=name)
# for page in self.nb.pagelist:
# if hasattr(page, 'update_xrmmap'):
# page.update_xrmmap(xrmfile=xrmfile)
update_xrmmap = getattr(self.nb.GetCurrentPage(), 'update_xrmmap', None)
if callable(update_xrmmap):
update_xrmmap(xrmfile=xrmfile)
# show position on map
self.im_displays[-1].panel.add_highlight_area(tmask, label=name)
# make sure we can save position into database
if self.scandb is None or self.instdb is None:
return
samplestage = self.instdb.get_instrument(self.inst_name)
if samplestage is None:
return
allpvs = [pv.name for pv in samplestage.pv]
pvn = pv_fullname
conf = xrmfile.xrmmap['config']
pos_addrs = [pvn(h5str(tval)) for tval in conf['positioners']]
env_addrs = [pvn(h5str(tval)) for tval in conf['environ/address']]
env_vals = [h5str(tval) for tval in conf['environ/value']]
position = {}
for pv in allpvs:
position[pv] = None
for addr, val in zip(env_addrs, env_vals):
if addr in allpvs:
position[addr] = float(val)
position[pvn(h5str(conf['scan/pos1'][()]))] = x
position[pvn(h5str(conf['scan/pos2'][()]))] = y
notes = {'source': '%s: %s' % (xrmfile.filename, name)}
self.instdb.save_position(self.inst_name, name, position,
notes=json.dumps(notes))
def add_tomodisplay(self, title, det=None, _lassocallback=True):
if _lassocallback:
lasso_cb = partial(self.lassoHandler, det=det)
else:
lasso_cb = None
imframe = MapImageFrame(output_title=title,
lasso_callback=lasso_cb)
self.tomo_displays.append(imframe)
def display_tomo(self, tomo, title='', info='', x=None, y=None, xoff=0,
yoff=0, det=None, subtitles=None, xrmfile=None,
_lassocallback=True):
displayed = False
if _lassocallback:
lasso_cb = partial(self.lassoHandler, det=det, xrmfile=xrmfile)
else:
lasso_cb = None
while not displayed:
try:
tmd = self.tomo_displays.pop()
clevel = tmd.panel.conf.contrast_level
if clevel in (0, None):
clevel = 0.5
tmd.display(tomo, title=title, subtitles=subtitles,
contrast_level=clevel)
tmd.lasso_callback = lasso_cb
displayed = True
except IndexError:
tmd = MapImageFrame(output_title=title,
lasso_callback=lasso_cb)
tmd.display(tomo, title=title, subtitles=subtitles,
contrast_level=0.5)
displayed = True
except:
displayed = False
self.tomo_displays.append(tmd)
tmd.SetStatusText(info, 1)
tmd.Show()
tmd.Raise()
def add_imdisplay(self, title, det=None):
imd = MapImageFrame(output_title=title,
lasso_callback=partial(self.lassoHandler, det=det),
cursor_labels=self.cursor_menulabels,
save_callback=self.onSavePixel)
self.im_displays.append(imd)
return imd
def display_map(self, map, title='', info='', x=None, y=None, xoff=0, yoff=0,
det=None, subtitles=None, xrmfile=None, with_savepos=True):
"""display a map in an available image display"""
if xrmfile is None:
hotcols = False
else:
hotcols = xrmfile.hotcols
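        # trim the x coordinate array for zigzag scans or dropped hot columns so it matches the map width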
if x is not None:
zigzag = abs(xrmfile.zigzag)
if zigzag != 0:
x = x[zigzag:-zigzag]
elif hotcols and map.shape[1] != x.shape[0]:
x = x[1:-1]
dopts = dict(title=title, x=x, y=y, xoff=xoff, yoff=yoff,
det=det, subtitles=subtitles,
xrmfile=xrmfile, with_savepos=with_savepos)
displayed = False
while not displayed:
if 'contrast_level' not in dopts:
dopts['contrast_level'] = 0.5
            if len(self.im_displays) == 0:
                imd = self.add_imdisplay(title=title, det=det)
                imd.display(map, **dopts)
                displayed = True
else:
try:
imd = self.im_displays[-1]
if imd.panel.conf.contrast_level not in (0, None):
dopts['contrast_level'] = imd.panel.conf.contrast_level
imd.display(map, **dopts)
displayed = True
except IndexError:
pass
except:
self.im_displays.pop()
imd.SetStatusText(info, 1)
imd.Show()
imd.Raise()
def display_2Dxrd(self, map, label='image 0', xrmfile=None, flip=True):
'''
displays 2D XRD pattern in diFFit viewer
'''
flptyp = 'vertical' if flip is True else False
poni = bytes2str(self.current_file.xrmmap['xrd1d'].attrs.get('calfile',''))
if not os.path.exists(poni):
poni = None
if self.xrddisplay2D is None:
self.xrddisplay2D = XRD2DViewerFrame(_larch=self.larch,flip=flptyp,
xrd1Dviewer=self.xrddisplay1D,
ponifile=poni)
try:
self.xrddisplay2D.plot2Dxrd(label,map)
except:
self.xrddisplay2D = XRD2DViewerFrame(_larch=self.larch,flip=flptyp,
xrd1Dviewer=self.xrddisplay1D)
self.xrddisplay2D.plot2Dxrd(label,map)
self.xrddisplay2D.Show()
def display_xrd1d(self, counts, q, energy, label='dataset 0', xrmfile=None):
'''
displays 1D XRD pattern in diFFit viewer
'''
wavelength = lambda_from_E(energy, E_units='keV')
xdat = xrd1d(label=label, energy=energy, wavelength=wavelength)
xdat.set_xy_data(np.array([q, counts]), 'q')
if self.xrddisplay1D is None:
self.xrddisplay1D = XRD1DViewerFrame(_larch=self.larch)
try:
self.xrddisplay1D.xrd1Dviewer.add1Ddata(xdat)
self.xrddisplay1D.Show()
except:
self.xrddisplay1D = XRD1DViewerFrame(_larch=self.larch)
self.xrddisplay1D.xrd1Dviewer.add1Ddata(xdat)
self.xrddisplay1D.Show()
def init_larch(self):
self.SetStatusText('ready')
self.datagroups = self.larch.symtable
if ESCAN_CRED is not None:
self.move_callback = self.onMoveToPixel
try:
self.scandb = connect_scandb(_larch=self.larch)
self.instdb = self.larch.symtable._scan._instdb
self.inst_name = self.scandb.get_info('samplestage_instrument',
default='SampleStage')
print(" ScanDB: %s, Instrument=%s" % (self.scandb.engine, self.inst_name))
except:
etype, emsg, tb = sys.exc_info()
print('Could not connect to ScanDB: %s' % (emsg))
self.scandb = self.instdb = None
wx.CallAfter(self.onFolderSelect)
def ShowFile(self, evt=None, filename=None, process_file=True, **kws):
if filename is None and evt is not None:
filename = evt.GetString()
if not self.h5convert_done or filename not in self.filemap:
return
self.current_file = self.filemap[filename]
if (self.check_ownership(filename) and
self.current_file.folder_has_newdata()):
if process_file:
mnew = self.roimap_panel.mapproc_nrows.GetStringSelection()
try:
mnew = int(mnew)
except:
mnew = None
self.process_file(filename, max_new_rows=mnew)
ny, nx = self.current_file.get_shape()
self.title.SetLabel('%s: (%i x %i)' % (filename, nx, ny))
fnames = self.filelist.GetItems()
cb = getattr(self.nb.GetCurrentPage(), 'update_xrmmap', None)
if callable(cb):
cb(xrmfile=self.current_file)
cb = getattr(self.nb.GetCurrentPage(), 'set_file_choices', None)
if callable(cb):
cb(fnames)
def createMenus(self):
self.menubar = wx.MenuBar()
fmenu = wx.Menu()
MenuItem(self, fmenu, '&Open XRM Map File\tCtrl+O', 'Read XRM Map File', self.onReadFile)
MenuItem(self, fmenu, '&Open XRM Map Folder\tCtrl+F', 'Read XRM Map Folder', self.onReadFolder)
fmenu.AppendSeparator()
MenuItem(self, fmenu, 'Change &Working Folder', 'Choose working directory',
self.onFolderSelect)
MenuItem(self, fmenu, 'Show Larch Buffer\tCtrl+L', 'Show Larch Programming Buffer',
self.onShowLarchBuffer)
# cmenu = fmenu.Append(-1, '&Watch HDF5 Files\tCtrl+W', 'Watch HDF5 Files', kind=wx.ITEM_CHECK)
# fmenu.Check(cmenu.Id, self.watch_files) ## False
# self.Bind(wx.EVT_MENU, self.onWatchFiles, id=cmenu.Id)
fmenu.AppendSeparator()
MenuItem(self, fmenu, '&Quit\tCtrl+Q',
'Quit program', self.onClose)
rmenu = wx.Menu()
MenuItem(self, rmenu, 'Define new ROI',
'Define new ROI', self.defineROI)
MenuItem(self, rmenu, 'Load ROI File for 1DXRD',
'Load ROI File for 1DXRD', self.add1DXRDFile)
rmenu.AppendSeparator()
MenuItem(self, rmenu, 'Load XRD calibration file',
'Load XRD calibration file', self.openPONI)
MenuItem(self, rmenu, 'Add 1DXRD for HDF5 file',
'Calculate 1DXRD for HDF5 file', self.add1DXRD)
# cmenu = fmenu.Append(-1, 'Display 1DXRD for areas',
# 'Display 1DXRD for areas',
# kind=wx.ITEM_CHECK)
#fmenu.Check(cmenu.Id, self.showxrd) ## False
#self.Bind(wx.EVT_MENU, self.onShow1DXRD, id=cmenu.Id)
hmenu = wx.Menu()
MenuItem(self, hmenu, 'About GSE XRM MapViewer', 'About GSE XRM MapViewer',
self.onAbout)
MenuItem(self, hmenu, 'Check for Updates', 'Check for Updates',
self.onCheckforUpdates)
self.menubar.Append(fmenu, '&File')
self.menubar.Append(rmenu, '&ROIs')
self.menubar.Append(hmenu, '&Help')
self.SetMenuBar(self.menubar)
self.Bind(wx.EVT_CLOSE, self.onClose)
def onShowLarchBuffer(self, evt=None):
if self.larch_buffer is None:
self.larch_buffer = LarchFrame(_larch=self.larch, is_standalone=False)
self.larch_buffer.Show()
self.larch_buffer.Raise()
def onFolderSelect(self, evt=None):
dlg = wx.DirDialog(self, 'Select Working Directory:',
os.getcwd(),
style=wx.DD_DIR_MUST_EXIST|wx.DD_DEFAULT_STYLE)
if dlg.ShowModal() == wx.ID_OK:
basedir = os.path.abspath(str(dlg.GetPath()))
try:
if len(basedir) > 0:
os.chdir(nativepath(basedir))
save_workdir(nativepath(basedir))
except OSError:
                print('Changing folder failed')
save_workdir('gsemap.dat')
dlg.Destroy()
def onAbout(self, event=None):
info = AboutDialogInfo()
info.SetName('GSE XRM MapViewer')
info.SetDescription('X-ray Microprobe Mapping Data Visualization and Analysis')
info.SetVersion(larch.version.__version__)
info.AddDeveloper('Matthew Newville: newville at cars.uchicago.edu')
dlg = AboutBox(info)
def onCheckforUpdates(self, event=None):
dlg = LarchUpdaterDialog(self, caller='GSE MapViewer')
dlg.Raise()
dlg.SetWindowStyle(wx.STAY_ON_TOP)
res = dlg.GetResponse()
dlg.Destroy()
if res.ok and res.run_updates:
from larch.apps import update_larch
update_larch()
self.onClose(evt=event, prompt=False)
def onClose(self, evt=None, prompt=True):
if prompt:
dlg = wx.MessageDialog(None, 'Really Quit?', 'Question',
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
ret = dlg.ShowModal()
if ret != wx.ID_YES:
return
save_workdir('gsemap.dat')
for xrmfile in self.filemap.values():
try:
xrmfile.close()
except KeyError:
pass
## Closes maps, 2D XRD image
for disp in self.im_displays + self.plot_displays + self.tomo_displays:
try:
disp.Destroy()
except:
pass
try:
self.xrfdisplay.Destroy()
except:
pass
try:
self.xrddisplay1D.Destroy()
except:
pass
try:
self.xrddisplay2D.Destroy()
except:
pass
wx.CallAfter(self.larch.symtable._plotter.close_all_displays)
if self.larch_buffer is not None:
try:
self.larch_buffer.Show()
self.larch_buffer.onExit(force=True)
except:
pass
self.Destroy()
def onReadFile(self, evt=None):
if not self.h5convert_done:
print('cannot open file while processing a map folder')
return
dlg = wx.FileDialog(self, message='Read XRM Map File',
defaultDir=os.getcwd(),
wildcard=FILE_WILDCARDS,
style=wx.FD_OPEN|wx.FD_MULTIPLE)
path, read = None, False
if dlg.ShowModal() == wx.ID_OK:
read = True
paths = [p.replace('\\', '/') for p in dlg.GetPaths()]
dlg.Destroy()
if not read:
return
for path in paths:
parent, fname = os.path.split(path)
read = True
if fname in self.filemap:
read = (wx.ID_YES == Popup(self, "Re-read file '%s'?" % path,
'Re-read file?', style=wx.YES_NO))
if read:
xrmfile = GSEXRM_MapFile(filename=str(path), scandb=self.scandb)
self.add_xrmfile(xrmfile)
def onRead(self, path):
"simple Read and install XRM Map File"
xrmfile = GSEXRM_MapFile(filename=str(path), scandb=self.scandb)
self.add_xrmfile(xrmfile)
def onReadFolder(self, evt=None):
if not self.h5convert_done:
print( 'cannot open file while processing a map folder')
return
dlg = wx.DirDialog(self, message='Read XRM Map Folder',
defaultPath=os.getcwd(),
style=wx.DD_DIR_MUST_EXIST|wx.DD_DEFAULT_STYLE)
if dlg.ShowModal() == wx.ID_OK:
folder = os.path.abspath(dlg.GetPath())
dlg.Destroy()
xrmfile = GSEXRM_MapFile(folder=folder, scandb=self.scandb)
self.add_xrmfile(xrmfile)
def add_xrmfile(self, xrmfile):
parent, fname = os.path.split(xrmfile.filename)
# print("Add XRM File ", fname)
# look for group with this name or for next available group
for i in range(1000):
gname = 'map%3.3i' % (i+1)
xgroup = getattr(self.datagroups, gname, None)
if xgroup is None:
break
gpar, gfname = os.path.split(xgroup.filename)
if gfname == fname:
break
setattr(self.datagroups, gname, xrmfile)
xrmfile.groupname = gname
if fname not in self.filemap:
self.filemap[fname] = xrmfile
if fname not in self.filelist.GetItems():
self.filelist.Append(fname)
self.filelist.SetStringSelection(fname)
if self.check_ownership(fname):
mnew = self.roimap_panel.mapproc_nrows.GetStringSelection()
try:
mnew = int(mnew)
except:
mnew = None
self.process_file(fname, max_new_rows=mnew)
self.ShowFile(filename=fname)
if parent is not None and len(parent) > 0:
try:
os.chdir(nativepath(parent))
save_workdir(nativepath(parent))
except:
pass
def openPONI(self, evt=None):
"""
Read specified poni file.
mkak 2016.07.21
"""
if len(self.filemap) > 0:
myDlg = OpenPoniFile()
read = False
if myDlg.ShowModal() == wx.ID_OK:
read = True
path = myDlg.XRDInfo[1].GetValue()
flip = False if myDlg.XRDInfo[0].GetSelection() == 1 else True
myDlg.Destroy()
if read:
self.current_file.add_XRDfiles(xrdcalfile=path,flip=flip)
update_xrmmap = getattr(self.nb.GetCurrentPage(),
'update_xrmmap', None)
if callable(update_xrmmap):
update_xrmmap(xrmfile=self.current_file)
def defineROI(self, event=None):
if not self.h5convert_done:
print( 'cannot open file while processing a map folder')
return
if len(self.filemap) > 0:
myDlg = ROIPopUp(self)
path, read = None, False
if myDlg.ShowModal() == wx.ID_OK:
read = True
myDlg.Destroy()
if read:
update_xrmmap = getattr(self.nb.GetCurrentPage(),
'update_xrmmap', None)
if callable(update_xrmmap):
update_xrmmap(xrmfile=self.current_file)
def add1DXRDFile(self, event=None):
if len(self.filemap) > 0:
read = False
wildcards = '1D-XRD ROI file (*.dat)|*.dat|All files (*.*)|*.*'
dlg = wx.FileDialog(self, message='Select 1D-XRD ROI file',
defaultDir=os.getcwd(),
wildcard=wildcards,
style=wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
read = True
path = dlg.GetPath().replace('\\', '/')
dlg.Destroy()
if read and os.path.exists(path):
time.sleep(1) ## will hopefully allow time for dialog window to close
self.current_file.read_xrd1D_ROIFile(path)
def add1DXRD(self, event=None):
if len(self.filemap) > 0:
xrd1Dgrp = ensure_subgroup('xrd1d',self.current_file.xrmmap)
poni_path = bytes2str(xrd1Dgrp.attrs.get('calfile',''))
if not os.path.exists(poni_path):
self.openPONI()
poni_path = bytes2str(xrd1Dgrp.attrs.get('calfile',''))
if os.path.exists(poni_path):
self.current_file.add_xrd1d()
def onShow1DXRD(self, event=None):
self.showxrd = event.IsChecked()
if self.showxrd:
msg = 'Show 1DXRD data for area'
else:
msg = 'Not displaying 1DXRD for area'
self.message(msg)
##print(msg)
# def onCorrectDeadtime(self, event=None):
# self.dtcor = event.IsChecked()
# if self.dtcor:
# msg = 'Using deadtime corrected data...'
# else:
# msg = 'Using raw data...'
# self.message(msg)
# ##print(msg)
#
# def onHotColumns(self, event=None):
# self.hotcols = event.IsChecked()
# if self.hotcols:
# msg = 'Ignoring first/last data columns.'
# else:
# msg = 'Using all data columns'
# self.message(msg)
# ##print(msg)
def onWatchFiles(self, event=None):
self.watch_files = event.IsChecked()
if not self.watch_files:
self.file_timer.Stop()
msg = 'Watching Files/Folders for Changes: Off'
else:
self.file_timer.Start(10000)
msg = 'Watching Files/Folders for Changes: On'
self.message(msg)
def onFileWatchTimer(self, event=None):
if self.current_file is not None and len(self.files_in_progress) == 0:
if self.current_file.folder_has_newdata():
path, fname = os.path.split(self.current_file.filename)
self.process_file(fname, max_new_rows=1e6)
def process_file(self, filename, max_new_rows=None, on_complete=None):
"""Request processing of map file.
This can take awhile, so is done in a separate thread,
with updates displayed in message bar
"""
xrmfile = self.filemap[filename]
if xrmfile.status == GSEXRM_FileStatus.created:
xrmfile.initialize_xrmmap(callback=self.updateTimer)
        if xrmfile.dimension is None and isGSEXRM_MapFolder(xrmfile.folder):
xrmfile.read_master()
if (xrmfile.folder_has_newdata() and self.h5convert_done
and filename not in self.files_in_progress):
self.files_in_progress.append(filename)
self.h5convert_fname = filename
self.h5convert_done = False
self.h5convert_oncomplete = on_complete
self.htimer.Start(500)
maxrow = None
if max_new_rows is not None:
maxrow = max_new_rows + xrmfile.last_row + 1
## this calls process function of xrm_mapfile class
self.h5convert_thread = Thread(target=xrmfile.process,
kwargs={'callback':self.updateTimer,
'maxrow': maxrow})
self.h5convert_thread.start()
elif callable(on_complete):
on_complete()
def updateTimer(self, row=None, maxrow=None, filename=None, status=None):
if row is not None: self.h5convert_irow = row
if maxrow is not None: self.h5convert_nrow = maxrow
if filename is not None: self.h5convert_fname = filename
self.h5convert_done = True if status == 'complete' else False
msg = 'processing %s: row %i of %i' % (self.h5convert_fname,
self.h5convert_irow,
self.h5convert_nrow)
wx.CallAfter(self.message, msg)
def onTimer(self, event=None):
fname, irow, nrow = self.h5convert_fname, self.h5convert_irow, self.h5convert_nrow
# self.message('processing %s: row %i of %i' % (fname, irow, nrow))
# print("process timer ", self.h5convert_done, irow)
if self.h5convert_done:
self.htimer.Stop()
self.h5convert_thread.join()
self.files_in_progress = []
self.message('MapViewer processing %s: complete!' % fname)
_path, _fname = os.path.split(fname)
if _fname in self.filemap:
cfile = self.current_file = self.filemap[_fname]
ny, nx = cfile.get_shape()
self.title.SetLabel('%s: (%i x %i)' % (_fname, nx, ny))
update_xrmmap = getattr(self.nb.GetCurrentPage(),
'update_xrmmap', None)
if callable(update_xrmmap) and _fname in self.filemap:
update_xrmmap(xrmfile=cfile)
if self.h5convert_oncomplete is not None:
self.h5convert_oncomplete()
def message(self, msg, win=0):
self.statusbar.SetStatusText(msg, win)
def check_ownership(self, fname):
"""
check whether we're currently owner of the file.
this is important!! HDF5 files can be corrupted.
"""
if not self.filemap[fname].check_hostid():
if (wx.ID_YES == Popup(self, NOT_OWNER_MSG % fname,
'Not Owner of HDF5 File',
style=wx.YES_NO)):
self.filemap[fname].take_ownership()
return self.filemap[fname].check_hostid()
class OpenPoniFile(wx.Dialog):
""""""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
        wx.Dialog.__init__(self, None, title='XRD Calibration File', size=(350, 280))
panel = wx.Panel(self)
################################################################################
cal_chc = ['Dioptas calibration file:','pyFAI calibration file:']
cal_spn = wx.SP_VERTICAL|wx.SP_ARROW_KEYS|wx.SP_WRAP
self.PoniInfo = [ Choice(panel, choices=cal_chc ),
wx.TextCtrl(panel, size=(320, 25)),
Button(panel, label='Browse...')]
self.PoniInfo[2].Bind(wx.EVT_BUTTON, self.onBROWSEponi)
ponisizer = wx.BoxSizer(wx.VERTICAL)
ponisizer.Add(self.PoniInfo[0], flag=wx.TOP, border=15)
ponisizer.Add(self.PoniInfo[1], flag=wx.TOP, border=5)
ponisizer.Add(self.PoniInfo[2], flag=wx.TOP|wx.BOTTOM, border=5)
################################################################################
hlpBtn = wx.Button(panel, wx.ID_HELP )
okBtn = wx.Button(panel, wx.ID_OK )
canBtn = wx.Button(panel, wx.ID_CANCEL )
minisizer = wx.BoxSizer(wx.HORIZONTAL)
minisizer.Add(hlpBtn, flag=wx.RIGHT, border=5)
minisizer.Add(canBtn, flag=wx.RIGHT, border=5)
minisizer.Add(okBtn, flag=wx.RIGHT, border=5)
################################################################################
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add((-1, 10))
sizer.Add(ponisizer, flag=wx.TOP|wx.LEFT, border=5)
sizer.Add((-1, 15))
sizer.Add(minisizer, flag=wx.ALIGN_RIGHT, border=5)
panel.SetSizer(sizer)
################################################################################
## Set defaults
self.PoniInfo[0].SetSelection(0)
self.FindWindowById(wx.ID_OK).Disable()
def checkOK(self,event=None):
if os.path.exists(self.PoniInfo[1].GetValue()):
self.FindWindowById(wx.ID_OK).Enable()
else:
self.FindWindowById(wx.ID_OK).Disable()
def onBROWSEponi(self,event=None):
wildcards = 'XRD calibration file (*.poni)|*.poni|All files (*.*)|*.*'
if os.path.exists(self.PoniInfo[1].GetValue()):
dfltDIR = self.PoniInfo[1].GetValue()
else:
dfltDIR = os.getcwd()
dlg = wx.FileDialog(self, message='Select XRD calibration file',
defaultDir=dfltDIR,
wildcard=wildcards, style=wx.FD_OPEN)
path, read = None, False
if dlg.ShowModal() == wx.ID_OK:
read = True
path = dlg.GetPath().replace('\\', '/')
dlg.Destroy()
if read:
self.PoniInfo[1].Clear()
self.PoniInfo[1].SetValue(str(path))
self.checkOK()
##################
class ROIPopUp(wx.Dialog):
""""""
#----------------------------------------------------------------------
def __init__(self, owner, **kws):
"""Constructor"""
        wx.Dialog.__init__(self, None, title='ROI Tools', size=(450, 500))
panel = wx.Panel(self)
################################################################################
self.owner = owner
self.cfile = self.owner.current_file
self.xrmmap = self.cfile.xrmmap
self.gp = GridPanel(panel, nrows=8, ncols=4,
itemstyle=LEFT, gap=3, **kws)
self.roi_name = wx.TextCtrl(self, -1, 'ROI_001', size=(120, -1))
self.roi_chc = [Choice(self, size=(150, -1)),
Choice(self, size=(150, -1))]
fopts = dict(minval=-1, precision=3, size=(100, -1))
self.roi_lims = [FloatCtrl(self, value=0, **fopts),
FloatCtrl(self, value=-1, **fopts),
FloatCtrl(self, value=0, **fopts),
FloatCtrl(self, value=-1, **fopts)]
self.gp.Add(SimpleText(self, ' Add new ROI: '), dcol=2, style=LEFT, newrow=True)
self.gp.Add(SimpleText(self, ' Name:'), newrow=True)
self.gp.Add(self.roi_name, dcol=2)
self.gp.Add(SimpleText(self, ' Type:'), newrow=True)
self.gp.Add(self.roi_chc[0], dcol=2)
self.gp.Add(SimpleText(self, ' Limits:'), newrow=True)
self.gp.AddMany((self.roi_lims[0],self.roi_lims[1],self.roi_chc[1]),
dcol=1, style=LEFT)
self.gp.AddMany((SimpleText(self, ' '),self.roi_lims[2],self.roi_lims[3]),
dcol=1, style=LEFT, newrow=True)
self.gp.AddMany((SimpleText(self, ' '),
Button(self, 'Add ROI', size=(100, -1), action=self.onCreateROI)),
dcol=1, style=LEFT, newrow=True)
###############################################################################
self.rm_roi_ch = [Choice(self, size=(120, -1)),
Choice(self, size=(120, -1))]
fopts = dict(minval=-1, precision=3, size=(100, -1))
self.rm_roi_lims = SimpleText(self, '')
self.gp.Add(SimpleText(self, 'Delete ROI: '), dcol=2, newrow=True)
self.gp.AddMany((SimpleText(self, 'Detector:'),self.rm_roi_ch[0]), newrow=True)
self.gp.AddMany((SimpleText(self, 'ROI:'),self.rm_roi_ch[1]), newrow=True)
self.gp.Add(SimpleText(self, 'Limits:'), newrow=True)
self.gp.Add(self.rm_roi_lims, dcol=3)
self.gp.AddMany((SimpleText(self, ''),Button(self, 'Remove ROI', size=(100, -1), action=self.onRemoveROI)), newrow=True)
self.gp.Add(SimpleText(self, ''),newrow=True)
self.gp.AddMany((SimpleText(self, ''),SimpleText(self, ''),
wx.Button(self, wx.ID_OK, label='Done')), newrow=True)
self.roi_chc[0].Bind(wx.EVT_CHOICE, self.roiUNITS)
self.roi_lims[2].Disable()
self.roi_lims[3].Disable()
self.rm_roi_ch[1].Bind(wx.EVT_CHOICE, self.roiSELECT)
self.gp.pack()
self.cfile.reset_flags()
self.roiTYPE()
def roiTYPE(self,event=None):
roitype = []
delroi = []
if self.cfile.has_xrf:
roitype += ['XRF']
if self.cfile.has_xrd1d:
roitype += ['1DXRD']
delroi = ['xrd1d']
if self.cfile.has_xrd2d:
roitype += ['2DXRD']
if len(roitype) < 1:
roitype = ['']
self.roi_chc[0].SetChoices(roitype)
self.roiUNITS()
if len(delroi) > 0:
self.rm_roi_ch[0].SetChoices(delroi)
self.setROI()
def onRemoveROI(self,event=None):
detname = self.rm_roi_ch[0].GetStringSelection()
roiname = self.rm_roi_ch[1].GetStringSelection()
if detname == 'xrd1d':
self.cfile.del_xrd1droi(roiname)
self.setROI()
def setROI(self):
detname = self.rm_roi_ch[0].GetStringSelection()
try:
detgrp = self.cfile.xrmmap['roimap'][detname]
except:
return
limits = []
names = detgrp.keys()
for name in names:
limits += [list(detgrp[name]['limits'][:])]
if len(limits) > 0:
self.rm_roi_ch[1].SetChoices([x for (y,x) in sorted(zip(limits,names))])
self.roiSELECT()
def roiSELECT(self, event=None):
detname = self.rm_roi_ch[0].GetStringSelection()
roiname = self.rm_roi_ch[1].GetStringSelection()
roimap = self.cfile.xrmmap['roimap']
roi = None
if detname in roimap:
detroi = roimap[detname]
if roiname in detroi:
roi = detroi[roiname]
if roi is None:
return
limits = roi['limits'][:]
units = bytes2str(roi['limits'].attrs.get('units',''))
if units == '1/A':
roistr = '[%0.2f to %0.2f %s]' % (limits[0],limits[1],units)
else:
roistr = '[%0.1f to %0.1f %s]' % (limits[0],limits[1],units)
self.rm_roi_lims.SetLabel(roistr)
def roiUNITS(self,event=None):
choice = self.roi_chc[0].GetStringSelection()
roiunit = ['']
if choice == 'XRF':
roiunit = ['eV','keV','channels']
self.roi_lims[2].Disable()
self.roi_lims[3].Disable()
elif choice == '1DXRD':
roiunit = [u'\u212B\u207B\u00B9 (q)',u'\u00B0 (2\u03B8)',u'\u212B (d)']
self.roi_lims[2].Disable()
self.roi_lims[3].Disable()
elif choice == '2DXRD':
roiunit = ['pixels']
self.roi_lims[2].Enable()
self.roi_lims[3].Enable()
self.roi_chc[1].SetChoices(roiunit)
def onCreateROI(self,event=None):
xtyp = self.roi_chc[0].GetStringSelection()
xunt = self.roi_chc[1].GetStringSelection()
xname = self.roi_name.GetValue()
xrange = [float(lims.GetValue()) for lims in self.roi_lims]
# print("Create ROI ", xtyp, xunt, xname, xrange)
if xtyp != '2DXRD': xrange = xrange[:2]
self.owner.message('Building ROI data for: %s' % xname)
if xtyp == 'XRF':
self.cfile.add_xrfroi(xrange,xname,unit=xunt)
elif xtyp == '1DXRD':
xrd = ['q','2th','d']
unt = xrd[self.roi_chc[1].GetSelection()]
self.cfile.add_xrd1droi(xrange, xname, unit=unt)
elif xtyp == '2DXRD':
self.cfile.add_xrd2droi(xrange,xname,unit=xunt)
self.owner.message('Added ROI: %s' % xname)
##################
class OpenMapFolder(wx.Dialog):
""""""
#----------------------------------------------------------------------
def __init__(self, folder):
"""Constructor"""
self.folder = folder
pref, f = os.path.split(folder)
title = "Read XRM Map Folder: %s" % f
        wx.Dialog.__init__(self, None, title=title, size=(475, 750))
panel = wx.Panel(self)
ChkTtl = SimpleText(panel, label='Build map including data:' )
self.ChkBx = [ Check(panel, label='XRF' ),
Check(panel, label='2DXRD' ),
Check(panel, label='1DXRD (requires calibration file)' )]
for chkbx in self.ChkBx:
chkbx.Bind(wx.EVT_CHECKBOX, self.checkOK)
cbsizer = wx.BoxSizer(wx.HORIZONTAL)
cbsizer.Add(self.ChkBx[0])
cbsizer.Add(self.ChkBx[1])
cbsizer.Add(self.ChkBx[2])
ckbxsizer = wx.BoxSizer(wx.VERTICAL)
ckbxsizer.Add(ChkTtl, flag=wx.BOTTOM|wx.LEFT)
ckbxsizer.Add(cbsizer)
################################################################################
infoTtl = [ SimpleText(panel, label='Facility'),
SimpleText(panel, label='Beamline'),
SimpleText(panel, label='Run cycle'),
SimpleText(panel, label='Proposal'),
SimpleText(panel, label='User group')]
self.info = [ wx.TextCtrl(panel, size=(100, 25) ),
wx.TextCtrl(panel, size=(100, 25) ),
wx.TextCtrl(panel, size=(100, 25) ),
wx.TextCtrl(panel, size=(100, 25) ),
wx.TextCtrl(panel, size=(320, 25) )]
infosizer0 = wx.BoxSizer(wx.HORIZONTAL)
infosizer0.Add(infoTtl[0], flag=wx.RIGHT, border=5)
infosizer0.Add(self.info[0], flag=wx.RIGHT, border=15)
infosizer0.Add(infoTtl[1], flag=wx.RIGHT, border=5)
infosizer0.Add(self.info[1], flag=wx.RIGHT, border=15)
infosizer1 = wx.BoxSizer(wx.HORIZONTAL)
infosizer1.Add(infoTtl[2], flag=wx.RIGHT, border=5)
infosizer1.Add(self.info[2], flag=wx.RIGHT, border=15)
infosizer1.Add(infoTtl[3], flag=wx.RIGHT, border=5)
infosizer1.Add(self.info[3], flag=wx.RIGHT, border=15)
infosizer2 = wx.BoxSizer(wx.HORIZONTAL)
infosizer2.Add(infoTtl[4], flag=wx.RIGHT, border=5)
infosizer2.Add(self.info[4], flag=wx.RIGHT, border=15)
infosizer = wx.BoxSizer(wx.VERTICAL)
infosizer.Add(infosizer0, flag=wx.TOP, border=5)
infosizer.Add(infosizer1, flag=wx.TOP|wx.BOTTOM, border=5)
infosizer.Add(infosizer2, flag=wx.BOTTOM, border=15)
################################################################################
cal_chc = ['Dioptas calibration file:','pyFAI calibration file:']
bkgd_chc = ['2DXRD background (optional):','1DXRD background (optional):']
cal_spn = wx.SP_VERTICAL|wx.SP_ARROW_KEYS|wx.SP_WRAP
self.XRDInfo = [ Choice(panel, choices=cal_chc ),
wx.TextCtrl(panel, size=(320, 25)),
Button(panel, label='Browse...'),
SimpleText(panel, label='Steps:'),
wx.TextCtrl(panel, size=(80, 25)),
SimpleText(panel, label='Wedges:'),
wx.SpinCtrl(panel, style=cal_spn, size=(100, -1)),
Choice(panel, choices=bkgd_chc ),
wx.TextCtrl(panel, size=(320, 25)),
Button(panel, label='Browse...'),
SimpleText(panel, label='Background scale:'),
wx.TextCtrl(panel, size=(80, 25)),
SimpleText(panel, label='2DXRD mask file (optional):'),
wx.TextCtrl(panel, size=(320, 25)),
Button(panel, label='Browse...'),]
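        # wire the three Browse... buttons (calibration, background, mask file) to the shared file browser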
for i in [1,8,13]:
self.XRDInfo[i+1].Bind(wx.EVT_BUTTON, partial(self.onBROWSEfile,i=i))
xrdsizer1 = wx.BoxSizer(wx.HORIZONTAL)
xrdsizer1.Add(self.XRDInfo[3], flag=wx.RIGHT, border=5)
xrdsizer1.Add(self.XRDInfo[4], flag=wx.RIGHT, border=5)
xrdsizer1.Add(self.XRDInfo[5], flag=wx.RIGHT, border=5)
xrdsizer1.Add(self.XRDInfo[6], flag=wx.RIGHT, border=5)
xrdsizer2 = wx.BoxSizer(wx.HORIZONTAL)
xrdsizer2.Add(self.XRDInfo[9], flag=wx.RIGHT, border=30)
xrdsizer2.Add(self.XRDInfo[10], flag=wx.RIGHT, border=5)
xrdsizer2.Add(self.XRDInfo[11], flag=wx.RIGHT, border=5)
xrdsizer = wx.BoxSizer(wx.VERTICAL)
xrdsizer.Add(self.XRDInfo[0], flag=wx.TOP, border=5)
xrdsizer.Add(self.XRDInfo[1], flag=wx.TOP, border=5)
xrdsizer.Add(self.XRDInfo[2], flag=wx.TOP|wx.BOTTOM, border=5)
xrdsizer.Add(xrdsizer1, flag=wx.BOTTOM, border=5)
xrdsizer.Add(self.XRDInfo[7], flag=wx.TOP, border=8)
xrdsizer.Add(self.XRDInfo[8], flag=wx.TOP, border=5)
# xrdsizer.Add(self.XRDInfo[9], flag=wx.TOP|wx.BOTTOM, border=5)
xrdsizer.Add(xrdsizer2, flag=wx.TOP|wx.BOTTOM, border=5)
xrdsizer.Add(self.XRDInfo[12], flag=wx.TOP, border=8)
xrdsizer.Add(self.XRDInfo[13], flag=wx.TOP, border=5)
xrdsizer.Add(self.XRDInfo[14], flag=wx.TOP|wx.BOTTOM, border=5)
################################################################################
h5cmpr_chc = ['gzip','lzf']
h5cmpr_opt = ['%i' % i for i in np.arange(10)]
self.H5cmprInfo = [Choice(panel, choices=h5cmpr_chc),
Choice(panel, choices=h5cmpr_opt)]
        h5txt = SimpleText(panel, label='H5 File Compression:')
self.H5cmprInfo[0].SetSelection(0)
self.H5cmprInfo[1].SetSelection(2)
self.H5cmprInfo[0].Bind(wx.EVT_CHOICE, self.onH5cmpr)
h5cmprsizer = wx.BoxSizer(wx.HORIZONTAL)
h5cmprsizer.Add(h5txt, flag=wx.RIGHT, border=5)
h5cmprsizer.Add(self.H5cmprInfo[0], flag=wx.RIGHT, border=5)
h5cmprsizer.Add(self.H5cmprInfo[1], flag=wx.RIGHT, border=5)
################################################################################
self.ok_btn = wx.Button(panel, wx.ID_OK)
self.cancel_btn = wx.Button(panel, wx.ID_CANCEL)
minisizer = wx.BoxSizer(wx.HORIZONTAL)
minisizer.Add(self.cancel_btn, flag=wx.RIGHT, border=5)
minisizer.Add(self.ok_btn, flag=wx.RIGHT, border=5)
################################################################################
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(ckbxsizer, flag=wx.TOP|wx.LEFT, border=5)
sizer.Add(HLine(panel, size=(320, 2)),flag=wx.TOP|wx.LEFT, border=5)
sizer.Add(infosizer, flag=wx.TOP|wx.LEFT, border=5)
sizer.Add(HLine(panel, size=(320, 2)),flag=wx.TOP|wx.LEFT, border=5)
sizer.Add(xrdsizer, flag=wx.TOP|wx.LEFT, border=5)
sizer.Add(HLine(panel, size=(320, 2)),flag=wx.TOP|wx.LEFT, border=5)
sizer.Add(h5cmprsizer, flag=wx.TOP|wx.LEFT, border=5)
sizer.Add(minisizer, flag=wx.ALIGN_RIGHT, border=5)
pack(panel, sizer)
w, h = panel.GetBestSize()
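        # pad the best size and round it to a multiple of 25 pixels before resizing the panel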
w = 25*(2 + int(w*0.04))
h = 25*(2 + int(h*0.04))
panel.SetSize((w, h))
# HX
################################################################################
## Set defaults
self.ChkBx[0].SetValue(True)
self.ChkBx[1].SetValue(False)
self.ChkBx[2].SetValue(False)
self.XRDInfo[0].SetSelection(0)
self.XRDInfo[7].SetSelection(0)
self.XRDInfo[4].SetValue('5001')
self.XRDInfo[6].SetValue(1)
self.XRDInfo[6].SetRange(0,36)
self.XRDInfo[11].SetValue('1.0')
for poniinfo in self.XRDInfo:
poniinfo.Disable()
self.info[0].SetValue(FACILITY)
self.info[1].SetValue(BEAMLINE)
for line in open(os.path.join(self.folder, 'Scan.ini'), 'r'):
if line.split()[0] == 'basedir':
npath = line.split()[-1].replace('\\', '/').split('/')
cycle, usr = npath[-2], npath[-1]
self.info[2].SetValue(cycle)
self.info[4].SetValue(usr)
self.checkOK()
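## checkOK toggles the XRD calibration widgets: ChkBx[2] enables every XRDInfo
## control, ChkBx[1] enables only the 1D background fields (XRDInfo[8:]) while
## disabling XRDInfo[:8], and with neither checked all XRDInfo controls stay
## disabled.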
def checkOK(self, evt=None):
if self.ChkBx[2].GetValue():
for poniinfo in self.XRDInfo:
poniinfo.Enable()
elif self.ChkBx[1].GetValue():
for poniinfo in self.XRDInfo[8:]:
poniinfo.Enable()
for poniinfo in self.XRDInfo[:8]:
poniinfo.Disable()
self.XRDInfo[7].SetSelection(0)
else:
for poniinfo in self.XRDInfo:
poniinfo.Disable()
def onH5cmpr(self,event=None):
if self.H5cmprInfo[0].GetSelection() == 0:
self.H5cmprInfo[1].Enable()
self.H5cmprInfo[1].SetChoices(['%i' % i for i in np.arange(10)])
self.H5cmprInfo[1].SetSelection(2)
else:
self.H5cmprInfo[1].Disable()
self.H5cmprInfo[1].SetChoices([''])
def onBROWSEfile(self,event=None,i=1):
if i == 8:
wldcd = '2D XRD background file (*.tiff)|*.tif;*.tiff;*.edf|All files (*.*)|*.*'
elif i == 13:
wldcd = '1D XRD background file (*.xy)|*.xy|All files (*.*)|*.*'
else: ## elif i == 1:
wldcd = 'XRD calibration file (*.poni)|*.poni|All files (*.*)|*.*'
if os.path.exists(self.XRDInfo[i].GetValue()):
dfltDIR = self.XRDInfo[i].GetValue()
else:
dfltDIR = os.getcwd()
dlg = wx.FileDialog(self, message='Select %s' % wldcd.split(' (')[0],
defaultDir=dfltDIR,
wildcard=wldcd, style=wx.FD_OPEN)
path, read = None, False
if dlg.ShowModal() == wx.ID_OK:
read = True
path = dlg.GetPath().replace('\\', '/')
dlg.Destroy()
if read:
self.XRDInfo[i].Clear()
self.XRDInfo[i].SetValue(str(path))
class MapViewer(LarchWxApp):
def __init__(self, use_scandb=False, _larch=None, filename=None,
version_info=None, with_inspect=False, **kws):
self.filename = filename
self.use_scandb = use_scandb
LarchWxApp.__init__(self, _larch=_larch,
version_info=version_info,
with_inspect=with_inspect, **kws)
def createApp(self):
frame = MapViewerFrame(use_scandb=self.use_scandb,
filename=self.filename,
version_info=self.version_info,
_larch=self._larch)
self.SetTopWindow(frame)
return True
def mapviewer(use_scandb=False, filename=None, _larch=None,
with_inspect=False, **kws):
MapViewer(use_scandb=use_scandb, filename=filename, _larch=_larch,
with_inspect=with_inspect, **kws)
|
[] |
[] |
[
"ESCAN_CREDENTIALS"
] |
[]
|
["ESCAN_CREDENTIALS"]
|
python
| 1 | 0 | |
tests/terminal_test.py
|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittest for terminal module."""
import sys
import unittest
# pylint: disable=redefined-builtin
from six.moves import range
from textfsm import terminal
class TerminalTest(unittest.TestCase):
def setUp(self):
self.environ_orig = terminal.os.environ
self.open_orig = terminal.os.open
self.terminal_orig = terminal.TerminalSize
def tearDown(self):
terminal.os.environ = self.environ_orig
terminal.os.open = self.open_orig
terminal.TerminalSize = self.terminal_orig
def testAnsiCmd(self):
self.assertEqual('\033[0m', terminal._AnsiCmd(['reset']))
self.assertEqual('\033[0m', terminal._AnsiCmd(['RESET']))
self.assertEqual('\033[0;32m', terminal._AnsiCmd(['reset', 'Green']))
self.assertRaises(ValueError, terminal._AnsiCmd, ['bogus'])
self.assertRaises(ValueError, terminal._AnsiCmd, ['reset', 'bogus'])
def testAnsiText(self):
self.assertEqual('\033[0mhello world\033[0m',
terminal.AnsiText('hello world'))
self.assertEqual('\033[31mhello world\033[0m',
terminal.AnsiText('hello world', ['red']))
self.assertEqual('\033[31;46mhello world',
terminal.AnsiText(
'hello world', ['red', 'bg_cyan'], False))
def testStripAnsi(self):
text = 'ansi length'
self.assertEqual(text, terminal.StripAnsiText(text))
ansi_text = '\033[5;32;44mansi\033[0m length'
self.assertEqual(text, terminal.StripAnsiText(ansi_text))
def testEncloseAnsi(self):
text = 'ansi length'
self.assertEqual(text, terminal.EncloseAnsiText(text))
ansi_text = '\033[5;32;44mansi\033[0m length'
ansi_enclosed = '\001\033[5;32;44m\002ansi\001\033[0m\002 length'
self.assertEqual(ansi_enclosed, terminal.EncloseAnsiText(ansi_text))
def testTerminalSize(self):
# pylint: disable=unused-argument
def StubOpen(*args, **kwargs):
raise IOError
terminal.open = StubOpen
terminal.os.environ = {}
# Raise exceptions on ioctl and environ and assign a default.
self.assertEqual((24, 80), terminal.TerminalSize())
terminal.os.environ = {'LINES': 'bogus', 'COLUMNS': 'bogus'}
self.assertEqual((24, 80), terminal.TerminalSize())
# Still raise exception on ioctl and use environ.
terminal.os.environ = {'LINES': '10', 'COLUMNS': '20'}
self.assertEqual((10, 20), terminal.TerminalSize())
def testLineWrap(self):
terminal.TerminalSize = lambda: (5, 11)
text = ''
self.assertEqual(text, terminal.LineWrap(text))
text = 'one line'
self.assertEqual(text, terminal.LineWrap(text))
text = 'two\nlines'
self.assertEqual(text, terminal.LineWrap(text))
text = 'one line that is too long'
text2 = 'one line th\nat is too l\nong'
self.assertEqual(text2, terminal.LineWrap(text))
# Counting ansi characters won't matter if there are none.
self.assertEqual(text2, terminal.LineWrap(text, False))
text = 'one line \033[5;32;44mthat\033[0m is too long with ansi'
text2 = 'one line \033[5;32;44mth\nat\033[0m is too l\nong with an\nsi'
text3 = 'one line \033[\n5;32;44mtha\nt\033[0m is to\no long with\n ansi'
# Ansi does not factor and the line breaks stay the same.
self.assertEqual(text2, terminal.LineWrap(text, True))
# If we count the ansi escape as characters then the line breaks change.
self.assertEqual(text3, terminal.LineWrap(text, False))
# False is implicit default.
self.assertEqual(text3, terminal.LineWrap(text))
# Couple of edge cases where we split on token boundary.
text4 = 'ooone line \033[5;32;44mthat\033[0m is too long with ansi'
text5 = 'ooone line \033[5;32;44m\nthat\033[0m is too\n long with \nansi'
self.assertEqual(text5, terminal.LineWrap(text4, True))
text6 = 'e line \033[5;32;44mthat\033[0m is too long with ansi'
text7 = 'e line \033[5;32;44mthat\033[0m\n is too lon\ng with ansi'
self.assertEqual(text7, terminal.LineWrap(text6, True))
def testIssue1(self):
self.assertEqual(10, len(terminal.StripAnsiText('boembabies' '\033[0m')))
terminal.TerminalSize = lambda: (10, 10)
text1 = terminal.LineWrap('\033[32m' + 'boembabies, ' * 10 + 'boembabies' +
'\033[0m', omit_sgr=True)
text2 = ('\033[32m' +
terminal.LineWrap('boembabies, ' * 10 + 'boembabies') +
'\033[0m')
self.assertEqual(text1, text2)
class FakeTerminal(object):
def __init__(self):
self.output = ''
# pylint: disable=C6409
def write(self, text):
self.output += text
# pylint: disable=C6409
def CountLines(self):
return len(self.output.splitlines())
def flush(self):
pass
class PagerTest(unittest.TestCase):
def setUp(self):
sys.stdout = FakeTerminal()
self.get_ch_orig = terminal.Pager._GetCh
terminal.Pager._GetCh = lambda self: 'q'
self.ts_orig = terminal.TerminalSize
terminal.TerminalSize = lambda: (24, 80)
self.p = terminal.Pager()
def tearDown(self):
terminal.Pager._GetCh = self.get_ch_orig
terminal.TerminalSize = self.ts_orig
sys.stdout = sys.__stdout__
def testPager(self):
self.assertEqual(terminal.TerminalSize()[0], self.p._cli_lines)
self.p.Clear()
self.assertEqual('', self.p._text)
self.assertEqual(0, self.p._displayed)
self.assertEqual(1, self.p._lastscroll)
def testPage(self):
txt = ''
for i in range(100):
txt += '%d a random line of text here\n' % i
self.p._text = txt
self.p.Page()
self.assertEqual(terminal.TerminalSize()[0]+2, sys.stdout.CountLines())
sys.stdout.output = ''
self.p = terminal.Pager()
self.p._text = ''
for i in range(10):
self.p._text += 'a' * 100 + '\n'
self.p.Page()
self.assertEqual(20, sys.stdout.CountLines())
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pydfs_lineup_optimizer/__init__.py
|
from pydfs_lineup_optimizer.version import __version__
from pydfs_lineup_optimizer.constants import Site, Sport
from pydfs_lineup_optimizer.player import Player, LineupPlayer
from pydfs_lineup_optimizer.exceptions import LineupOptimizerException, LineupOptimizerIncorrectTeamName, \
LineupOptimizerIncorrectPositionName, LineupOptimizerIncorrectCSV
from pydfs_lineup_optimizer.lineup_optimizer import LineupOptimizer
from pydfs_lineup_optimizer.lineup import Lineup
from pydfs_lineup_optimizer.sites import SitesRegistry
from pydfs_lineup_optimizer.lineup_exporter import CSVLineupExporter, FantasyDraftCSVLineupExporter
from pydfs_lineup_optimizer.tz import set_timezone
from pydfs_lineup_optimizer.stacks import PlayersGroup, TeamStack, PositionsStack, Stack
from pydfs_lineup_optimizer.exposure_strategy import TotalExposureStrategy, AfterEachExposureStrategy
from pydfs_lineup_optimizer import db_writer
from pydfs_lineup_optimizer.fantasy_points_strategy import StandardFantasyPointsStrategy, RandomFantasyPointsStrategy, \
ProgressiveFantasyPointsStrategy
from pydfs_lineup_optimizer.player_pool import PlayerFilter
__all__ = [
'get_optimizer', 'Site', 'Sport', 'Player', 'LineupOptimizerException', 'LineupOptimizerIncorrectTeamName',
'LineupOptimizerIncorrectPositionName', 'LineupOptimizerIncorrectCSV', 'LineupOptimizer', 'Lineup',
'CSVLineupExporter', 'set_timezone', 'FantasyDraftCSVLineupExporter', 'PlayersGroup', 'TeamStack', 'PositionsStack',
'Stack', 'TotalExposureStrategy', 'AfterEachExposureStrategy', 'StandardFantasyPointsStrategy',
'RandomFantasyPointsStrategy', 'ProgressiveFantasyPointsStrategy', 'LineupPlayer', 'PlayerFilter',
]
def get_optimizer(site: str, sport: str, **kwargs) -> LineupOptimizer:
return LineupOptimizer(SitesRegistry.get_settings(site, sport), **kwargs)
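# Illustrative usage sketch (the CSV path is a placeholder; the Site/Sport
# members are assumed from pydfs_lineup_optimizer.constants):
#   optimizer = get_optimizer(Site.DRAFTKINGS, Sport.BASKETBALL)
#   optimizer.load_players_from_csv('players.csv')
#   for lineup in optimizer.optimize(n=3):
#       print(lineup)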
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
tests/flytekit/unit/common_tests/types/impl/test_schema.py
|
from __future__ import absolute_import
import collections as _collections
import os as _os
import pytest as _pytest
import pandas as _pd
import uuid as _uuid
import datetime as _datetime
from flytekit.common.types.impl import schema as _schema_impl
from flytekit.common.types import primitives as _primitives, blobs as _blobs
from flytekit.common import utils as _utils
from flytekit.models import types as _type_models, literals as _literal_models
from flytekit.sdk import test_utils as _test_utils
import six.moves as _six_moves
def test_schema_type():
_schema_impl.SchemaType()
_schema_impl.SchemaType([])
_schema_impl.SchemaType([
('a', _primitives.Integer),
('b', _primitives.String),
('c', _primitives.Float),
('d', _primitives.Boolean),
('e', _primitives.Datetime)
])
with _pytest.raises(ValueError):
_schema_impl.SchemaType({'a': _primitives.Integer})
with _pytest.raises(TypeError):
_schema_impl.SchemaType([('a', _blobs.Blob)])
with _pytest.raises(ValueError):
_schema_impl.SchemaType([('a', _primitives.Integer, 1)])
_schema_impl.SchemaType([('1', _primitives.Integer)])
with _pytest.raises(TypeError):
_schema_impl.SchemaType([(1, _primitives.Integer)])
with _pytest.raises(TypeError):
_schema_impl.SchemaType([('1', [_primitives.Integer])])
value_type_tuples = [
('abra', _primitives.Integer, [1, 2, 3, 4, 5]),
('CADABRA', _primitives.Float, [1.0, 2.0, 3.0, 4.0, 5.0]),
('HoCuS', _primitives.String, ["A", "B", "C", "D", "E"]),
('Pocus', _primitives.Boolean, [True, False, True, False]),
(
'locusts',
_primitives.Datetime,
[
_datetime.datetime(day=1, month=1, year=2017, hour=1, minute=1, second=1, microsecond=1) -
_datetime.timedelta(days=i)
for i in _six_moves.range(5)
]
)
]
@_pytest.mark.parametrize("value_type_pair", value_type_tuples)
def test_simple_read_and_write_with_different_types(value_type_pair):
column_name, flyte_type, values = value_type_pair
values = [tuple([value]) for value in values]
schema_type = _schema_impl.SchemaType(columns=[(column_name, flyte_type)])
with _test_utils.LocalTestFileSystem() as sandbox:
with _utils.AutoDeletingTempDir("test") as t:
a = _schema_impl.Schema.create_at_known_location(t.name, mode='wb', schema_type=schema_type)
assert a.local_path is None
with a as writer:
for _ in _six_moves.range(5):
writer.write(_pd.DataFrame.from_records(values, columns=[column_name]))
assert a.local_path.startswith(sandbox.name)
assert a.local_path is None
b = _schema_impl.Schema.create_at_known_location(t.name, mode='rb', schema_type=schema_type)
assert b.local_path is None
with b as reader:
for df in reader.iter_chunks():
for check, actual in _six_moves.zip(values, df[column_name].tolist()):
assert check[0] == actual
assert reader.read() is None
reader.seek(0)
df = reader.read(concat=True)
for iter_count, actual in enumerate(df[column_name].tolist()):
assert values[iter_count % len(values)][0] == actual
assert b.local_path.startswith(sandbox.name)
assert b.local_path is None
def test_datetime_coercion_explicitly():
"""
Sanity check that we're using a version of pyarrow that allows us to
truncate timestamps
"""
dt = _datetime.datetime(day=1, month=1, year=2017, hour=1, minute=1, second=1, microsecond=1)
values = [(dt,)]
df = _pd.DataFrame.from_records(values, columns=['testname'])
assert df['testname'][0] == dt
with _utils.AutoDeletingTempDir('test') as tmpdir:
tmpfile = tmpdir.get_named_tempfile('repro.parquet')
df.to_parquet(tmpfile, coerce_timestamps='ms', allow_truncated_timestamps=True)
df2 = _pd.read_parquet(tmpfile)
dt2 = _datetime.datetime(day=1, month=1, year=2017, hour=1, minute=1, second=1)
assert df2['testname'][0] == dt2
def test_datetime_coercion():
values = [
tuple([_datetime.datetime(day=1, month=1, year=2017, hour=1, minute=1, second=1, microsecond=1) -
_datetime.timedelta(days=x)])
for x in _six_moves.range(5)
]
schema_type = _schema_impl.SchemaType(columns=[('testname', _primitives.Datetime)])
with _test_utils.LocalTestFileSystem():
with _utils.AutoDeletingTempDir("test") as t:
a = _schema_impl.Schema.create_at_known_location(t.name, mode='wb', schema_type=schema_type)
with a as writer:
for _ in _six_moves.range(5):
# us to ms coercion segfaults unless we explicitly allow truncation.
writer.write(
_pd.DataFrame.from_records(values, columns=['testname']),
coerce_timestamps='ms',
allow_truncated_timestamps=True)
# TODO: Uncomment when segfault bug is resolved
# with _pytest.raises(Exception):
# writer.write(
# _pd.DataFrame.from_records(values, columns=['testname']),
# coerce_timestamps='ms')
b = _schema_impl.Schema.create_at_known_location(t.name, mode='wb', schema_type=schema_type)
with b as writer:
for _ in _six_moves.range(5):
writer.write(_pd.DataFrame.from_records(values, columns=['testname']))
@_pytest.mark.parametrize("value_type_pair", value_type_tuples)
def test_fetch(value_type_pair):
column_name, flyte_type, values = value_type_pair
values = [tuple([value]) for value in values]
schema_type = _schema_impl.SchemaType(columns=[(column_name, flyte_type)])
with _utils.AutoDeletingTempDir("test") as tmpdir:
for i in _six_moves.range(3):
_pd.DataFrame.from_records(values, columns=[column_name]).to_parquet(
tmpdir.get_named_tempfile(str(i).zfill(6)), coerce_timestamps='us')
with _utils.AutoDeletingTempDir("test2") as local_dir:
schema_obj = _schema_impl.Schema.fetch(
tmpdir.name,
local_path=local_dir.get_named_tempfile('schema_test'),
schema_type=schema_type
)
with schema_obj as reader:
for df in reader.iter_chunks():
for check, actual in _six_moves.zip(values, df[column_name].tolist()):
assert check[0] == actual
assert reader.read() is None
reader.seek(0)
df = reader.read(concat=True)
for iter_count, actual in enumerate(df[column_name].tolist()):
assert values[iter_count % len(values)][0] == actual
@_pytest.mark.parametrize("value_type_pair", value_type_tuples)
def test_download(value_type_pair):
column_name, flyte_type, values = value_type_pair
values = [tuple([value]) for value in values]
schema_type = _schema_impl.SchemaType(columns=[(column_name, flyte_type)])
with _utils.AutoDeletingTempDir("test") as tmpdir:
for i in _six_moves.range(3):
_pd.DataFrame.from_records(values, columns=[column_name]).to_parquet(
tmpdir.get_named_tempfile(str(i).zfill(6)), coerce_timestamps='us')
with _utils.AutoDeletingTempDir("test2") as local_dir:
schema_obj = _schema_impl.Schema(tmpdir.name, schema_type=schema_type)
schema_obj.download(local_dir.get_named_tempfile(_uuid.uuid4().hex))
with schema_obj as reader:
for df in reader.iter_chunks():
for check, actual in _six_moves.zip(values, df[column_name].tolist()):
assert check[0] == actual
assert reader.read() is None
reader.seek(0)
df = reader.read(concat=True)
for iter_count, actual in enumerate(df[column_name].tolist()):
assert values[iter_count % len(values)][0] == actual
with _pytest.raises(Exception):
schema_obj = _schema_impl.Schema(tmpdir.name, schema_type=schema_type)
schema_obj.download()
with _test_utils.LocalTestFileSystem():
schema_obj = _schema_impl.Schema(tmpdir.name, schema_type=schema_type)
schema_obj.download()
with schema_obj as reader:
for df in reader.iter_chunks():
for check, actual in _six_moves.zip(values, df[column_name].tolist()):
assert check[0] == actual
assert reader.read() is None
reader.seek(0)
df = reader.read(concat=True)
for iter_count, actual in enumerate(df[column_name].tolist()):
assert values[iter_count % len(values)][0] == actual
def test_hive_queries(monkeypatch):
def return_deterministic_uuid():
class FakeUUID4(object):
def __init__(self):
self.hex = 'test_uuid'
class Uuid(object):
def uuid4(self):
return FakeUUID4()
return Uuid()
monkeypatch.setattr(_schema_impl, '_uuid', return_deterministic_uuid())
all_types = _schema_impl.SchemaType([
('a', _primitives.Integer),
('b', _primitives.String),
('c', _primitives.Float),
('d', _primitives.Boolean),
('e', _primitives.Datetime)
])
with _test_utils.LocalTestFileSystem():
df, query = _schema_impl.Schema.create_from_hive_query(
"SELECT a, b, c, d, e FROM some_place WHERE i = 0",
stage_query="CREATE TEMPORARY TABLE some_place AS SELECT * FROM some_place_original",
known_location="s3://my_fixed_path/",
schema_type=all_types
)
full_query = """
CREATE TEMPORARY TABLE some_place AS SELECT * FROM some_place_original;
CREATE TEMPORARY TABLE test_uuid_tmp AS SELECT a, b, c, d, e FROM some_place WHERE i = 0;
CREATE EXTERNAL TABLE test_uuid LIKE test_uuid_tmp STORED AS PARQUET;
ALTER TABLE test_uuid SET LOCATION 's3://my_fixed_path/';
INSERT OVERWRITE TABLE test_uuid
SELECT
a as a,
b as b,
CAST(c as double) c,
d as d,
e as e
FROM test_uuid_tmp;
DROP TABLE test_uuid;
"""
full_query = " ".join(full_query.split())
query = " ".join(query.split())
assert query == full_query
# Test adding partition
full_query = """
ALTER TABLE some_table ADD IF NOT EXISTS PARTITION (
region = 'SEA',
ds = '2017-01-01'
) LOCATION 's3://my_fixed_path/';
ALTER TABLE some_table PARTITION (
region = 'SEA',
ds = '2017-01-01'
) SET LOCATION 's3://my_fixed_path/';
"""
query = df.get_write_partition_to_hive_table_query(
'some_table',
partitions=_collections.OrderedDict([('region', 'SEA'), ('ds', '2017-01-01')]))
full_query = " ".join(full_query.split())
query = " ".join(query.split())
assert query == full_query
def test_partial_column_read():
with _test_utils.LocalTestFileSystem():
a = _schema_impl.Schema.create_at_any_location(
schema_type=_schema_impl.SchemaType([('a', _primitives.Integer), ('b', _primitives.Integer)])
)
with a as writer:
writer.write(_pd.DataFrame.from_dict({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}))
b = _schema_impl.Schema.fetch(
a.uri,
schema_type=_schema_impl.SchemaType([('a', _primitives.Integer), ('b', _primitives.Integer)])
)
with b as reader:
df = reader.read(columns=['b'])
assert df.columns.values == ['b']
assert df['b'].tolist() == [5, 6, 7, 8]
def test_casting():
pass
def test_from_python_std():
pass
def test_promote_from_model_schema_type():
m = _type_models.SchemaType(
[
_type_models.SchemaType.SchemaColumn(
"a",
_type_models.SchemaType.SchemaColumn.SchemaColumnType.BOOLEAN
),
_type_models.SchemaType.SchemaColumn(
"b",
_type_models.SchemaType.SchemaColumn.SchemaColumnType.DATETIME
),
_type_models.SchemaType.SchemaColumn(
"c",
_type_models.SchemaType.SchemaColumn.SchemaColumnType.DURATION
),
_type_models.SchemaType.SchemaColumn(
"d",
_type_models.SchemaType.SchemaColumn.SchemaColumnType.FLOAT
),
_type_models.SchemaType.SchemaColumn(
"e",
_type_models.SchemaType.SchemaColumn.SchemaColumnType.INTEGER
),
_type_models.SchemaType.SchemaColumn(
"f",
_type_models.SchemaType.SchemaColumn.SchemaColumnType.STRING
),
]
)
s = _schema_impl.SchemaType.promote_from_model(m)
assert s.columns == m.columns
assert s.sdk_columns['a'].to_flyte_literal_type() == _primitives.Boolean.to_flyte_literal_type()
assert s.sdk_columns['b'].to_flyte_literal_type() == _primitives.Datetime.to_flyte_literal_type()
assert s.sdk_columns['c'].to_flyte_literal_type() == _primitives.Timedelta.to_flyte_literal_type()
assert s.sdk_columns['d'].to_flyte_literal_type() == _primitives.Float.to_flyte_literal_type()
assert s.sdk_columns['e'].to_flyte_literal_type() == _primitives.Integer.to_flyte_literal_type()
assert s.sdk_columns['f'].to_flyte_literal_type() == _primitives.String.to_flyte_literal_type()
assert s == m
def test_promote_from_model_schema():
m = _literal_models.Schema(
"s3://some/place/",
_type_models.SchemaType(
[
_type_models.SchemaType.SchemaColumn(
"a",
_type_models.SchemaType.SchemaColumn.SchemaColumnType.BOOLEAN
),
_type_models.SchemaType.SchemaColumn(
"b",
_type_models.SchemaType.SchemaColumn.SchemaColumnType.DATETIME
),
_type_models.SchemaType.SchemaColumn(
"c",
_type_models.SchemaType.SchemaColumn.SchemaColumnType.DURATION
),
_type_models.SchemaType.SchemaColumn(
"d",
_type_models.SchemaType.SchemaColumn.SchemaColumnType.FLOAT
),
_type_models.SchemaType.SchemaColumn(
"e",
_type_models.SchemaType.SchemaColumn.SchemaColumnType.INTEGER
),
_type_models.SchemaType.SchemaColumn(
"f",
_type_models.SchemaType.SchemaColumn.SchemaColumnType.STRING
),
]
)
)
s = _schema_impl.Schema.promote_from_model(m)
assert s.uri == "s3://some/place/"
assert s.type.sdk_columns['a'].to_flyte_literal_type() == _primitives.Boolean.to_flyte_literal_type()
assert s.type.sdk_columns['b'].to_flyte_literal_type() == _primitives.Datetime.to_flyte_literal_type()
assert s.type.sdk_columns['c'].to_flyte_literal_type() == _primitives.Timedelta.to_flyte_literal_type()
assert s.type.sdk_columns['d'].to_flyte_literal_type() == _primitives.Float.to_flyte_literal_type()
assert s.type.sdk_columns['e'].to_flyte_literal_type() == _primitives.Integer.to_flyte_literal_type()
assert s.type.sdk_columns['f'].to_flyte_literal_type() == _primitives.String.to_flyte_literal_type()
assert s == m
def test_create_at_known_location():
with _test_utils.LocalTestFileSystem():
with _utils.AutoDeletingTempDir('test') as wd:
b = _schema_impl.Schema.create_at_known_location(wd.name, schema_type=_schema_impl.SchemaType())
assert b.local_path is None
assert b.remote_location == wd.name + "/"
assert b.mode == 'wb'
with b as w:
w.write(_pd.DataFrame.from_dict({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}))
df = _pd.read_parquet(_os.path.join(wd.name, "000000"))
assert list(df['a']) == [1, 2, 3, 4]
assert list(df['b']) == [5, 6, 7, 8]
def test_generic_schema_read():
with _test_utils.LocalTestFileSystem():
a = _schema_impl.Schema.create_at_any_location(
schema_type=_schema_impl.SchemaType([('a', _primitives.Integer), ('b', _primitives.Integer)])
)
with a as writer:
writer.write(_pd.DataFrame.from_dict({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}))
b = _schema_impl.Schema.fetch(
a.remote_prefix,
schema_type=_schema_impl.SchemaType([]))
with b as reader:
df = reader.read()
assert df.columns.values.tolist() == ['a', 'b']
assert df['a'].tolist() == [1, 2, 3, 4]
assert df['b'].tolist() == [5, 6, 7, 8]
def test_extra_schema_read():
with _test_utils.LocalTestFileSystem():
a = _schema_impl.Schema.create_at_any_location(
schema_type=_schema_impl.SchemaType([('a', _primitives.Integer), ('b', _primitives.Integer)])
)
with a as writer:
writer.write(_pd.DataFrame.from_dict({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}))
b = _schema_impl.Schema.fetch(
a.remote_prefix,
schema_type=_schema_impl.SchemaType([('a', _primitives.Integer)]))
with b as reader:
df = reader.read(concat=True, truncate_extra_columns=False)
assert df.columns.values.tolist() == ['a', 'b']
assert df['a'].tolist() == [1, 2, 3, 4]
assert df['b'].tolist() == [5, 6, 7, 8]
with b as reader:
df = reader.read(concat=True)
assert df.columns.values.tolist() == ['a']
assert df['a'].tolist() == [1, 2, 3, 4]
def test_normal_schema_read_with_fastparquet():
with _test_utils.LocalTestFileSystem():
a = _schema_impl.Schema.create_at_any_location(
schema_type=_schema_impl.SchemaType([('a', _primitives.Integer), ('b', _primitives.Boolean)])
)
with a as writer:
writer.write(_pd.DataFrame.from_dict({'a': [1, 2, 3, 4], 'b': [False, True, True, False]}))
import os as _os
original_engine = _os.getenv('PARQUET_ENGINE')
_os.environ['PARQUET_ENGINE'] = 'fastparquet'
b = _schema_impl.Schema.fetch(
a.remote_prefix,
schema_type=_schema_impl.SchemaType([]))
with b as reader:
df = reader.read()
assert df['a'].tolist() == [1, 2, 3, 4]
assert _pd.api.types.is_bool_dtype(df.dtypes['b'])
assert df['b'].tolist() == [False, True, True, False]
if original_engine is None:
del _os.environ['PARQUET_ENGINE']
else:
_os.environ['PARQUET_ENGINE'] = original_engine
def test_type_promoted_schema_read_with_fastparquet():
with _test_utils.LocalTestFileSystem():
a = _schema_impl.Schema.create_at_any_location(
schema_type=_schema_impl.SchemaType([('a', _primitives.Integer), ('b', _primitives.Boolean)])
)
with a as writer:
writer.write(_pd.DataFrame.from_dict({'a': [1, 2, 3, 4], 'b': [None, True, None, False]}))
import os as _os
original_engine = _os.getenv('PARQUET_ENGINE')
_os.environ['PARQUET_ENGINE'] = 'fastparquet'
b = _schema_impl.Schema.fetch(
a.remote_prefix,
schema_type=_schema_impl.SchemaType([]))
with b as reader:
df = reader.read()
assert df['a'].tolist() == [1, 2, 3, 4]
assert _pd.api.types.is_object_dtype(df.dtypes['b'])
assert df['b'].tolist() == [None, True, None, False]
if original_engine is None:
del _os.environ['PARQUET_ENGINE']
else:
_os.environ['PARQUET_ENGINE'] = original_engine
def test_schema_read_consistency_between_two_engines():
with _test_utils.LocalTestFileSystem():
a = _schema_impl.Schema.create_at_any_location(
schema_type=_schema_impl.SchemaType([('a', _primitives.Integer), ('b', _primitives.Boolean)])
)
with a as writer:
writer.write(_pd.DataFrame.from_dict({'a': [1, 2, 3, 4], 'b': [None, True, None, False]}))
import os as _os
original_engine = _os.getenv('PARQUET_ENGINE')
_os.environ['PARQUET_ENGINE'] = 'fastparquet'
b = _schema_impl.Schema.fetch(
a.remote_prefix,
schema_type=_schema_impl.SchemaType([]))
with b as b_reader:
b_df = b_reader.read()
_os.environ['PARQUET_ENGINE'] = 'pyarrow'
c = _schema_impl.Schema.fetch(
a.remote_prefix,
schema_type=_schema_impl.SchemaType([]))
with c as c_reader:
c_df = c_reader.read()
assert b_df.equals(c_df)
if original_engine is None:
del _os.environ['PARQUET_ENGINE']
else:
_os.environ['PARQUET_ENGINE'] = original_engine
|
[] |
[] |
[
"PARQUET_ENGINE"
] |
[]
|
["PARQUET_ENGINE"]
|
python
| 1 | 0 | |
app/app/celery.py
|
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
app = Celery("app")
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()
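# Illustrative sketch of how tasks plug into this app instance (the module and
# task names below are examples, not defined in this file):
#   # app/tasks.py -- found by autodiscover_tasks()
#   from celery import shared_task
#   @shared_task
#   def add(x, y):
#       return x + y
#   # start a worker:  celery -A app worker --loglevel=info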
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
jax/_src/numpy/lax_numpy.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pytype: skip-file
"""
Implements the NumPy API, using the primitives in :mod:`jax.lax`.
NumPy operations are implemented in Python in terms of the primitive operations
in :mod:`jax.lax`. Since NumPy operations are not primitive and instead are
implemented in terms of :mod:`jax.lax` operations, we do not need to define
transformation rules such as gradient or batching rules. Instead,
transformations for NumPy primitives can be derived from the transformation
rules for the underlying :code:`lax` primitives.
"""
import builtins
import collections
import operator
import os
import types
from typing import Sequence, FrozenSet, Tuple, Union, Iterable
from textwrap import dedent as _dedent
import warnings
import numpy as np
import opt_einsum
import jax
from jax import jit, custom_jvp
from .vectorize import vectorize
from .util import _wraps
from jax import core
from jax import dtypes
from jax.core import UnshapedArray, ShapedArray, ConcreteArray, canonicalize_shape
from jax.config import flags, config
from jax.interpreters.xla import DeviceArray, _DeviceArray, _CppDeviceArray
from jax.interpreters.masking import Poly
from jax import lax
from jax._src.lax.lax import _device_put_raw
from jax import ops
from jax.util import (partial, unzip2, prod as _prod,
subvals, safe_zip, canonicalize_axis as _canonicalize_axis)
from jax.tree_util import tree_leaves, tree_flatten
FLAGS = flags.FLAGS
flags.DEFINE_enum(
'jax_numpy_rank_promotion', os.getenv('JAX_NUMPY_RANK_PROMOTION', 'allow'),
enum_values=['allow', 'warn', 'raise'],
help=
'Control NumPy-style automatic rank promotion broadcasting '
'("allow", "warn", or "raise").')
newaxis = None
# Common docstring additions:
_PRECISION_DOC = """\
In addition to the original NumPy arguments listed below, also supports
``precision`` for extra control over matrix-multiplication precision
on supported devices. ``precision`` may be set to ``None``, which means
default precision for the backend, a ``lax.Precision`` enum value
(``Precision.DEFAULT``, ``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple
of two ``lax.Precision`` enums indicating separate precision for each argument.
"""
# We replace some builtin names to follow Numpy's API, so we capture here.
_abs = builtins.abs
_all = builtins.all
_any = builtins.any
_max = builtins.max
_min = builtins.min
_sum = builtins.sum
_divmod = builtins.divmod
# NumPy constants
pi = np.pi
e = np.e
euler_gamma = np.euler_gamma
inf = np.inf
NINF = np.NINF
PZERO = np.PZERO
NZERO = np.NZERO
nan = np.nan
# And some numpy utility functions
set_printoptions = np.set_printoptions
# We want isinstance(x, np.ndarray) checks in user code to work with our
# array-like types, including DeviceArray and UnshapedArray (i.e. the abstract
# array base class). We can override the isinstance behavior directly, without
# having the complexity of multiple inheritance on those classes, by defining
# the ndarray class to have a metaclass with special __instancecheck__ behavior.
_arraylike_types = (np.ndarray, UnshapedArray, DeviceArray)
class _ArrayMeta(type(np.ndarray)): # type: ignore
"""Metaclass for overriding ndarray isinstance checks."""
def __instancecheck__(self, instance):
try:
return isinstance(instance.aval, _arraylike_types)
except AttributeError:
return isinstance(instance, _arraylike_types)
class ndarray(np.ndarray, metaclass=_ArrayMeta):
dtype: np.dtype
shape: Tuple[int, ...]
size: int
def __init__(shape, dtype=None, buffer=None, offset=0, strides=None,
order=None):
raise TypeError("jax.numpy.ndarray() should not be instantiated explicitly."
" Use jax.numpy.array, or jax.numpy.zeros instead.")
iscomplexobj = np.iscomplexobj
shape = _shape = np.shape
ndim = _ndim = np.ndim
size = np.size
_dtype = dtypes.result_type
# At present JAX doesn't have a reason to distinguish between scalars and arrays
# in its object system. Further, we want JAX scalars to have the same type
# promotion behaviors as JAX arrays. Rather than introducing a new type of JAX
# scalar object with JAX promotion behaviors, instead we make the JAX scalar
# types return JAX arrays when instantiated.
class _ScalarMeta(type):
def __hash__(self):
return hash(self.dtype.type)
def __eq__(self, other):
return id(self) == id(other) or self.dtype.type == other
def __ne__(self, other):
return not (self == other)
def __call__(self, x):
return array(x, dtype=self.dtype)
def _make_scalar_type(np_scalar_type):
return _ScalarMeta(np_scalar_type.__name__, (object,),
{"dtype": np.dtype(np_scalar_type)})
bool_ = _make_scalar_type(np.bool_)
uint8 = _make_scalar_type(np.uint8)
uint16 = _make_scalar_type(np.uint16)
uint32 = _make_scalar_type(np.uint32)
uint64 = _make_scalar_type(np.uint64)
int8 = _make_scalar_type(np.int8)
int16 = _make_scalar_type(np.int16)
int32 = _make_scalar_type(np.int32)
int64 = _make_scalar_type(np.int64)
bfloat16 = _make_scalar_type(dtypes.bfloat16)
float16 = _make_scalar_type(np.float16)
float32 = single = _make_scalar_type(np.float32)
float64 = double = _make_scalar_type(np.float64)
complex64 = csingle = _make_scalar_type(np.complex64)
complex128 = cdouble = _make_scalar_type(np.complex128)
int_ = int32 if dtypes.int_ == np.int32 else int64
float_ = float32 if dtypes.float_ == np.float32 else float64
complex_ = complex64 if dtypes.complex_ == np.complex64 else complex128
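# Because _ScalarMeta.__call__ builds an array, these "scalar types" construct
# JAX values rather than NumPy scalars, e.g. float32(1) returns a JAX array
# with dtype float32 instead of an np.float32 scalar.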
number = np.number
inexact = np.inexact
complexfloating = np.complexfloating
floating = np.floating
integer = np.integer
signedinteger = np.signedinteger
unsignedinteger = np.unsignedinteger
flexible = np.flexible
character = np.character
object_ = np.object_
iinfo = dtypes.iinfo
dtype = np.dtype
can_cast = dtypes.can_cast
issubsctype = dtypes.issubsctype
promote_types = dtypes.promote_types
ComplexWarning = np.ComplexWarning
array_str = np.array_str
array_repr = np.array_repr
save = np.save
savez = np.savez
load = np.load
### utility functions
_DEFAULT_TYPEMAP = {
np.bool_: bool_,
np.int_: int_,
np.float_: float_,
np.complex_: complex_
}
_INT_DTYPES = {
16: np.int16,
32: np.int32,
64: np.int64,
}
def _np_array(obj, dtype=None, **kwargs):
"""Return a properly-typed numpy array.
`_np_array(obj, **kwds)` is equivalent to `np.array(obj, **kwds)`, with the
exception that when obj.dtype is not defined and dtype is not specified, it
uses Jax's default dtypes.
"""
arr = np.array(obj, dtype=dtype, **kwargs)
obj_dtype = getattr(obj, 'dtype', None)
arr_dtype = np.dtype(arr.dtype).type
if dtype is None and obj_dtype is None and arr_dtype in _DEFAULT_TYPEMAP:
arr = arr.astype(_DEFAULT_TYPEMAP[arr_dtype])
return arr
_np_asarray = partial(_np_array, copy=False)
def _promote_shapes(fun_name, *args):
"""Prepend implicit leading singleton dimensions for Numpy broadcasting."""
if len(args) < 2:
return args
else:
shapes = [shape(arg) for arg in args]
nonscalar_ranks = [len(shp) for shp in shapes if shp]
if not nonscalar_ranks or len(set(nonscalar_ranks)) == 1:
return args
else:
if FLAGS.jax_numpy_rank_promotion != "allow":
_rank_promotion_warning_or_error(fun_name, shapes)
result_rank = len(lax.broadcast_shapes(*shapes))
return [broadcast_to(arg, (1,) * (result_rank - len(shp)) + shp)
for arg, shp in zip(args, shapes)]
def _rank_promotion_warning_or_error(fun_name, shapes):
if FLAGS.jax_numpy_rank_promotion == "warn":
msg = ("Following NumPy automatic rank promotion for {} on shapes {}. "
"Set the jax_numpy_rank_promotion config option to 'allow' to "
"disable this warning; for more information, see "
"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.")
warnings.warn(msg.format(fun_name, ' '.join(map(str, shapes))))
elif FLAGS.jax_numpy_rank_promotion == "raise":
msg = ("Operands could not be broadcast together for {} on shapes {} "
"and with the config option jax_numpy_rank_promotion='raise'. "
"For more information, see "
"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.")
raise ValueError(msg.format(fun_name, ' '.join(map(str, shapes))))
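# Worked example of the promotion above: adding arrays of shape (4,) and (3, 4)
# has mixed non-scalar ranks, so the (4,) operand is broadcast to (1, 4) before
# the lax-level broadcast.  Under jax_numpy_rank_promotion='warn' this emits the
# warning above; under 'raise' it raises ValueError instead.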
def _promote_dtypes(*args):
"""Convenience function to apply Numpy argument dtype promotion."""
# TODO(dougalm,mattjj): This is a performance bottleneck. Consider memoizing.
if len(args) < 2:
return args
else:
to_dtype_raw = dtypes._result_type_raw(*args)
weak_type = to_dtype_raw in set(dtypes._weak_types)
to_dtype = dtypes.canonicalize_dtype(to_dtype_raw)
return [lax.convert_element_type(x, to_dtype, weak_type) for x in args]
def _promote_dtypes_inexact(*args):
"""Convenience function to apply Numpy argument dtype promotion.
Promotes arguments to an inexact type."""
to_dtype_raw = dtypes._result_type_raw(*args)
to_dtype = dtypes.canonicalize_dtype(to_dtype_raw)
to_dtype_inexact = _to_inexact_dtype(to_dtype)
weak_type = (to_dtype == to_dtype_inexact
and to_dtype_raw in set(dtypes._weak_types))
return [lax.convert_element_type(x, to_dtype_inexact, weak_type) for x in args]
def _to_inexact_dtype(dtype):
"""Promotes a dtype into an inexact dtype, if it is not already one."""
return dtype if issubdtype(dtype, inexact) else promote_types(dtype, float_)
def _complex_elem_type(dtype):
"""Returns the float type of the real/imaginary parts of a complex dtype."""
return np.abs(np.zeros((), dtype)).dtype
def _result_dtype(op, *args):
"""Compute result dtype of applying op to arguments with given dtypes."""
args = [np.ones((0,) * ndim(arg), _dtype(arg)) for arg in args]
return _dtype(op(*args))
def _arraylike(x): return isinstance(x, ndarray) or isscalar(x)
def _check_arraylike(fun_name, *args):
"""Check if all args fit JAX's definition of arraylike (ndarray or scalar)."""
assert isinstance(fun_name, str), f"fun_name must be a string. Got {fun_name}"
if _any(not _arraylike(arg) for arg in args):
pos, arg = next((i, arg) for i, arg in enumerate(args)
if not _arraylike(arg))
msg = "{} requires ndarray or scalar arguments, got {} at position {}."
raise TypeError(msg.format(fun_name, type(arg), pos))
def _check_no_float0s(fun_name, *args):
"""Check if none of the args have dtype float0."""
if _any(dtypes.dtype(arg) is dtypes.float0 for arg in args):
raise TypeError(
f"Called {fun_name} with a float0 array. "
"float0s do not support any operations by design because they "
"are not compatible with non-trivial vector spaces. No implicit dtype "
"conversion is done. You can use np.zeros_like(arr, dtype=np.float) "
"to cast a float0 array to a regular zeros array. \n"
"If you didn't expect to get a float0 you might have accidentally "
"taken a gradient with respect to an integer argument.")
def _promote_args(fun_name, *args):
"""Convenience function to apply Numpy argument shape and dtype promotion."""
_check_arraylike(fun_name, *args)
_check_no_float0s(fun_name, *args)
return _promote_shapes(fun_name, *_promote_dtypes(*args))
def _promote_args_inexact(fun_name, *args):
"""Convenience function to apply Numpy argument shape and dtype promotion.
Promotes non-inexact types to an inexact type."""
_check_arraylike(fun_name, *args)
_check_no_float0s(fun_name, *args)
return _promote_shapes(fun_name, *_promote_dtypes_inexact(*args))
def _constant_like(x, const):
return np.array(const, dtype=_dtype(x))
### implementations of numpy functions in terms of lax
@_wraps(np.fmin)
def fmin(x1, x2):
return where((x1 < x2) | isnan(x2), x1, x2)
@_wraps(np.fmax)
def fmax(x1, x2):
return where((x1 > x2) | isnan(x2), x1, x2)
@_wraps(np.finfo)
def finfo(dtype):
return dtypes.finfo(dtype)
@_wraps(np.issubdtype)
def issubdtype(arg1, arg2):
return dtypes.issubdtype(arg1, arg2)
@_wraps(np.isscalar)
def isscalar(element):
return dtypes.is_python_scalar(element) or np.isscalar(element)
iterable = np.iterable
@_wraps(np.result_type)
def result_type(*args):
return dtypes.result_type(*args)
def _one_to_one_unop(numpy_fn, lax_fn, promote_to_inexact=False, lax_doc=False):
if promote_to_inexact:
fn = lambda x: lax_fn(*_promote_args_inexact(numpy_fn.__name__, x))
else:
fn = lambda x: lax_fn(*_promote_args(numpy_fn.__name__, x))
if lax_doc:
doc = _dedent('\n\n'.join(lax_fn.__doc__.split('\n\n')[1:])).strip()
return _wraps(numpy_fn, lax_description=doc)(fn)
else:
return _wraps(numpy_fn)(fn)
def _one_to_one_binop(numpy_fn, lax_fn, promote_to_inexact=False, lax_doc=False):
if promote_to_inexact:
fn = lambda x1, x2: lax_fn(*_promote_args_inexact(numpy_fn.__name__, x1, x2))
else:
fn = lambda x1, x2: lax_fn(*_promote_args(numpy_fn.__name__, x1, x2))
if lax_doc:
doc = _dedent('\n\n'.join(lax_fn.__doc__.split('\n\n')[1:])).strip()
return _wraps(numpy_fn, lax_description=doc)(fn)
else:
return _wraps(numpy_fn)(fn)
def _maybe_bool_binop(numpy_fn, lax_fn, bool_lax_fn, lax_doc=False):
def fn(x1, x2):
x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)
return lax_fn(x1, x2) if x1.dtype != bool_ else bool_lax_fn(x1, x2)
if lax_doc:
doc = _dedent('\n\n'.join(lax_fn.__doc__.split('\n\n')[1:])).strip()
return _wraps(numpy_fn, lax_description=doc)(fn)
else:
return _wraps(numpy_fn)(fn)
fabs = _one_to_one_unop(np.fabs, lax.abs, True)
bitwise_not = _one_to_one_unop(np.bitwise_not, lax.bitwise_not)
invert = _one_to_one_unop(np.invert, lax.bitwise_not)
negative = _one_to_one_unop(np.negative, lax.neg)
positive = _one_to_one_unop(np.positive, lambda x: x)
floor = _one_to_one_unop(np.floor, lax.floor, True)
ceil = _one_to_one_unop(np.ceil, lax.ceil, True)
exp = _one_to_one_unop(np.exp, lax.exp, True)
log = _one_to_one_unop(np.log, lax.log, True)
expm1 = _one_to_one_unop(np.expm1, lax.expm1, True)
log1p = _one_to_one_unop(np.log1p, lax.log1p, True)
sin = _one_to_one_unop(np.sin, lax.sin, True)
cos = _one_to_one_unop(np.cos, lax.cos, True)
tan = _one_to_one_unop(np.tan, lax.tan, True)
arcsin = _one_to_one_unop(np.arcsin, lax.asin, True)
arccos = _one_to_one_unop(np.arccos, lax.acos, True)
arctan = _one_to_one_unop(np.arctan, lax.atan, True)
sinh = _one_to_one_unop(np.sinh, lax.sinh, True)
cosh = _one_to_one_unop(np.cosh, lax.cosh, True)
arcsinh = _one_to_one_unop(np.arcsinh, lax.asinh, True)
tanh = _one_to_one_unop(np.tanh, lax.tanh, True)
arcsinh = _one_to_one_unop(np.arcsinh, lax.asinh, True)
arctanh = _one_to_one_unop(np.arctanh, lax.atanh, True)
sqrt = _one_to_one_unop(np.sqrt, lax.sqrt, True)
add = _maybe_bool_binop(np.add, lax.add, lax.bitwise_or)
bitwise_and = _one_to_one_binop(np.bitwise_and, lax.bitwise_and)
bitwise_or = _one_to_one_binop(np.bitwise_or, lax.bitwise_or)
bitwise_xor = _one_to_one_binop(np.bitwise_xor, lax.bitwise_xor)
left_shift = _one_to_one_binop(np.left_shift, lax.shift_left)
equal = _one_to_one_binop(np.equal, lax.eq)
multiply = _maybe_bool_binop(np.multiply, lax.mul, lax.bitwise_and)
not_equal = _one_to_one_binop(np.not_equal, lax.ne)
subtract = _one_to_one_binop(np.subtract, lax.sub)
arctan2 = _one_to_one_binop(np.arctan2, lax.atan2, True)
minimum = _one_to_one_binop(np.minimum, lax.min)
maximum = _one_to_one_binop(np.maximum, lax.max)
float_power = _one_to_one_binop(np.float_power, lax.pow, True)
nextafter = _one_to_one_binop(np.nextafter, lax.nextafter, True, True)
@_wraps(np.arccosh)
def arccosh(x):
# Note: arccosh is multi-valued for complex input, and lax.acosh uses a different
# convention than np.arccosh.
out = lax.acosh(*_promote_args_inexact("arccosh", x))
if issubdtype(out.dtype, np.complexfloating):
out = where(real(out) < 0, lax.neg(out), out)
return out
def _comparison_op(numpy_fn, lax_fn):
def fn(x1, x2):
x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)
# Comparison on complex types are defined as a lexicographic ordering on
# the (real, imag) pair.
if issubdtype(_dtype(x1), complexfloating):
rx = lax.real(x1)
ry = lax.real(x2)
return lax.select(lax.eq(rx, ry), lax_fn(lax.imag(x1), lax.imag(x2)),
lax_fn(rx, ry))
return lax_fn(x1, x2)
return _wraps(numpy_fn)(fn)
greater_equal = _comparison_op(np.greater_equal, lax.ge)
greater = _comparison_op(np.greater, lax.gt)
less_equal = _comparison_op(np.less_equal, lax.le)
less = _comparison_op(np.less, lax.lt)
def _logical_op(np_op, bitwise_op):
@_wraps(np_op, update_doc=False)
def op(*args):
zero = lambda x: lax.full_like(x, shape=(), fill_value=0)
args = (x if issubdtype(_dtype(x), bool_) else lax.ne(x, zero(x))
for x in args)
return bitwise_op(*_promote_args(np_op.__name__, *args))
return op
logical_and = _logical_op(np.logical_and, lax.bitwise_and)
logical_not = _logical_op(np.logical_not, lax.bitwise_not)
logical_or = _logical_op(np.logical_or, lax.bitwise_or)
logical_xor = _logical_op(np.logical_xor, lax.bitwise_xor)
@_wraps(np.right_shift)
def right_shift(x1, x2):
x1, x2 = _promote_args(np.right_shift.__name__, x1, x2)
lax_fn = lax.shift_right_logical if \
np.issubdtype(x1.dtype, np.unsignedinteger) else lax.shift_right_arithmetic
return lax_fn(x1, x2)
@_wraps(np.absolute)
def absolute(x):
_check_arraylike('absolute', x)
dt = _dtype(x)
return x if dt == bool_ or issubdtype(dt, unsignedinteger) else lax.abs(x)
abs = _wraps(np.abs)(absolute)
@_wraps(np.rint)
def rint(x):
_check_arraylike('rint', x)
dtype = _dtype(x)
if issubdtype(dtype, integer):
return lax.convert_element_type(x, float_)
if issubdtype(dtype, complexfloating):
return lax.complex(rint(lax.real(x)), rint(lax.imag(x)))
return lax.round(x, lax.RoundingMethod.TO_NEAREST_EVEN)
@_wraps(np.sign)
def sign(x):
_check_arraylike('sign', x)
dtype = _dtype(x)
if issubdtype(dtype, complexfloating):
re = lax.real(x)
return lax.complex(
lax.sign(where(re != 0, re, lax.imag(x))), _constant_like(re, 0))
return lax.sign(x)
@_wraps(np.copysign)
def copysign(x1, x2):
x1, x2 = _promote_args_inexact("copysign", x1, x2)
if issubdtype(_dtype(x1), complexfloating):
raise TypeError("copysign does not support complex-valued inputs")
return where(signbit(x2), -lax.abs(x1), lax.abs(x1))
@_wraps(np.true_divide)
def true_divide(x1, x2):
x1, x2 = _promote_args_inexact("true_divide", x1, x2)
return lax.div(x1, x2)
divide = true_divide
@_wraps(np.floor_divide)
def floor_divide(x1, x2):
x1, x2 = _promote_args("floor_divide", x1, x2)
dtype = _dtype(x1)
if issubdtype(dtype, integer):
quotient = lax.div(x1, x2)
select = logical_and(lax.sign(x1) != lax.sign(x2), lax.rem(x1, x2) != 0)
# TODO(mattjj): investigate why subtracting a scalar was causing promotion
return where(select, quotient - np.array(1, _dtype(quotient)), quotient)
elif issubdtype(dtype, complexfloating):
x1r = lax.real(x1)
x1i = lax.imag(x1)
x2r = lax.real(x2)
x2i = lax.imag(x2)
which = lax.ge(lax.abs(x2r), lax.abs(x2i))
rat1 = where(which, lax._const(x2i, 1), lax.div(x2r, x2i))
rat2 = where(which, lax.div(x2i, x2r), lax._const(x2i, 1))
out = lax.floor(lax.div(lax.add(lax.mul(x1r, rat1), lax.mul(x1i, rat2)),
lax.add(lax.mul(x2r, rat1), lax.mul(x2i, rat2))))
return lax.convert_element_type(out, dtype)
else:
return _float_divmod(x1, x2)[0]
@_wraps(np.divmod)
def divmod(x1, x2):
x1, x2 = _promote_args("divmod", x1, x2)
if issubdtype(_dtype(x1), integer):
return floor_divide(x1, x2), remainder(x1, x2)
else:
return _float_divmod(x1, x2)
def _float_divmod(x1, x2):
# see float_divmod in floatobject.c of CPython
mod = lax.rem(x1, x2)
div = lax.div(lax.sub(x1, mod), x2)
ind = lax.bitwise_and(mod != 0, lax.sign(x2) != lax.sign(mod))
mod = lax.select(ind, mod + x2, mod)
div = lax.select(ind, div - _constant_like(div, 1), div)
return lax.round(div), mod
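# Worked example of _float_divmod: divmod(-7.0, 2.0) starts from the truncated
# remainder -1.0 and quotient -3.0; the sign-mismatch correction adds x2 to the
# remainder and subtracts 1 from the quotient, giving (-4.0, 1.0), which matches
# Python's floor-style divmod.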
@_wraps(np.power)
def power(x1, x2):
# Special case for small positive integer scalars: use binary exponentiation.
# Using lax.pow may be imprecise for floating-point values; the goal of this
# code path is to make sure we end up with a precise output for the common
# pattern ``x ** 2`` or similar.
if isinstance(x2, int):
return lax.integer_pow(x1, x2)
x1, x2 = _promote_args("power", x1, x2)
dtype = _dtype(x1)
if not issubdtype(dtype, integer):
return lax.pow(x1, x2)
# Integer power => use binary exponentiation.
# TODO(phawkins): add integer pow support to XLA.
bits = 6 # Anything more would overflow for any x1 > 1
acc = ones(shape(x1), dtype=dtype)
for _ in range(bits):
acc = where(lax.bitwise_and(x2, _constant_like(x2, 1)),
lax.mul(acc, x1), acc)
x1 = lax.mul(x1, x1)
x2 = lax.shift_right_logical(x2, _constant_like(x2, 1))
return acc
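# Worked example of the binary-exponentiation loop: for x ** 13 (13 = 0b1101),
# acc picks up the factors x, x**4 and x**8 on the set bits, yielding x**13
# after the loop; six iterations cover any non-negative integer exponent up to 63.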
@custom_jvp
@_wraps(np.logaddexp)
def logaddexp(x1, x2):
x1, x2 = _promote_shapes("logaddexp", *_promote_dtypes_inexact(x1, x2))
amax = lax.max(x1, x2)
delta = lax.sub(x1, x2)
return lax.select(isnan(delta),
lax.add(x1, x2), # NaNs or infinities of the same sign.
lax.add(amax, lax.log1p(lax.exp(-lax.abs(delta)))))
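# The max-shift above is the usual overflow-safe log-sum-exp rewrite:
# logaddexp(1000., 1000.) evaluates as 1000 + log1p(exp(0)) ~= 1000.693,
# whereas computing log(exp(1000) + exp(1000)) directly would overflow to inf.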
@logaddexp.defjvp
def _logaddexp_jvp(primals, tangents):
x1, x2 = primals
t1, t2 = tangents
x1, x2, t1, t2 = broadcast_arrays(x1, x2, t1, t2)
primal_out = logaddexp(x1, x2)
tangent_out = (t1 * exp(_replace_inf(x1) - _replace_inf(primal_out)) +
t2 * exp(_replace_inf(x2) - _replace_inf(primal_out)))
return primal_out, tangent_out
def _replace_inf(x):
return lax.select(isposinf(x), zeros_like(x), x)
@custom_jvp
@_wraps(np.logaddexp2)
def logaddexp2(x1, x2):
x1, x2 = _promote_shapes("logaddexp2", *_promote_dtypes_inexact(x1, x2))
amax = lax.max(x1, x2)
delta = lax.sub(x1, x2)
return lax.select(isnan(delta),
lax.add(x1, x2), # NaNs or infinities of the same sign.
lax.add(amax, lax.div(lax.log1p(exp2(-lax.abs(delta))),
_constant_like(x1, np.log(2)))))
@logaddexp2.defjvp
def _logaddexp2_jvp(primals, tangents):
x1, x2 = primals
t1, t2 = tangents
x1, x2, t1, t2 = broadcast_arrays(x1, x2, t1, t2)
primal_out = logaddexp2(x1, x2)
tangent_out = (t1 * 2 ** (_replace_inf(x1) - _replace_inf(primal_out)) +
t2 * 2 ** (_replace_inf(x2) - _replace_inf(primal_out)))
return primal_out, tangent_out
@_wraps(np.log2)
def log2(x):
x, = _promote_dtypes_inexact(x)
return lax.div(lax.log(x), lax.log(_constant_like(x, 2)))
@_wraps(np.log10)
def log10(x):
x, = _promote_dtypes_inexact(x)
return lax.div(lax.log(x), lax.log(_constant_like(x, 10)))
@_wraps(np.exp2)
def exp2(x):
x, = _promote_dtypes_inexact(x)
return lax.exp(lax.mul(lax.log(_constant_like(x, 2)), x))
@_wraps(np.signbit)
def signbit(x):
x, = _promote_shapes("signbit", x)
dtype = _dtype(x)
if issubdtype(dtype, integer):
return lax.lt(x, _constant_like(x, 0))
elif issubdtype(dtype, bool_):
return full_like(x, False, dtype=bool_)
elif not issubdtype(dtype, floating):
raise ValueError(
"jax.numpy.signbit is not well defined for %s" % dtype)
# TPU supports BF16 but not S16 types, so as a workaround, convert BF16 to
# F32.
if dtype == bfloat16:
dtype = float32
x = lax.convert_element_type(x, float32)
info = finfo(dtype)
if info.bits not in _INT_DTYPES:
raise NotImplementedError(
"jax.numpy.signbit only supports 16, 32, and 64-bit types.")
int_type = _INT_DTYPES[info.bits]
x = lax.bitcast_convert_type(x, int_type)
return lax.convert_element_type(x >> (info.nexp + info.nmant), np.bool_)
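# Worked example for float32: nexp=8 and nmant=23, so the shift above by 31
# bits keeps only the sign information, making signbit(-0.0) True and
# signbit(3.5) False.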
@_wraps(np.trapz)
def trapz(y, x=None, dx=1.0, axis=-1):
_check_arraylike('trapz', y)
y = moveaxis(y, axis, -1)
if x is not None:
if ndim(x) == 1:
dx = diff(x)
else:
dx = moveaxis(diff(x, axis=axis), axis, -1)
return 0.5 * (dx * (y[..., 1:] + y[..., :-1])).sum(-1)
@_wraps(np.trunc)
def trunc(x):
_check_arraylike('trunc', x)
return where(lax.lt(x, lax._const(x, 0)), ceil(x), floor(x))
def _conv(x, y, mode, op, precision):
if issubdtype(_dtype(x), complexfloating) or issubdtype(_dtype(y), complexfloating):
raise NotImplementedError(f"{op}() does not support complex inputs")
if ndim(x) != 1 or ndim(y) != 1:
raise ValueError(f"{op}() only support 1-dimensional inputs.")
x, y = _promote_dtypes_inexact(x, y)
if len(x) == 0 or len(y) == 0:
raise ValueError(f"{op}: inputs cannot be empty, got shapes {x.shape} and {y.shape}.")
out_order = slice(None)
if len(x) < len(y):
x, y = y, x
if op == "correlate":
out_order = slice(None, None, -1)
if op == 'convolve':
y = y[::-1]
if mode == 'valid':
padding = [(0, 0)]
elif mode == 'same':
padding = [(y.shape[0] // 2, y.shape[0] - y.shape[0] // 2 - 1)]
elif mode == 'full':
padding = [(y.shape[0] - 1, y.shape[0] - 1)]
else:
raise ValueError("mode must be one of ['full', 'same', 'valid']")
result = lax.conv_general_dilated(x[None, None, :], y[None, None, :], (1,),
padding, precision=precision)
return result[0, 0, out_order]
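# Padding arithmetic above, illustrated for len(x)=5 and len(y)=3 after the
# swap: 'full' pads by (2, 2) and returns 7 samples, 'same' pads by (1, 1) and
# returns 5, and 'valid' returns the 3 fully overlapping positions.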
@_wraps(np.convolve, lax_description=_PRECISION_DOC)
def convolve(a, v, mode='full', *, precision=None):
_check_arraylike("convolve", a, v)
return _conv(a, v, mode, 'convolve', precision)
@_wraps(np.correlate, lax_description=_PRECISION_DOC)
def correlate(a, v, mode='valid', *, precision=None):
_check_arraylike("correlate", a, v)
return _conv(a, v, mode, 'correlate', precision)
def _normalize_float(x):
info = finfo(_dtype(x))
cond = lax.abs(x) < info.tiny
x1 = where(cond, x * lax._const(x, 1 << info.nmant), x)
x2 = where(cond, lax._const(np.int32, -info.nmant), lax._const(np.int32, 0))
int_type = _INT_DTYPES[info.bits]
return lax.bitcast_convert_type(x1, int_type), x2
@_wraps(np.ldexp)
@jit
def ldexp(x1, x2):
dtype = dtypes.canonicalize_dtype(_result_dtype(np.ldexp, x1, x2))
x1, x2 = _promote_shapes("ldexp", x1, x2)
x1 = lax.convert_element_type(x1, dtype)
info = finfo(dtype)
mask = (1 << info.nexp) - 1
bias = ((1 << info.nexp) - 1) >> 1
int_type = _INT_DTYPES[info.bits]
x, e = _normalize_float(x1)
x2 += e + ((x >> info.nmant) & mask) - bias
# find underflow/overflow before denormalization
underflow_cond = x2 < -(bias + info.nmant)
overflow_cond = x2 > bias
m = ones_like(x, dtype=dtype)
# denormals
cond = x2 < -bias + 1
x2 = where(cond, x2 + info.nmant, x2)
m = where(cond, m / (1 << info.nmant), m)
x2 = lax.convert_element_type(x2, np.int32)
x &= ~(mask << info.nmant)
x |= ((lax.convert_element_type(x2, int_type) + bias) << info.nmant)
x = lax.convert_element_type(m, dtype) * lax.bitcast_convert_type(x, dtype)
# underflow
x = where(underflow_cond, zeros_like(x, dtype=dtype), x)
# overflow
x = where(overflow_cond, lax.sign(x1) * full_like(x, np.inf), x)
# ldexp(x1, x2) = x1 for x1 = inf, -inf, nan, 0
return where(isinf(x1) | isnan(x1) | (x1 == 0), x1, x)
@_wraps(np.frexp)
@jit
def frexp(x):
x = asarray(x)
if issubdtype(x.dtype, complexfloating):
raise TypeError("frexp does not support complex-valued inputs")
elif not issubdtype(x.dtype, floating):
x = lax.convert_element_type(x, float_)
dtype = _dtype(x)
info = finfo(dtype)
mask = (1 << info.nexp) - 1
bias = ((1 << info.nexp) - 1) >> 1
x1, x2 = _normalize_float(x)
x2 += ((x1 >> info.nmant) & mask) - bias + 1
x1 &= ~(mask << info.nmant)
x1 |= (bias - 1) << info.nmant
x1 = lax.bitcast_convert_type(x1, dtype)
cond = isinf(x) | isnan(x) | (x == 0)
x2 = where(cond, zeros_like(x2), x2)
return where(cond, x, x1), lax.convert_element_type(x2, int32)
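# Worked example of the bit manipulation in frexp/ldexp: frexp(8.0) returns
# (0.5, 4) because 8 = 0.5 * 2**4, and ldexp(0.5, 4) reassembles 8.0 from the
# mantissa and exponent.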
@_wraps(np.remainder)
def remainder(x1, x2):
x1, x2 = _promote_args("remainder", x1, x2)
zero = _constant_like(x1, 0)
trunc_mod = lax.rem(x1, x2)
trunc_mod_not_zero = lax.ne(trunc_mod, zero)
do_plus = lax.bitwise_and(
lax.ne(lax.lt(trunc_mod, zero), lax.lt(x2, zero)), trunc_mod_not_zero)
return lax.select(do_plus, lax.add(trunc_mod, x2), trunc_mod)
mod = _wraps(np.mod)(remainder)
@_wraps(np.fmod)
def fmod(x1, x2):
_check_arraylike("fmod", x1, x2)
if issubdtype(_dtype(x1, x2), integer):
x2 = where(x2 == 0, 1, x2)
return lax.rem(*_promote_args("fmod", x1, x2))
@_wraps(np.cbrt)
def cbrt(x):
_check_arraylike("cbrt", x)
x, = _promote_dtypes_inexact(x)
return lax.sign(x) * power(lax.abs(x), _constant_like(x, 1. / 3.))
@_wraps(np.square)
def square(x):
_check_arraylike("square", x)
return lax.integer_pow(x, 2)
@_wraps(np.deg2rad)
def deg2rad(x):
_check_arraylike("deg2rad", x)
x, = _promote_dtypes_inexact(x)
return lax.mul(x, lax._const(x, pi / 180))
@_wraps(np.rad2deg)
def rad2deg(x):
_check_arraylike("rad2deg", x)
x, = _promote_dtypes_inexact(x)
return lax.mul(x, lax._const(x, 180 / pi))
degrees = rad2deg
radians = deg2rad
@_wraps(np.histogram_bin_edges)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
if isinstance(bins, str):
raise NotImplementedError("string values for `bins` not implemented.")
a = ravel(a)
b = asarray(bins)
if b.ndim == 1:
return b
if range is None:
range = (a.min(), a.max())
assert len(range) == 2
range = asarray(range)
range = (where(ptp(range) == 0, range[0] - 0.5, range[0]),
where(ptp(range) == 0, range[1] + 0.5, range[1]))
dtype = _dtype(a)
if issubdtype(dtype, integer):
dtype = promote_types(dtype, float32)
return linspace(range[0], range[1], bins + 1, dtype=dtype)
@_wraps(np.histogram)
def histogram(a, bins=10, range=None, weights=None, density=None):
if weights is not None and a.shape != weights.shape:
raise ValueError("weights should have the same shape as a.")
a = ravel(a)
if weights is not None:
weights = ravel(weights)
else:
weights = ones_like(a)
bin_edges = histogram_bin_edges(a, bins, range, weights)
bin_idx = searchsorted(bin_edges, a, side='right')
bin_idx = where(a == bin_edges[-1], len(bin_edges) - 1, bin_idx)
counts = bincount(bin_idx, weights, length=len(bin_edges))[1:]
if density:
bin_widths = diff(bin_edges)
counts = counts / bin_widths / counts.sum()
return counts, bin_edges
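# Illustrative sketch (not part of the library): with density=True the counts
# are normalized by the bin widths and the total count, so the histogram
# integrates to one, i.e. sum(counts * diff(bin_edges)) == 1 (assuming `jnp`
# is this module):
#
#   counts, edges = jnp.histogram(jnp.array([1., 2., 2., 3.]), bins=2, density=True)
#   # counts == [0.25, 0.75]; sum(counts * jnp.diff(edges)) == 1.0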
@_wraps(np.histogram2d)
def histogram2d(x, y, bins=10, range=None, weights=None, density=None):
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
x_edges = y_edges = asarray(bins)
bins = [x_edges, y_edges]
sample = transpose(asarray([x, y]))
hist, edges = histogramdd(sample, bins, range, weights, density)
return hist, edges[0], edges[1]
@_wraps(np.histogramdd)
def histogramdd(sample, bins=10, range=None, weights=None, density=None):
_check_arraylike("histogramdd", sample)
N, D = shape(sample)
if weights is not None and weights.shape != (N,):
raise ValueError("should have one weight for each sample.")
try:
num_bins = len(bins)
if num_bins != D:
raise ValueError("should be a bin for each dimension.")
except TypeError:
    # when `bins` is an integer, the same number of bins is used for each dimension
bins = D * [bins]
bin_idx_by_dim = D*[None]
nbins = np.empty(D, int)
bin_edges_by_dim = D*[None]
dedges = D*[None]
for i in builtins.range(D):
bin_edges = histogram_bin_edges(sample[:, i], bins[i], range, weights)
bin_idx = searchsorted(bin_edges, sample[:, i], side='right')
bin_idx = where(sample[:, i] == bin_edges[-1], bin_idx - 1, bin_idx)
bin_idx_by_dim[i] = bin_idx
nbins[i] = len(bin_edges) + 1
bin_edges_by_dim[i] = bin_edges
dedges[i] = diff(bin_edges_by_dim[i])
xy = ravel_multi_index(bin_idx_by_dim, nbins, mode='clip')
hist = bincount(xy, weights, length=nbins.prod())
hist = reshape(hist, nbins)
core = D*(slice(1, -1),)
hist = hist[core]
if density:
s = sum(hist)
for i in builtins.range(D):
_shape = np.ones(D, int)
_shape[i] = nbins[i] - 2
hist = hist / reshape(dedges[i], _shape)
hist /= s
return hist, bin_edges_by_dim
@_wraps(np.heaviside)
def heaviside(x1, x2):
_check_arraylike("heaviside", x1, x2)
x1, x2 = _promote_dtypes_inexact(x1, x2)
zero = lax._const(x1, 0)
return where(lax.lt(x1, zero), zero,
where(lax.gt(x1, zero), lax._const(x1, 1), x2))
@_wraps(np.hypot)
def hypot(x1, x2):
_check_arraylike("hypot", x1, x2)
x1, x2 = _promote_dtypes_inexact(x1, x2)
x1 = lax.abs(x1)
x2 = lax.abs(x2)
x1, x2 = maximum(x1, x2), minimum(x1, x2)
return lax.select(x1 == 0, x1, x1 * lax.sqrt(1 + lax.square(lax.div(x2, lax.select(x1 == 0, ones_like(x1), x1)))))
@_wraps(np.reciprocal)
def reciprocal(x):
_check_arraylike("reciprocal", x)
x, = _promote_dtypes_inexact(x)
return lax.integer_pow(x, -1)
@_wraps(np.sinc, update_doc=False)
def sinc(x):
_check_arraylike("sinc", x)
x, = _promote_dtypes_inexact(x)
eq_zero = lax.eq(x, lax._const(x, 0))
pi_x = lax.mul(lax._const(x, pi), x)
safe_pi_x = where(eq_zero, lax._const(x, 0), pi_x)
return where(eq_zero, _sinc_maclaurin(0, pi_x),
lax.div(lax.sin(safe_pi_x), safe_pi_x))
@partial(custom_jvp, nondiff_argnums=(0,))
def _sinc_maclaurin(k, x):
# compute the kth derivative of x -> sin(x)/x evaluated at zero (since we
# compute the monomial term in the jvp rule)
if k % 2:
return lax.full_like(x, 0)
else:
return lax.full_like(x, (-1) ** (k // 2) / (k + 1))
@_sinc_maclaurin.defjvp
def _sinc_maclaurin_jvp(k, primals, tangents):
(x,), (t,) = primals, tangents
return _sinc_maclaurin(k, x), _sinc_maclaurin(k + 1, x) * t
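# Illustrative sketch (not part of the library): the custom JVP above exists
# so that derivatives of sinc are well defined at x == 0 instead of producing
# nan from the 0/0 in sin(pi*x)/(pi*x). For example (assuming `jax` and `jnp`):
#
#   jax.grad(jnp.sinc)(0.0)   # expected to be 0.0 (sinc is even), not nan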
@_wraps(np.transpose)
def transpose(a, axes=None):
_check_arraylike("transpose", a)
axes = np.arange(ndim(a))[::-1] if axes is None else axes
return lax.transpose(a, axes)
@_wraps(np.rot90)
def rot90(m, k=1, axes=(0, 1)):
_check_arraylike("rot90", m)
ax1, ax2 = axes
ax1 = _canonicalize_axis(ax1, ndim(m))
ax2 = _canonicalize_axis(ax2, ndim(m))
if ax1 == ax2:
raise ValueError("Axes must be different") # same as numpy error
k = k % 4
if k == 0:
return m
elif k == 2:
return flip(flip(m, ax1), ax2)
else:
perm = list(range(m.ndim))
perm[ax1], perm[ax2] = perm[ax2], perm[ax1]
if k == 1:
return transpose(flip(m, ax2), perm)
else:
return flip(transpose(m, perm), ax2)
@_wraps(np.flip)
def flip(m, axis=None):
_check_arraylike("flip", m)
if axis is None:
return lax.rev(m, list(range(len(shape(m)))))
return lax.rev(m, [_canonicalize_axis(axis, ndim(m))])
@_wraps(np.fliplr)
def fliplr(m):
return flip(m, 1)
@_wraps(np.flipud)
def flipud(m):
return flip(m, 0)
@_wraps(np.conjugate)
def conjugate(x):
_check_arraylike("conjugate", x)
return lax.conj(x) if iscomplexobj(x) else x
conj = conjugate
@_wraps(np.imag)
def imag(val):
_check_arraylike("imag", val)
return lax.imag(val) if iscomplexobj(val) else zeros_like(val)
@_wraps(np.real)
def real(val):
_check_arraylike("real", val)
return lax.real(val) if iscomplexobj(val) else val
@_wraps(np.iscomplex)
def iscomplex(x):
i = imag(x)
return lax.ne(i, lax._const(i, 0))
@_wraps(np.isreal)
def isreal(x):
i = imag(x)
return lax.eq(i, lax._const(i, 0))
@_wraps(np.angle)
def angle(z):
re = real(z)
im = imag(z)
dtype = _dtype(re)
if not issubdtype(dtype, inexact) or (
issubdtype(_dtype(z), floating) and ndim(z) == 0):
dtype = dtypes.canonicalize_dtype(float_)
re = lax.convert_element_type(re, dtype)
im = lax.convert_element_type(im, dtype)
return lax.atan2(im, re)
@_wraps(np.diff)
def diff(a, n=1, axis=-1, prepend=None, append=None):
_check_arraylike("diff", a)
if n == 0:
return a
if n < 0:
raise ValueError(f"order must be non-negative but got {n}")
if ndim(a) == 0:
raise ValueError(f"diff requires input that is at least one dimensional; got {a}")
nd = a.ndim
combined = []
if prepend is not None:
_check_arraylike("diff", prepend)
if isscalar(prepend):
shape = list(a.shape)
shape[axis] = 1
prepend = broadcast_to(prepend, tuple(shape))
combined.append(prepend)
combined.append(a)
if append is not None:
_check_arraylike("diff", append)
if isscalar(append):
shape = list(a.shape)
shape[axis] = 1
append = broadcast_to(append, tuple(shape))
combined.append(append)
if len(combined) > 1:
a = concatenate(combined, axis)
slice1 = [slice(None)] * nd
slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
op = not_equal if a.dtype == np.bool_ else subtract
for _ in range(n):
a = op(a[slice1], a[slice2])
return a
_EDIFF1D_DOC = """\
Unlike NumPy's implementation of ediff1d, :py:func:`jax.numpy.ediff1d` will not
issue an error if casting ``to_end`` or ``to_begin`` to the type of ``ary``
loses precision.
"""
@_wraps(np.ediff1d, lax_description=_EDIFF1D_DOC)
def ediff1d(ary, to_end=None, to_begin=None):
ary = ravel(asarray(ary))
result = lax.sub(ary[1:], ary[:-1])
if to_begin is not None:
result = concatenate((ravel(asarray(to_begin, dtype=ary.dtype)), result))
if to_end is not None:
result = concatenate((result, ravel(asarray(to_end, dtype=ary.dtype))))
return result
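# Illustrative sketch (not part of the library), following _EDIFF1D_DOC above:
# `to_end` and `to_begin` are cast to the dtype of `ary` without an error,
# even when the cast loses precision or overflows (assuming `jnp`):
#
#   jnp.ediff1d(jnp.arange(3, dtype=jnp.int8), to_end=1000)
#   # the trailing 1000 is silently cast to int8, whereas np.ediff1d complains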
@partial(jit, static_argnums=2)
def _gradient(a, varargs, axis):
def gradient_along_axis(a, h, axis):
sliced = partial(lax.slice_in_dim, a, axis=axis)
a_grad = concatenate((
(sliced(1, 2) - sliced(0, 1)), # upper edge
(sliced(2, None) - sliced(None, -2)) * 0.5, # inner
(sliced(-1, None) - sliced(-2, -1)), # lower edge
), axis)
return a_grad / h
if axis is None:
axis = range(a.ndim)
else:
if isinstance(axis, int):
axis = (axis,)
if not isinstance(axis, tuple) and not isinstance(axis, list):
raise ValueError("Give `axis` either as int or iterable")
elif len(axis) == 0:
return []
axis = [_canonicalize_axis(i, a.ndim) for i in axis]
if _min([s for i, s in enumerate(a.shape) if i in axis]) < 2:
raise ValueError("Shape of array too small to calculate "
"a numerical gradient, "
"at least 2 elements are required.")
len_axes = len(axis)
n = len(varargs)
if n == 0 or varargs is None:
# no spacing
dx = [1.0] * len_axes
elif n == 1:
# single value for all axes
dx = varargs * len_axes
elif n == len_axes:
dx = varargs
else:
TypeError("Invalid number of spacing arguments %d" % n)
if ndim(dx[0]) != 0:
raise NotImplementedError("Non-constant spacing not implemented")
# TODO: use jax.lax loop tools if possible
a_grad = [gradient_along_axis(a, h, ax) for ax, h in zip(axis, dx)]
if len(axis) == 1:
a_grad = a_grad[0]
return a_grad
@_wraps(np.gradient)
def gradient(f, *varargs, axis=None, edge_order=None):
if edge_order is not None:
raise NotImplementedError("The 'edge_order' argument to jnp.gradient is not supported.")
return _gradient(f, varargs, axis)
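# Illustrative sketch (not part of the library): _gradient uses one-sided
# differences at the edges and central differences in the interior, so for
# unit spacing (assuming `jnp`):
#
#   jnp.gradient(jnp.array([1., 2., 4., 7.]))
#   # -> [1. , 1.5, 2.5, 3. ]
#   # edges: 2-1 and 7-4; interior: (4-1)/2 and (7-2)/2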
@_wraps(np.isrealobj)
def isrealobj(x):
return not iscomplexobj(x)
@_wraps(np.reshape)
def reshape(a, newshape, order="C"):
try:
return a.reshape(newshape, order=order) # forward to method for ndarrays
except AttributeError:
return _reshape(a, newshape, order=order)
def _compute_newshape(a, newshape):
"""Fixes a -1 value in newshape, if present."""
# other errors, like having more than one -1, are caught downstream
try: iter(newshape)
  except TypeError: iterable = False
else: iterable = True
def check(size):
return size if type(size) is Poly else core.concrete_or_error(
int, size, "The error arose in jax.numpy.reshape.")
newshape = [check(size) for size in newshape] if iterable else check(newshape)
if np.any(np.equal(newshape, -1)):
fix = -a.size // (newshape if type(newshape) is Poly else _prod(newshape))
return [d if d != -1 else fix for d in newshape]
else:
return newshape
def _reshape(a, newshape, order="C"):
computed_newshape = _compute_newshape(a, newshape)
if order == "C":
return lax.reshape(a, computed_newshape, None)
elif order == "F":
dims = np.arange(ndim(a))[::-1]
return lax.reshape(a, computed_newshape[::-1], dims).T
elif order == "A":
raise NotImplementedError("np.reshape order=A is not implemented.")
else:
raise ValueError("Unexpected value for 'order' argument: {}.".format(order))
def _reshape_method(a, *newshape, **kwargs):
order = kwargs.pop("order", "C")
if len(kwargs) == 1:
invalid_kwarg, = kwargs
msg = "'{}' is an invalid keyword argument for this function"
raise TypeError(msg.format(invalid_kwarg)) # same as NumPy error
elif kwargs:
invalid_kwargs = "'{}'".format("'".join(kwargs))
msg = "{} are invalid keyword arguments for this function"
raise TypeError(msg.format(invalid_kwargs)) # different from NumPy error
if (len(newshape) == 1 and not isinstance(newshape[0], int) and
type(newshape[0]) is not Poly):
newshape = newshape[0]
return _reshape(a, newshape, order=order)
@_wraps(np.ravel)
def ravel(a, order="C"):
if order == "K":
raise NotImplementedError("Ravel not implemented for order='K'.")
return reshape(a, (size(a),), order)
@_wraps(np.ravel_multi_index)
def ravel_multi_index(multi_index, dims, mode='raise', order='C'):
assert len(multi_index) == len(dims), f"len(multi_index)={len(multi_index)} != len(dims)={len(dims)}"
dims = tuple(core.concrete_or_error(int, d, "in `dims` argument of ravel_multi_index().") for d in dims)
_check_arraylike("ravel_multi_index", *multi_index)
for index in multi_index:
if mode == 'raise':
core.concrete_or_error(array, index,
"The error occurred because ravel_multi_index was jit-compiled"
" with mode='raise'. Use mode='wrap' or mode='clip' instead.")
if not issubdtype(_dtype(index), integer):
raise TypeError("only int indices permitted")
if mode == "raise":
if _any(any((i < 0) | (i >= d)) for i, d in zip(multi_index, dims)):
raise ValueError("invalid entry in coordinates array")
elif mode == "clip":
multi_index = [clip(i, 0, d - 1) for i, d in zip(multi_index, dims)]
elif mode == "wrap":
multi_index = [i % d for i, d in zip(multi_index, dims)]
else:
raise ValueError(f"invalid mode={mode!r}. Expected 'raise', 'wrap', or 'clip'")
if order == "F":
strides = np.cumprod((1,) + dims[:-1])
elif order == "C":
strides = np.cumprod((1,) + dims[1:][::-1])[::-1]
else:
raise ValueError(f"invalid order={order!r}. Expected 'C' or 'F'")
result = 0
for i, s in zip(multi_index, strides):
result = result + i * s
return result
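# Illustrative sketch (not part of the library): for C order the strides over
# dims (2, 3) are (3, 1), so a multi-index (i, j) maps to i*3 + j. Under jit,
# mode='raise' cannot validate the indices, hence the mode='wrap'/'clip' hint
# in the error above (assuming `jnp`):
#
#   jnp.ravel_multi_index((jnp.array([0, 1]), jnp.array([1, 2])), (2, 3),
#                         mode='clip')
#   # -> [1, 5]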
_UNRAVEL_INDEX_DOC = """\
Unlike numpy's implementation of unravel_index, negative indices are accepted
and out-of-bounds indices are clipped.
"""
@_wraps(np.unravel_index, lax_description=_UNRAVEL_INDEX_DOC)
def unravel_index(indices, shape):
indices = asarray(indices)
sizes = pad(shape, (0, 1), constant_values=1)
cumulative_sizes = cumprod(sizes[::-1])[::-1]
total_size = cumulative_sizes[0]
# Clip so raveling and unraveling an oob index will not change the behavior
clipped_indices = clip(indices, -total_size, total_size - 1)
# Add enough trailing dims to avoid conflict with flat_index
cumulative_sizes = cumulative_sizes.reshape([-1] + [1] * indices.ndim)
idx = clipped_indices % cumulative_sizes[:-1] // cumulative_sizes[1:]
return tuple(idx)
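# Illustrative sketch (not part of the library), following _UNRAVEL_INDEX_DOC:
# out-of-bounds flat indices are clipped and negative indices are accepted
# rather than raising (assuming `jnp`):
#
#   jnp.unravel_index(7, (2, 3))    # clipped to the last element -> (1, 2)
#   jnp.unravel_index(-1, (2, 3))   # counts from the end         -> (1, 2)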
@_wraps(np.squeeze)
def squeeze(a, axis: Union[int, Tuple[int, ...]] = None):
_check_arraylike("squeeze", a)
if axis is None:
a_shape = shape(a)
axis = tuple(i for i, d in enumerate(a_shape) if d == 1)
elif not isinstance(axis, tuple):
axis = (axis,)
return lax.squeeze(a, axis)
@_wraps(np.expand_dims)
def expand_dims(a, axis: Union[int, Tuple[int, ...]]):
_check_arraylike("expand_dims", a)
if not isinstance(axis, tuple):
axis = (axis,)
return lax.expand_dims(a, axis)
@_wraps(np.swapaxes)
def swapaxes(a, axis1, axis2):
_check_arraylike("swapaxes", a)
perm = np.arange(ndim(a))
perm[axis1], perm[axis2] = perm[axis2], perm[axis1]
return lax.transpose(a, perm)
@_wraps(np.moveaxis)
def moveaxis(a, source, destination):
_check_arraylike("moveaxis", a)
try:
source = (operator.index(source),)
except TypeError:
pass
try:
destination = (operator.index(destination),)
except TypeError:
pass
source = tuple(_canonicalize_axis(i, ndim(a)) for i in source)
destination = tuple(_canonicalize_axis(i, ndim(a)) for i in destination)
if len(source) != len(destination):
raise ValueError("Inconsistent number of elements: {} vs {}"
.format(len(source), len(destination)))
perm = [i for i in range(ndim(a)) if i not in source]
for dest, src in sorted(zip(destination, source)):
perm.insert(dest, src)
return lax.transpose(a, perm)
@_wraps(np.isclose)
def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
a, b = _promote_args("isclose", asarray(a), asarray(b))
dtype = _dtype(a)
if issubdtype(dtype, inexact):
if issubdtype(dtype, complexfloating):
dtype = _complex_elem_type(dtype)
rtol = lax.convert_element_type(rtol, dtype)
atol = lax.convert_element_type(atol, dtype)
out = lax.le(
lax.abs(lax.sub(a, b)),
lax.add(atol, lax.mul(rtol, lax.abs(b))))
# This corrects the comparisons for infinite and nan values
a_inf = isinf(a)
b_inf = isinf(b)
any_inf = logical_or(a_inf, b_inf)
both_inf = logical_and(a_inf, b_inf)
    # Set elements where either a or b is infinite to False
out = logical_and(out, logical_not(any_inf))
    # Set elements where a and b are the same infinity back to True
same_value = lax.eq(a, b)
same_inf = logical_and(both_inf, same_value)
out = logical_or(out, same_inf)
    # Set elements where either a or b is NaN to False
a_nan = isnan(a)
b_nan = isnan(b)
any_nan = logical_or(a_nan, b_nan)
out = logical_and(out, logical_not(any_nan))
if equal_nan:
      # Set elements where both a and b are NaN to True
both_nan = logical_and(a_nan, b_nan)
out = logical_or(out, both_nan)
return _maybe_numpy_1_13_isclose_behavior(a, out)
else:
return lax.eq(a, b)
numpy_version = tuple(map(int, np.version.version.split('.')[:2]))
if numpy_version < (1, 14):
# see discussion at https://github.com/numpy/numpy/pull/9720
def _maybe_numpy_1_13_isclose_behavior(a, out):
if size(out) == 1 and issubdtype(_dtype(a), complexfloating):
return lax.reshape(out, (1,))
else:
return out
else:
def _maybe_numpy_1_13_isclose_behavior(a, out):
return out
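# Illustrative sketch (not part of the library): isclose implements the
# asymmetric tolerance test |a - b| <= atol + rtol * |b|, with the inf/nan
# fixups above. For example (assuming `jnp`):
#
#   jnp.isclose(1.0, 1.001, rtol=1e-2)   # True:  0.001 <= 1e-8 + 1e-2 * 1.001
#   jnp.isclose(jnp.inf, jnp.inf)        # True:  same infinity
#   jnp.isclose(jnp.nan, jnp.nan)        # False unless equal_nan=True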
@_wraps(np.interp)
def interp(x, xp, fp, left=None, right=None, period=None):
if shape(xp) != shape(fp) or ndim(xp) != 1:
raise ValueError("xp and fp must be one-dimensional arrays of equal size")
x, xp, fp = map(asarray, _promote_dtypes_inexact(x, xp, fp))
if period is not None:
if period == 0:
raise ValueError(f"period must be a non-zero value; got {period}")
period = abs(period)
x = x % period
xp = xp % period
xp, fp = lax.sort_key_val(xp, fp)
xp = concatenate([xp[-1:] - period, xp, xp[:1] + period])
fp = concatenate([fp[-1:], fp, fp[:1]])
i = clip(searchsorted(xp, x, side='right'), 1, len(xp) - 1)
df = fp[i] - fp[i - 1]
dx = xp[i] - xp[i - 1]
delta = x - xp[i - 1]
f = where((dx == 0), fp[i], fp[i - 1] + (delta / dx) * df)
if period is None:
f = where(x < xp[0], fp[0] if left is None else left, f)
f = where(x > xp[-1], fp[-1] if right is None else right, f)
return f
@_wraps(np.in1d, lax_description="""
In the JAX version, the `assume_unique` argument is not referenced.
""")
def in1d(ar1, ar2, assume_unique=False, invert=False):
ar1 = ravel(ar1)
ar2 = ravel(ar2)
# Note: an algorithm based on searchsorted has better scaling, but in practice
# is very slow on accelerators because it relies on lax control flow. If XLA
# ever supports binary search natively, we should switch to this:
# ar2 = jnp.sort(ar2)
# ind = jnp.searchsorted(ar2, ar1)
# if invert:
# return ar1 != ar2[ind]
# else:
# return ar1 == ar2[ind]
if invert:
return (ar1[:, None] != ar2).all(-1)
else:
return (ar1[:, None] == ar2).any(-1)
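# Illustrative sketch (not part of the library): the broadcast comparison
# above materializes a (len(ar1), len(ar2)) boolean matrix, trading memory for
# avoiding lax control flow as noted in the comment (assuming `jnp`):
#
#   jnp.in1d(jnp.array([1, 2, 3]), jnp.array([2, 4]))
#   # -> [False, True, False]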
@_wraps(np.setdiff1d, lax_description="""
In the JAX version, the `assume_unique` argument is not referenced.
""")
def setdiff1d(ar1, ar2, assume_unique=False):
ar1 = core.concrete_or_error(asarray, ar1, "The error arose in setdiff1d()")
ar2 = core.concrete_or_error(asarray, ar2, "The error arose in setdiff1d()")
ar1 = unique(ar1)
ar2 = unique(ar2)
idx = in1d(ar1, ar2, invert=True)
return ar1[idx]
@partial(jit, static_argnums=2)
def _intersect1d_sorted_mask(ar1, ar2, return_indices=False):
"""
Helper function for intersect1d which is jit-able
"""
ar = concatenate((ar1, ar2))
if return_indices:
iota = lax.broadcasted_iota(np.int64, shape(ar), dimension=0)
aux, indices = lax.sort_key_val(ar, iota)
else:
aux = sort(ar)
mask = aux[1:] == aux[:-1]
if return_indices:
return aux, mask, indices
else:
return aux, mask
@_wraps(np.intersect1d)
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
ar1 = core.concrete_or_error(asarray, ar1, "The error arose in intersect1d()")
ar2 = core.concrete_or_error(asarray, ar2, "The error arose in intersect1d()")
if not assume_unique:
if return_indices:
ar1, ind1 = unique(ar1, return_index=True)
ar2, ind2 = unique(ar2, return_index=True)
else:
ar1 = unique(ar1)
ar2 = unique(ar2)
else:
ar1 = ravel(ar1)
ar2 = ravel(ar2)
if return_indices:
aux, mask, aux_sort_indices = _intersect1d_sorted_mask(ar1, ar2, return_indices)
else:
aux, mask = _intersect1d_sorted_mask(ar1, ar2, return_indices)
int1d = aux[:-1][mask]
if return_indices:
ar1_indices = aux_sort_indices[:-1][mask]
ar2_indices = aux_sort_indices[1:][mask] - ar1.size
if not assume_unique:
ar1_indices = ind1[ar1_indices]
ar2_indices = ind2[ar2_indices]
return int1d, ar1_indices, ar2_indices
else:
return int1d
@_wraps(np.isin, lax_description="""
In the JAX version, the `assume_unique` argument is not referenced.
""")
def isin(element, test_elements, assume_unique=False, invert=False):
result = in1d(element, test_elements, assume_unique=assume_unique, invert=invert)
return result.reshape(shape(element))
# The `jit` on `where` exists to avoid materializing constants in cases like
# `np.where(np.zeros(1000), 7, 4)`. In op-by-op mode, we don't want to
# materialize the broadcast forms of scalar arguments.
@jit
def _where(condition, x=None, y=None):
if x is None or y is None:
raise ValueError("Either both or neither of the x and y arguments should "
"be provided to jax.numpy.where, got {} and {}."
.format(x, y))
if not issubdtype(_dtype(condition), bool_):
condition = lax.ne(condition, zeros_like(condition))
x, y = _promote_dtypes(x, y)
condition, x, y = broadcast_arrays(condition, x, y)
return lax.select(condition, x, y) if np.size(x) else x
_WHERE_DOC = """\
At present, JAX does not support JIT-compilation of the single-argument form
of :py:func:`jax.numpy.where` because its output shape is data-dependent. The
three-argument form does not have a data-dependent shape and can be JIT-compiled
successfully.
"""
@_wraps(np.where, update_doc=False, lax_description=_WHERE_DOC)
def where(condition, x=None, y=None):
if x is None and y is None:
return nonzero(asarray(condition))
else:
return _where(condition, x, y)
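# Illustrative sketch (not part of the library), following _WHERE_DOC: the
# three-argument form traces fine under jit, while the single-argument form
# needs concrete values because its output shape is data-dependent (assuming
# `jax` and `jnp`):
#
#   jax.jit(lambda c, x, y: jnp.where(c, x, y))(jnp.array([True, False]), 1, 0)
#   # works; jax.jit(lambda c: jnp.where(c)) raises a concretization error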
@_wraps(np.select)
def select(condlist, choicelist, default=0):
if len(condlist) != len(choicelist):
msg = "condlist must have length equal to choicelist ({} vs {})"
raise ValueError(msg.format(len(condlist), len(choicelist)))
if len(condlist) == 0:
raise ValueError("condlist must be non-empty")
choices = _promote_dtypes(default, *choicelist)
choicelist = choices[1:]
output = choices[0]
for cond, choice in zip(condlist[::-1], choicelist[::-1]):
output = where(cond, choice, output)
return output
@_wraps(np.bincount, lax_description="""\
Jax adds the optional `length` parameter which specifies the output length, and
defaults to ``x.max() + 1``. It must be specified for bincount to be compilable.
Values larger than the specified length will be discarded.
Additionally, while ``np.bincount`` raises an error if the input array contains
negative values, ``jax.numpy.bincount`` treats negative values as zero.
""")
def bincount(x, weights=None, minlength=0, *, length=None):
_check_arraylike("bincount", x)
if not issubdtype(_dtype(x), integer):
msg = f"x argument to bincount must have an integer type; got {x.dtype}"
raise TypeError(msg)
if length is None:
x = core.concrete_or_error(asarray, x,
"The error occured because of argument 'x' of jnp.bincount. "
"To avoid this error, pass a static `length` argument.")
length = max(x) + 1
length = _max(length, minlength)
if ndim(x) != 1:
raise ValueError("only 1-dimensional input supported.")
if weights is None:
weights = array(1, dtype=int32)
else:
if shape(x) != shape(weights):
raise ValueError("shape of weights must match shape of x.")
return ops.index_add(zeros((length,), _dtype(weights)), ops.index[clip(x, 0)], weights)
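# Illustrative sketch (not part of the library), following the lax_description
# above: passing a static `length` makes bincount jit-compatible, and negative
# values are clipped into bin 0 (assuming `jax`, `jnp`, and functools.partial):
#
#   jax.jit(partial(jnp.bincount, length=4))(jnp.array([0, 1, 1, 3]))
#   # -> [1, 2, 0, 1]
#   jnp.bincount(jnp.array([-1, 1]), length=2)   # -> [1, 1]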
def broadcast_arrays(*args):
"""Like Numpy's broadcast_arrays but doesn't return views."""
shapes = [shape(arg) for arg in args]
if len(set(shapes)) == 1:
return [arg if isinstance(arg, ndarray) or isscalar(arg) else array(arg)
for arg in args]
result_shape = lax.broadcast_shapes(*shapes)
return [broadcast_to(arg, result_shape) for arg in args]
@_wraps(np.broadcast_to, lax_description="""\
The JAX version does not necessarily return a view of the input.
""")
def broadcast_to(arr, shape):
arr = arr if isinstance(arr, ndarray) else array(arr)
shape = canonicalize_shape(shape) # check that shape is concrete
arr_shape = _shape(arr)
if arr_shape == shape:
return arr
else:
nlead = len(shape) - len(arr_shape)
compatible = np.equal(arr_shape, shape[nlead:]) | np.equal(arr_shape, 1)
if nlead < 0 or not np.all(compatible):
msg = "Incompatible shapes for broadcasting: {} and requested shape {}"
raise ValueError(msg.format(arr_shape, shape))
diff, = np.where(np.not_equal(shape[nlead:], arr_shape))
new_dims = tuple(range(nlead)) + tuple(nlead + diff)
kept_dims = tuple(np.delete(np.arange(len(shape)), new_dims))
return lax.broadcast_in_dim(squeeze(arr, tuple(diff)), shape, kept_dims)
def _split(op, ary, indices_or_sections, axis=0):
axis = core.concrete_or_error(int, axis, f"in jax.numpy.{op} argument `axis`")
size = ary.shape[axis]
if isinstance(indices_or_sections, (tuple, list) + _arraylike_types):
indices_or_sections = np.array(
[core.concrete_or_error(np.int64, i_s, f"in jax.numpy.{op} argument 1")
for i_s in indices_or_sections], np.int64)
split_indices = np.concatenate([[np.int64(0)], indices_or_sections,
[np.int64(size)]])
else:
indices_or_sections = core.concrete_or_error(np.int64, indices_or_sections,
f"in jax.numpy.{op} argument 1")
part_size, r = _divmod(size, indices_or_sections)
if r == 0:
split_indices = np.arange(indices_or_sections + 1,
dtype=np.int64) * part_size
elif op == "array_split":
split_indices = np.concatenate(
[np.arange(r + 1, dtype=np.int64) * (part_size + 1),
np.arange(indices_or_sections - r, dtype=np.int64) * part_size
+ ((r + 1) * (part_size + 1) - 1)])
else:
raise ValueError("array split does not result in an equal division")
starts, ends = [0] * ndim(ary), shape(ary)
_subval = lambda x, i, v: subvals(x, [(i, v)])
return [lax.slice(ary, _subval(starts, axis, start), _subval(ends, axis, end))
for start, end in zip(split_indices[:-1], split_indices[1:])]
@_wraps(np.split)
def split(ary, indices_or_sections, axis=0):
return _split("split", ary, indices_or_sections, axis=axis)
def _split_on_axis(np_fun, axis):
@_wraps(np_fun, update_doc=False)
def f(ary, indices_or_sections):
return split(ary, indices_or_sections, axis=axis)
return f
vsplit = _split_on_axis(np.vsplit, axis=0)
hsplit = _split_on_axis(np.hsplit, axis=1)
dsplit = _split_on_axis(np.dsplit, axis=2)
@_wraps(np.array_split)
def array_split(ary, indices_or_sections, axis=0):
return _split("array_split", ary, indices_or_sections, axis=axis)
@_wraps(np.clip)
def clip(a, a_min=None, a_max=None, out=None):
_check_arraylike("clip", a)
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.clip is not supported.")
if a_min is None and a_max is None:
raise ValueError("At most one of a_min and a_max may be None")
if a_min is not None:
a = maximum(a_min, a)
if a_max is not None:
a = minimum(a_max, a)
return a
@_wraps(np.round, update_doc=False)
def round(a, decimals=0, out=None):
_check_arraylike("round", a)
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.round is not supported.")
dtype = _dtype(a)
if issubdtype(dtype, integer):
if decimals < 0:
raise NotImplementedError(
"integer np.round not implemented for decimals < 0")
return a # no-op on integer types
def _round_float(x):
if decimals == 0:
return lax.round(x, lax.RoundingMethod.TO_NEAREST_EVEN)
# TODO(phawkins): the strategy of rescaling the value isn't necessarily a
# good one since we may be left with an incorrectly rounded value at the
# end due to precision problems. As a workaround for float16, convert to
# float32,
x = lax.convert_element_type(x, np.float32) if dtype == np.float16 else x
factor = _constant_like(x, 10 ** decimals)
out = lax.div(lax.round(lax.mul(x, factor),
lax.RoundingMethod.TO_NEAREST_EVEN), factor)
return lax.convert_element_type(out, dtype) if dtype == np.float16 else out
if issubdtype(dtype, complexfloating):
return lax.complex(_round_float(lax.real(a)), _round_float(lax.imag(a)))
else:
return _round_float(a)
around = round
@_wraps(np.fix)
def fix(x, out=None):
_check_arraylike("fix", x)
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.fix is not supported.")
zero = lax._const(x, 0)
return where(lax.ge(x, zero), floor(x), ceil(x))
@_wraps(np.modf)
def modf(x, out=None):
_check_arraylike("modf", x)
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.modf is not supported.")
whole = fix(x)
return x - whole, whole
@_wraps(np.isfinite)
def isfinite(x):
_check_arraylike("isfinite", x)
dtype = _dtype(x)
if issubdtype(dtype, floating):
return lax.is_finite(x)
elif issubdtype(dtype, complexfloating):
return lax.bitwise_and(lax.is_finite(real(x)), lax.is_finite(imag(x)))
else:
return full_like(x, True, dtype=bool_)
@_wraps(np.isinf)
def isinf(x):
_check_arraylike("isinf", x)
dtype = _dtype(x)
if issubdtype(dtype, floating):
return lax.eq(lax.abs(x), _constant_like(x, inf))
elif issubdtype(dtype, complexfloating):
re = lax.real(x)
im = lax.imag(x)
return lax.bitwise_or(lax.eq(lax.abs(re), _constant_like(re, inf)),
lax.eq(lax.abs(im), _constant_like(im, inf)))
else:
return full_like(x, False, dtype=bool_)
def _isposneginf(infinity, x, out):
if out is not None:
raise NotImplementedError("The 'out' argument to isneginf/isposinf is not supported.")
dtype = _dtype(x)
if issubdtype(dtype, floating):
return lax.eq(x, _constant_like(x, infinity))
elif issubdtype(dtype, complexfloating):
raise ValueError("isposinf/isneginf are not well defined for complex types")
else:
return full_like(x, False, dtype=bool_)
isposinf = _wraps(np.isposinf)(lambda x, out=None: _isposneginf(inf, x, out))
isneginf = _wraps(np.isneginf)(lambda x, out=None: _isposneginf(-inf, x, out))
@_wraps(np.isnan)
def isnan(x):
_check_arraylike("isnan", x)
return lax.ne(x, x)
@_wraps(np.nan_to_num)
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
del copy
_check_arraylike("nan_to_num", x)
dtype = _dtype(x)
if issubdtype(dtype, complexfloating):
return lax.complex(
nan_to_num(lax.real(x), nan=nan, posinf=posinf, neginf=neginf),
nan_to_num(lax.imag(x), nan=nan, posinf=posinf, neginf=neginf))
info = finfo(dtypes.canonicalize_dtype(dtype))
posinf = info.max if posinf is None else posinf
neginf = info.min if neginf is None else neginf
x = where(isnan(x), _constant_like(x, nan), x)
x = where(isposinf(x), _constant_like(x, posinf), x)
x = where(isneginf(x), _constant_like(x, neginf), x)
return x
### Reducers
def _reduction(a, name, np_fun, op, init_val, has_identity=True,
preproc=None, bool_op=None, upcast_f16_for_computation=False,
axis=None, dtype=None, out=None, keepdims=False, initial=None, where_=None):
bool_op = bool_op or op
if out is not None:
raise NotImplementedError(f"The 'out' argument to jnp.{name} is not supported.")
_check_arraylike(name, a)
lax._check_user_dtype_supported(dtype, name)
axis = core.concrete_or_error(None, axis, f"axis argument to jnp.{name}().")
if initial is None and not has_identity:
if not size(a):
raise ValueError(f"zero-size array to reduction operation {name} which has no identity")
if where_ is not None:
raise ValueError(f"reduction operation {name} does not have an identity, so to use a "
f"where mask one has to specify 'initial'")
a = a if isinstance(a, ndarray) else asarray(a)
a = preproc(a) if preproc else a
dims = _reduction_dims(a, axis)
result_dtype = dtypes.canonicalize_dtype(dtype or _dtype(np_fun(np.ones((), dtype=_dtype(a)))))
if upcast_f16_for_computation and issubdtype(result_dtype, inexact):
computation_dtype = promote_types(result_dtype, float32)
else:
computation_dtype = result_dtype
a = lax.convert_element_type(a, computation_dtype)
op = op if computation_dtype != np.bool_ else bool_op
# NB: in XLA, init_val must be an identity for the op, so the user-specified
# initial value must be applied afterward.
init_val = _reduction_init_val(a, init_val)
if where_ is not None:
a = where(where_, a, init_val)
result = lax.reduce(a, init_val, op, dims)
if initial is not None:
result = op(_reduction_init_val(a, initial), result)
if keepdims:
result = expand_dims(result, dims)
return lax.convert_element_type(result, dtype or result_dtype)
def _reduction_dims(a, axis):
if axis is None:
return tuple(range(ndim(a)))
elif isinstance(axis, (np.ndarray, tuple, list)):
if len(axis) != len(set(axis)):
raise ValueError(f"duplicate value in 'axis': {axis}")
return tuple(_canonicalize_axis(x, ndim(a)) for x in axis)
elif isinstance(axis, int):
return (_canonicalize_axis(axis, ndim(a)),)
else:
raise TypeError("Unexpected type of axis argument: {}".format(type(axis)))
def _reduction_init_val(a, init_val):
a_dtype = dtypes.canonicalize_dtype(_dtype(a))
if a_dtype == 'bool':
return np.array(init_val > 0, dtype=a_dtype)
try:
return np.array(init_val, dtype=a_dtype)
except OverflowError:
assert issubdtype(a_dtype, integer)
sign, info = np.sign(init_val), iinfo(a_dtype)
return np.array(info.min if sign < 0 else info.max, dtype=a_dtype)
_cast_to_bool = partial(lax.convert_element_type, new_dtype=bool_)
@_wraps(np.sum)
def sum(a, axis=None, dtype=None, out=None, keepdims=None, initial=None, where=None):
return _reduction(a, "sum", np.sum, lax.add, 0,
bool_op=lax.bitwise_or, upcast_f16_for_computation=True,
axis=axis, dtype=dtype, out=out, keepdims=keepdims, initial=initial, where_=where)
@_wraps(np.prod)
def prod(a, axis=None, dtype=None, out=None, keepdims=None, initial=None, where=None):
return _reduction(a, "prod", np.prod, lax.mul, 1,
bool_op=lax.bitwise_and, upcast_f16_for_computation=True,
axis=axis, dtype=dtype, out=out, keepdims=keepdims, initial=initial, where_=where)
@_wraps(np.max)
def max(a, axis=None, out=None, keepdims=None, initial=None, where=None):
return _reduction(a, "max", np.max, lax.max, -np.inf, has_identity=False,
axis=axis, out=out, keepdims=keepdims, initial=initial, where_=where)
@_wraps(np.min)
def min(a, axis=None, out=None, keepdims=None, initial=None, where=None):
return _reduction(a, "min", np.min, lax.min, np.inf, has_identity=False,
axis=axis, out=out, keepdims=keepdims, initial=initial, where_=where)
@_wraps(np.all)
def all(a, axis=None, out=None, keepdims=None):
return _reduction(a, "all", np.all, lax.bitwise_and, True, preproc=_cast_to_bool,
axis=axis, out=out, keepdims=keepdims)
@_wraps(np.any)
def any(a, axis=None, out=None, keepdims=None):
return _reduction(a, "any", np.any, lax.bitwise_or, False, preproc=_cast_to_bool,
axis=axis, out=out, keepdims=keepdims)
product = prod
amin = min
amax = max
alltrue = all
sometrue = any
@_wraps(np.mean)
def mean(a, axis=None, dtype=None, out=None, keepdims=False):
_check_arraylike("mean", a)
lax._check_user_dtype_supported(dtype, "mean")
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.mean is not supported.")
if axis is None:
normalizer = size(a)
else:
normalizer = np.prod(np.take(shape(a), axis))
if dtype is None:
if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer):
dtype = float_
else:
dtype = _dtype(a)
dtype = dtypes.canonicalize_dtype(dtype)
return lax.div(
sum(a, axis, dtype=dtype, keepdims=keepdims),
lax.convert_element_type(normalizer, dtype))
@_wraps(np.average)
def average(a, axis=None, weights=None, returned=False):
a = asarray(a)
if weights is None: # Treat all weights as 1
avg = mean(a, axis=axis)
if axis is None:
weights_sum = full((), size(a), dtype=avg.dtype)
else:
weights_sum = full_like(avg, a.shape[axis], dtype=avg.dtype)
else:
weights = asarray(weights)
if issubdtype(a.dtype, inexact):
out_dtype = result_type(a.dtype, weights.dtype)
else:
out_dtype = result_type(a.dtype, weights.dtype, float_)
out_dtype = dtypes.canonicalize_dtype(out_dtype)
a_shape = shape(a)
a_ndim = len(a_shape)
weights_shape = shape(weights)
axis = None if axis is None else _canonicalize_axis(axis, a_ndim)
if a_shape != weights_shape:
# Make sure the dimensions work out
if axis is None:
raise ValueError("Axis must be specified when shapes of a and "
"weights differ.")
if len(weights_shape) != 1:
raise ValueError("1D weights expected when shapes of a and "
"weights differ.")
if weights_shape[0] != a_shape[axis]:
raise ValueError("Length of weights not "
"compatible with specified axis.")
weights = broadcast_to(weights, (a_ndim - 1) * (1,) + weights_shape)
weights = moveaxis(weights, -1, axis)
weights_sum = sum(weights, axis=axis, dtype=out_dtype)
avg = sum(multiply(a, weights), axis=axis, dtype=out_dtype) / weights_sum
if returned:
if avg.shape != weights_sum.shape:
weights_sum = broadcast_to(weights_sum, avg.shape)
return avg, weights_sum
return avg
@_wraps(np.var)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
_check_arraylike("var", a)
lax._check_user_dtype_supported(dtype, "var")
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.var is not supported.")
a_dtype, dtype = _var_promote_types(_dtype(a), dtype)
a_mean = mean(a, axis, dtype=a_dtype, keepdims=True)
centered = a - a_mean
if issubdtype(centered.dtype, complexfloating):
centered = lax.real(lax.mul(centered, lax.conj(centered)))
else:
centered = lax.square(centered)
if axis is None:
normalizer = size(a)
else:
normalizer = np.prod(np.take(shape(a), axis))
normalizer = normalizer - ddof
result = sum(centered, axis, keepdims=keepdims)
out = lax.div(result, lax.convert_element_type(normalizer, result.dtype))
return lax.convert_element_type(out, dtype)
def _var_promote_types(a_dtype, dtype):
if dtype:
if (not issubdtype(dtype, complexfloating) and
issubdtype(a_dtype, complexfloating)):
msg = ("jax.numpy.var does not yet support real dtype parameters when "
"computing the variance of an array of complex values. The "
"semantics of numpy.var seem unclear in this case. Please comment "
"on https://github.com/google/jax/issues/2283 if this behavior is "
"important to you.")
raise ValueError(msg)
a_dtype = promote_types(a_dtype, dtype)
else:
if not issubdtype(a_dtype, inexact):
dtype = a_dtype = dtypes.canonicalize_dtype(float_)
else:
dtype = _complex_elem_type(a_dtype)
a_dtype = promote_types(a_dtype, float32)
return a_dtype, dtype
@_wraps(np.std)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
_check_arraylike("std", a)
lax._check_user_dtype_supported(dtype, "std")
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.std is not supported.")
return sqrt(var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims))
@_wraps(np.ptp)
def ptp(a, axis=None, out=None, keepdims=False):
_check_arraylike("ptp", a)
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.ptp is not supported.")
x = amax(a, axis=axis, keepdims=keepdims)
y = amin(a, axis=axis, keepdims=keepdims)
return lax.sub(x, y)
@_wraps(np.allclose)
def allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
return all(isclose(a, b, rtol, atol, equal_nan))
@_wraps(np.count_nonzero)
def count_nonzero(a, axis=None, keepdims=False):
_check_arraylike("count_nonzero", a)
return sum(lax.ne(a, _constant_like(a, 0)), axis=axis,
dtype=dtypes.canonicalize_dtype(np.int_), keepdims=keepdims)
_NONZERO_DOC = """\
At present, JAX does not support JIT-compilation of :py:func:`jax.numpy.nonzero`
because its output shape is data-dependent.
"""
@_wraps(np.nonzero, lax_description=_NONZERO_DOC)
def nonzero(a):
# Note: this function cannot be jitted because its output has a dynamic
# shape.
a = core.concrete_or_error(atleast_1d, a, "The error arose in jnp.nonzero")
dims = shape(a)
ndims = len(dims)
ds = [lax.broadcasted_iota(int_, dims + (1,), i) for i in range(ndims)]
d = concatenate(ds, axis=-1)
indexes = d[a != 0]
return tuple(indexes[..., i] for i in range(ndims))
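# Illustrative sketch (not part of the library), following _NONZERO_DOC: the
# output length depends on the values, so nonzero only works on concrete
# (non-traced) inputs (assuming `jnp`):
#
#   jnp.nonzero(jnp.array([0, 3, 0, 5]))   # -> (array([1, 3]),)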
@_wraps(np.flatnonzero)
def flatnonzero(a):
return nonzero(ravel(a))[0]
def _nan_reduction(a, name, jnp_reduction, init_val, nan_if_all_nan,
axis=None, keepdims=None, **kwargs):
_check_arraylike(name, a)
out = jnp_reduction(where(isnan(a), _reduction_init_val(a, init_val), a),
axis=axis, keepdims=keepdims, **kwargs)
if nan_if_all_nan:
return where(all(isnan(a), axis=axis, keepdims=keepdims),
_constant_like(a, nan), out)
else:
return out
@_wraps(np.nanmin)
def nanmin(a, axis=None, out=None, keepdims=None):
return _nan_reduction(a, 'nanmin', min, inf, nan_if_all_nan=True,
axis=axis, out=out, keepdims=keepdims)
@_wraps(np.nanmax)
def nanmax(a, axis=None, out=None, keepdims=None):
return _nan_reduction(a, 'nanmax', max, -inf, nan_if_all_nan=True,
axis=axis, out=out, keepdims=keepdims)
@_wraps(np.nansum)
def nansum(a, axis=None, dtype=None, out=None, keepdims=None):
return _nan_reduction(a, 'nansum', sum, 0, nan_if_all_nan=False,
axis=axis, dtype=dtype, out=out, keepdims=keepdims)
@_wraps(np.nanprod)
def nanprod(a, axis=None, dtype=None, out=None, keepdims=None):
return _nan_reduction(a, 'nanprod', prod, 1, nan_if_all_nan=False,
axis=axis, dtype=dtype, out=out, keepdims=keepdims)
@_wraps(np.nanmean)
def nanmean(a, axis=None, dtype=None, out=None, keepdims=False):
_check_arraylike("nanmean", a)
lax._check_user_dtype_supported(dtype, "nanmean")
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.nanmean is not supported.")
if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer):
return mean(a, axis, dtype, out, keepdims)
if dtype is None:
dtype = _dtype(a)
nan_mask = logical_not(isnan(a))
normalizer = sum(nan_mask, axis=axis, dtype=int32, keepdims=keepdims)
normalizer = lax.convert_element_type(normalizer, dtype)
td = lax.div(nansum(a, axis, dtype=dtype, keepdims=keepdims), normalizer)
return td
@_wraps(np.nanvar)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
_check_arraylike("nanvar", a)
lax._check_user_dtype_supported(dtype, "nanvar")
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.nanvar is not supported.")
a_dtype, dtype = _var_promote_types(_dtype(a), dtype)
a_mean = nanmean(a, axis, dtype=a_dtype, keepdims=True)
centered = a - a_mean
if issubdtype(centered.dtype, complexfloating):
centered = lax.real(lax.mul(centered, lax.conj(centered)))
else:
centered = lax.square(centered)
normalizer = sum(logical_not(isnan(a)), axis=axis, keepdims=keepdims)
normalizer = normalizer - ddof
if config.omnistaging_enabled:
normalizer_mask = lax.le(normalizer, 0)
else:
zero = lax.full_like(normalizer, 0, shape=())
normalizer_mask = lax.le(normalizer, zero)
result = nansum(centered, axis, keepdims=keepdims)
result = where(normalizer_mask, nan, result)
divisor = where(normalizer_mask, 1, normalizer)
out = lax.div(result, lax.convert_element_type(divisor, result.dtype))
return lax.convert_element_type(out, dtype)
@_wraps(np.nanstd)
def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
_check_arraylike("nanstd", a)
lax._check_user_dtype_supported(dtype, "nanstd")
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.nanstd is not supported.")
return sqrt(nanvar(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims))
def _make_cumulative_reduction(np_reduction, reduction, fill_nan=False, fill_value=0):
# We want to allow XLA to fuse the pad and reduce-window operators to
# avoid materializing the padded output.
# Consider removing `jit` once again if reduce-window is generalized to
# support arbitrary padding.
@partial(jit, static_argnums=(1, 2))
def _cumulative_reduction(a, axis, dtype):
if axis is None or isscalar(a):
a = ravel(a)
axis = 0
a_shape = list(shape(a))
num_dims = len(a_shape)
axis = _canonicalize_axis(axis, num_dims)
if fill_nan:
a = where(isnan(a), _constant_like(a, fill_value), a)
if not dtype and _dtype(a) == bool_:
dtype = int_
if dtype:
a = lax.convert_element_type(a, dtype)
return reduction(a, axis)
@_wraps(np_reduction)
def cumulative_reduction(a, axis=None, dtype=None, out=None):
_check_arraylike(np_reduction.__name__, a)
if out is not None:
raise NotImplementedError(f"The 'out' argument to jnp.{np_reduction.__name__} "
f"is not supported.")
lax._check_user_dtype_supported(dtype, np_reduction.__name__)
# jit doesn't support kwargs as static_args.
return _cumulative_reduction(a, axis, dtype)
return cumulative_reduction
cumsum = _make_cumulative_reduction(np.cumsum, lax.cumsum, fill_nan=False)
cumprod = _make_cumulative_reduction(np.cumprod, lax.cumprod, fill_nan=False)
cumproduct = cumprod
nancumsum = _make_cumulative_reduction(np.nancumsum, lax.cumsum,
fill_nan=True, fill_value=0)
nancumprod = _make_cumulative_reduction(np.nancumprod, lax.cumprod,
fill_nan=True, fill_value=1)
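# Illustrative sketch (not part of the library): the fill_nan/fill_value
# machinery above replaces NaNs with the reduction's identity before the scan
# (assuming `jnp`):
#
#   jnp.nancumsum(jnp.array([1., jnp.nan, 2.]))    # -> [1., 1., 3.]
#   jnp.nancumprod(jnp.array([2., jnp.nan, 3.]))   # -> [2., 2., 6.]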
@_wraps(np.unwrap)
def unwrap(p, discont=pi, axis=-1):
_check_arraylike("unwrap", p)
dd = diff(p, axis=axis)
ddmod = mod(dd + pi, 2 * pi) - pi
ddmod = where((ddmod == -pi) & (dd > 0), pi, ddmod)
ph_correct = where(abs(dd) < discont, 0, ddmod - dd)
up = concatenate((
lax.slice_in_dim(p, 0, 1, axis=axis),
lax.slice_in_dim(p, 1, None, axis=axis) + cumsum(ph_correct, axis=axis)
), axis=axis)
return up
### Array-creation functions
def _check_no_padding(axis_padding, mode):
if (axis_padding[0] > 0 or axis_padding[1] > 0):
msg = "Cannot apply '{}' padding to empty axis"
raise ValueError(msg.format(mode))
def _pad_constant(array, pad_width, constant_values):
nd = ndim(array)
constant_values = broadcast_to(asarray(constant_values), (nd, 2))
constant_values = lax.convert_element_type(constant_values, array.dtype)
for i in range(nd):
widths = [(0, 0, 0)] * nd
widths[i] = (pad_width[i, 0], 0, 0)
array = lax.pad(array, constant_values[i, 0], widths)
widths[i] = (0, pad_width[i, 1], 0)
array = lax.pad(array, constant_values[i, 1], widths)
return array
def _pad_wrap(array, pad_width):
for i in range(ndim(array)):
if array.shape[i] == 0:
_check_no_padding(pad_width[i], "wrap")
continue
size = array.shape[i]
repeats, (left_remainder, right_remainder) = _divmod(pad_width[i], size)
total_repeats = repeats.sum() + 1
parts = []
if left_remainder:
parts += [lax.slice_in_dim(array, size - left_remainder, size, axis=i)]
parts += total_repeats * [array]
if right_remainder:
parts += [lax.slice_in_dim(array, 0, right_remainder, axis=i)]
array = lax.concatenate(parts, dimension=i)
return array
def _pad_symmetric_or_reflect(array, pad_width, mode):
assert mode in ("symmetric", "reflect")
for i in range(ndim(array)):
if array.shape[i] == 0:
_check_no_padding(pad_width[i], mode)
continue
n = array.shape[i]
rarray = lax.rev(array, dimensions=(i,))
offset = 1 if (mode == "reflect" and n > 1) else 0
def build_padding(padding, forward):
xs = []
delta = n - offset
while padding > delta:
padding -= delta
p = array if forward else rarray
xs.append(lax.slice_in_dim(p, offset, n, axis=i))
forward = not forward
if padding > 0:
x = lax.slice_in_dim(array if forward else rarray, offset,
padding + offset, axis=i)
xs.append(x)
return xs
parts = reversed(build_padding(pad_width[i, 0], forward=True))
parts = [lax.rev(x, dimensions=(i,)) for x in parts]
parts += [array]
parts += build_padding(pad_width[i, 1], forward=False)
array = lax.concatenate(parts, dimension=i)
return array
def _pad_edge(array, pad_width):
nd = ndim(array)
for i in range(nd):
if array.shape[i] == 0:
_check_no_padding(pad_width[i], "edge")
continue
n = array.shape[i]
npad_before, npad_after = pad_width[i]
edge_before = lax.slice_in_dim(array, 0, 1, axis=i)
pad_before = repeat(edge_before, npad_before, axis=i)
edge_after = lax.slice_in_dim(array, n-1, n, axis=i)
pad_after = repeat(edge_after, npad_after, axis=i)
array = lax.concatenate([pad_before, array, pad_after], dimension=i)
return array
def _pad_linear_ramp(array, pad_width, end_values):
for axis in range(ndim(array)):
edge_before = lax.slice_in_dim(array, 0, 1, axis=axis)
edge_after = lax.slice_in_dim(array, -1, None, axis=axis)
ramp_before = linspace(
start=end_values[axis][0],
stop=edge_before.squeeze(axis), # Dimension is replaced by linspace
num=pad_width[axis][0],
endpoint=False,
dtype=array.dtype,
axis=axis
)
ramp_after = linspace(
start=end_values[axis][1],
stop=edge_after.squeeze(axis), # Dimension is replaced by linspace
num=pad_width[axis][1],
endpoint=False,
dtype=array.dtype,
axis=axis
)
# Reverse linear space in appropriate dimension
ramp_after = flip(ramp_after, axis)
array = lax.concatenate([ramp_before, array, ramp_after], dimension=axis)
return array
def _pad_stats(array, pad_width, stat_length, stat_func):
nd = ndim(array)
for i in range(nd):
if stat_length is None:
stat_before = stat_func(array, axis=i, keepdims=True)
stat_after = stat_before
else:
array_length = array.shape[i]
length_before, length_after = stat_length[i]
if length_before == 0 or length_after == 0:
raise ValueError("stat_length of 0 yields no value for padding")
# Limit stat_length to length of array.
length_before = _min(length_before, array_length)
length_after = _min(length_after, array_length)
slice_before = lax.slice_in_dim(array, 0, length_before, axis=i)
slice_after = lax.slice_in_dim(array, -length_after, None, axis=i)
stat_before = stat_func(slice_before, axis=i, keepdims=True)
stat_after = stat_func(slice_after, axis=i, keepdims=True)
if np.issubdtype(array.dtype, np.integer):
stat_before = round(stat_before)
stat_after = round(stat_after)
stat_before = stat_before.astype(array.dtype)
stat_after = stat_after.astype(array.dtype)
npad_before, npad_after = pad_width[i]
pad_before = repeat(stat_before, npad_before, axis=i)
pad_after = repeat(stat_after, npad_after, axis=i)
array = lax.concatenate([pad_before, array, pad_after], dimension=i)
return array
def _broadcast_to_pairs(nvals, nd, name):
nvals_shape = np.shape(nvals)
if nvals_shape == (nd, 2):
# ((before_1, after_1), ..., (before_N, after_N))
pass
elif nvals_shape == (1, 2):
# ((before, after),)
nvals = nvals * nd
elif nvals_shape == (2,):
# (before, after) (not in the numpy docstring but works anyway)
before, after = nvals
nvals = (nvals,) * nd
elif nvals_shape == (1,):
# (pad,)
nvals, = nvals
nvals = ((nvals, nvals),) * nd
elif nvals_shape == ():
# pad
nvals = ((nvals, nvals),) * nd
else:
raise ValueError(f"{name} given unexpected structure: {nvals}. "
"See docstring for valid {name} formats.")
return nvals
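# Illustrative sketch (not part of the library): the accepted pad_width /
# stat_length / end_values structures normalize as follows for nd == 2:
#
#   3                 -> ((3, 3), (3, 3))
#   (2, 3)            -> ((2, 3), (2, 3))
#   ((1, 2),)         -> ((1, 2), (1, 2))
#   ((1, 2), (3, 4))  -> unchanged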
@partial(jit, static_argnums=(1, 2, 4, 5))
def _pad(array, pad_width, mode, constant_values, stat_length, end_values):
array = asarray(array)
nd = ndim(array)
if nd == 0:
return array
stat_funcs = {"maximum": amax, "minimum": amin,
"mean": mean, "median": median}
pad_width = _broadcast_to_pairs(pad_width, nd, "pad_width")
pad_width = np.array(pad_width)
assert pad_width.shape == (nd, 2), pad_width
if np.any(pad_width < 0):
raise ValueError("index can't contain negative values")
if mode == "constant":
return _pad_constant(array, pad_width, constant_values)
elif mode == "wrap":
return _pad_wrap(array, pad_width)
elif mode in ("symmetric", "reflect"):
return _pad_symmetric_or_reflect(array, pad_width, mode)
elif mode == "edge":
return _pad_edge(array, pad_width)
elif mode == "linear_ramp":
end_values = _broadcast_to_pairs(end_values, nd, "end_values")
return _pad_linear_ramp(array, pad_width, end_values)
elif mode in stat_funcs:
if stat_length is not None:
stat_length = _broadcast_to_pairs(stat_length, nd, "stat_length")
return _pad_stats(array, pad_width, stat_length, stat_funcs[mode])
else:
msg = "Unimplemented padding mode '{}' for np.pad."
raise NotImplementedError(msg.format(mode))
@_wraps(np.pad)
def pad(array, pad_width, mode="constant", constant_values=0, stat_length=None,
end_values=0):
if isinstance(pad_width, Iterable):
pad_width = tuple(
tuple(int(i) for i in x) if isinstance(x, Iterable) else x
for x in pad_width)
return _pad(array, pad_width, mode, constant_values, stat_length, end_values)
@_wraps(np.stack)
def stack(arrays, axis=0, out=None):
if not len(arrays):
raise ValueError("Need at least one array to stack.")
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.stack is not supported.")
_check_arraylike("stack", *arrays)
shape0 = shape(arrays[0])
axis = _canonicalize_axis(axis, len(shape0) + 1)
new_arrays = []
for a in arrays:
if shape(a) != shape0:
raise ValueError("All input arrays must have the same shape.")
new_arrays.append(expand_dims(a, axis))
return concatenate(new_arrays, axis=axis)
@_wraps(np.tile)
def tile(A, reps):
_check_arraylike("tile", A)
if isinstance(reps, int):
reps = (reps,)
A_shape = (1,) * (len(reps) - ndim(A)) + shape(A)
reps = (1,) * (len(A_shape) - len(reps)) + tuple(reps)
result = broadcast_to(reshape(A, [j for i in A_shape for j in [1, i]]),
[k for pair in zip(reps, A_shape) for k in pair])
return reshape(result, tuple(np.multiply(A_shape, reps)))
@_wraps(np.concatenate)
def concatenate(arrays, axis=0):
_check_arraylike("concatenate", *arrays)
if not len(arrays):
raise ValueError("Need at least one array to concatenate.")
if ndim(arrays[0]) == 0:
raise ValueError("Zero-dimensional arrays cannot be concatenated.")
if axis is None:
return concatenate([ravel(a) for a in arrays], axis=0)
axis = _canonicalize_axis(axis, ndim(arrays[0]))
arrays = _promote_dtypes(*arrays)
# lax.concatenate can be slow to compile for wide concatenations, so form a
# tree of concatenations as a workaround especially for op-by-op mode.
# (https://github.com/google/jax/issues/653).
k = 16
if len(arrays) == 1:
return asarray(arrays[0])
else:
while len(arrays) > 1:
arrays = [lax.concatenate(arrays[i:i+k], axis)
for i in range(0, len(arrays), k)]
return arrays[0]
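# Illustrative sketch (not part of the library): the k=16 chunking above turns
# a wide concatenation into a shallow tree, e.g. 40 input arrays become 3
# partial concatenations on the first pass and a single lax.concatenate on the
# second, which is the compile-time workaround described in the comment.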
@_wraps(np.vstack)
def vstack(tup):
return concatenate([atleast_2d(m) for m in tup], axis=0)
row_stack = vstack
@_wraps(np.hstack)
def hstack(tup):
arrs = [atleast_1d(m) for m in tup]
if arrs[0].ndim == 1:
return concatenate(arrs, 0)
return concatenate(arrs, 1)
@_wraps(np.dstack)
def dstack(tup):
return concatenate([atleast_3d(m) for m in tup], axis=2)
@_wraps(np.column_stack)
def column_stack(tup):
arrays = []
for v in tup:
arr = asarray(v)
if arr.ndim < 2:
arr = atleast_2d(arr).T
arrays.append(arr)
return concatenate(arrays, 1)
@_wraps(np.choose)
def choose(a, choices, out=None, mode='raise'):
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.choose is not supported.")
_check_arraylike('choose', a, *choices)
if not issubdtype(_dtype(a), integer):
raise ValueError("`a` array must be integer typed")
N = len(choices)
if mode == 'raise':
a = core.concrete_or_error(asarray, a,
"The error occurred because jnp.choose was jit-compiled"
" with mode='raise'. Use mode='wrap' or mode='clip' instead.")
if any((a < 0) | (a >= N)):
raise ValueError("invalid entry in choice array")
elif mode == 'wrap':
a = a % N
elif mode == 'clip':
a = clip(a, 0, N - 1)
else:
raise ValueError(f"mode={mode!r} not understood. Must be 'raise', 'wrap', or 'clip'")
a, *choices = broadcast_arrays(a, *choices)
return array(choices)[(a,) + indices(a.shape, sparse=True)]
def _atleast_nd(x, n):
m = ndim(x)
return lax.broadcast(x, (1,) * (n - m)) if m < n else x
def _block(xs):
if isinstance(xs, tuple):
raise ValueError("jax.numpy.block does not allow tuples, got {}"
.format(xs))
elif isinstance(xs, list):
if len(xs) == 0:
raise ValueError("jax.numpy.block does not allow empty list arguments")
xs, depths = unzip2([_block(x) for x in xs])
if _any(d != depths[0] for d in depths[1:]):
raise ValueError("Mismatched list depths in jax.numpy.block")
rank = _max(depths[0], _max(ndim(x) for x in xs))
xs = [_atleast_nd(x, rank) for x in xs]
return concatenate(xs, axis=-depths[0]), depths[0] + 1
else:
return asarray(xs), 1
@_wraps(np.block)
@jit
def block(arrays):
out, _ = _block(arrays)
return out
@_wraps(np.atleast_1d, update_doc=False)
def atleast_1d(*arys):
if len(arys) == 1:
arr = asarray(arys[0])
return arr if ndim(arr) >= 1 else reshape(arr, -1)
else:
return [atleast_1d(arr) for arr in arys]
@_wraps(np.atleast_2d, update_doc=False)
def atleast_2d(*arys):
if len(arys) == 1:
arr = asarray(arys[0])
if ndim(arr) >= 2:
return arr
elif ndim(arr) == 1:
return expand_dims(arr, axis=0)
else:
return expand_dims(arr, axis=(0, 1))
else:
return [atleast_2d(arr) for arr in arys]
@_wraps(np.atleast_3d, update_doc=False)
def atleast_3d(*arys):
if len(arys) == 1:
arr = asarray(arys[0])
if ndim(arr) == 0:
arr = expand_dims(arr, axis=(0, 1, 2))
elif ndim(arr) == 1:
arr = expand_dims(arr, axis=(0, 2))
elif ndim(arr) == 2:
arr = expand_dims(arr, axis=2)
return arr
else:
return [atleast_3d(arr) for arr in arys]
@_wraps(np.array)
def array(object, dtype=None, copy=True, order="K", ndmin=0):
if order is not None and order != "K":
raise NotImplementedError("Only implemented for order='K'")
lax._check_user_dtype_supported(dtype, "array")
dtype = dtype and dtypes.canonicalize_dtype(dtype)
if _can_call_numpy_array(object):
object = _np_array(object, dtype=dtype, ndmin=ndmin, copy=False)
assert type(object) not in dtypes.python_scalar_dtypes
if type(object) is np.ndarray:
out = _device_put_raw(object)
if dtype: assert _dtype(out) == dtype
elif isinstance(object, (DeviceArray, core.Tracer)):
if isinstance(object, DeviceArray) and copy:
# We perform a copy by bouncing back to the host
# TODO(phawkins): add a device runtime function to copy a buffer
out = _device_put_raw(_np_asarray(object))
else:
out = object
elif isinstance(object, (list, tuple)):
if object:
out = stack([asarray(elt, dtype=dtype) for elt in object])
else:
out = _device_put_raw(_np_array([], dtype=dtype))
else:
try:
view = memoryview(object)
except TypeError:
pass # `object` does not support the buffer interface.
else:
return array(_np_asarray(view), dtype, copy)
raise TypeError("Unexpected input type for array: {}".format(type(object)))
if dtype and _dtype(out) != dtype:
out = lax.convert_element_type(out, dtype)
if ndmin > ndim(out):
out = lax.broadcast(out, (1,) * (ndmin - ndim(out)))
return out
def _can_call_numpy_array(x):
return _all(not isinstance(l, (core.Tracer, DeviceArray))
for l in tree_leaves(x))
@_wraps(np.asarray)
def asarray(a, dtype=None, order=None):
lax._check_user_dtype_supported(dtype, "asarray")
dtype = dtypes.canonicalize_dtype(dtype) if dtype is not None else dtype
return array(a, dtype=dtype, copy=False, order=order)
@_wraps(np.zeros_like)
def zeros_like(a, dtype=None, shape=None):
_check_arraylike("zeros_like", a)
lax._check_user_dtype_supported(dtype, "zeros_like")
if np.isscalar(shape):
shape = (shape,)
return lax.full_like(a, 0, dtype, shape)
@_wraps(np.ones_like)
def ones_like(a, dtype=None, shape=None):
_check_arraylike("ones_like", a)
lax._check_user_dtype_supported(dtype, "ones_like")
if np.isscalar(shape):
shape = (shape,)
return lax.full_like(a, 1, dtype, shape)
@_wraps(np.full)
def full(shape, fill_value, dtype=None):
lax._check_user_dtype_supported(dtype, "full")
shape = (shape,) if ndim(shape) == 0 else shape
return lax.full(shape, fill_value, dtype)
@_wraps(np.full_like)
def full_like(a, fill_value, dtype=None, shape=None):
_check_arraylike("full_like", a)
lax._check_user_dtype_supported(dtype, "full_like")
if np.isscalar(shape):
shape = (shape,)
return lax.full_like(a, fill_value, dtype, shape)
@_wraps(np.zeros)
def zeros(shape, dtype=None):
if isinstance(shape, types.GeneratorType):
raise TypeError("expected sequence object with len >= 0 or a single integer")
lax._check_user_dtype_supported(dtype, "zeros")
dtype = float_ if dtype is None else dtype
shape = (shape,) if ndim(shape) == 0 else shape
return lax.full(shape, 0, dtype)
@_wraps(np.ones)
def ones(shape, dtype=None):
if isinstance(shape, types.GeneratorType):
raise TypeError("expected sequence object with len >= 0 or a single integer")
lax._check_user_dtype_supported(dtype, "ones")
dtype = float_ if dtype is None else dtype
shape = (shape,) if ndim(shape) == 0 else shape
return lax.full(shape, 1, dtype)
@_wraps(np.array_equal)
def array_equal(a1, a2, equal_nan=False):
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
if shape(a1) != shape(a2):
return False
eq = asarray(a1 == a2)
if equal_nan:
eq = logical_or(eq, logical_and(isnan(a1), isnan(a2)))
return all(eq)
@_wraps(np.array_equiv)
def array_equiv(a1, a2):
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
try:
eq = equal(a1, a2)
except ValueError:
# shapes are not broadcastable
return False
return all(eq)
# We can't create uninitialized arrays in XLA; use zeros for empty.
empty_like = zeros_like
empty = zeros
@_wraps(np.eye)
def eye(N, M=None, k=0, dtype=None):
lax._check_user_dtype_supported(dtype, "eye")
dtype = float_ if dtype is None else dtype
M = N if M is None else M
k = int(k)
if N < 0 or M < 0:
msg = "negative dimensions are not allowed, got {} and {}"
raise ValueError(msg.format(N, M))
if k is not None:
k_dtype = _dtype(k)
if not issubdtype(k_dtype, integer):
msg = "eye argument `k` must be of integer dtype, got {}"
raise TypeError(msg.format(k_dtype))
return lax._eye(dtype, (N, M), k)
@_wraps(np.identity)
def identity(n, dtype=None):
lax._check_user_dtype_supported(dtype, "identity")
return eye(n, dtype=dtype)
@_wraps(np.arange)
def arange(start, stop=None, step=None, dtype=None):
lax._check_user_dtype_supported(dtype, "arange")
require = partial(core.concrete_or_error, _np_asarray)
msg = "It arose in jax.numpy.arange argument `{}`.".format
if stop is None and step is None:
start = require(start, msg("stop"))
dtype = dtype or _dtype(start)
return lax.iota(dtype, np.ceil(start)) # avoids materializing
else:
start = require(start, msg("start"))
stop = None if stop is None else require(stop, msg("stop"))
step = None if step is None else require(step, msg("step"))
if dtype is None:
dtype = _dtype(start, *(x for x in [stop, step] if x is not None))
return array(np.arange(start, stop=stop, step=step, dtype=dtype))
def _wrap_numpy_nullary_function(f):
"""Adapts `f` to return a DeviceArray instead of an np.ndarray.
`f` cannot have any non-static array arguments.
"""
@_wraps(f, update_doc=False)
def wrapper(*args, **kwargs):
return asarray(f(*args, **kwargs))
return wrapper
@_wraps(np.linspace)
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
axis=0):
"""Implementation of linspace differentiable in start and stop args."""
lax._check_user_dtype_supported(dtype, "linspace")
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
dtype = dtype or result_type(start, stop, dtypes.canonicalize_dtype(float_))
computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))
start = asarray(start, dtype=computation_dtype)
stop = asarray(stop, dtype=computation_dtype)
bounds_shape = list(lax.broadcast_shapes(shape(start), shape(stop)))
broadcast_start = broadcast_to(start, bounds_shape)
broadcast_stop = broadcast_to(stop, bounds_shape)
axis = len(bounds_shape) + axis + 1 if axis < 0 else axis
bounds_shape.insert(axis, 1)
iota_shape = [1,] * len(bounds_shape)
iota_shape[axis] = num
div = (num - 1) if endpoint else num
if num > 1:
delta = lax.convert_element_type(stop - start, computation_dtype) / div
if issubdtype(dtype, integer):
# This is similar to how numpy computes linspace, but it
# can fail to recover the endpoints in float32 arithmetic.
out = (reshape(broadcast_start, bounds_shape) +
reshape(lax.iota(dtype, num), iota_shape) *
reshape(delta, bounds_shape))
else:
# This approach recovers the endpoints with float32 arithmetic,
# but can lead to rounding errors for integer outputs.
step = reshape(lax.iota(computation_dtype, num), iota_shape) / div
out = (reshape(broadcast_start, bounds_shape) * (1 - step) +
reshape(broadcast_stop, bounds_shape) * step)
elif num == 1:
delta = nan if endpoint else stop - start
out = reshape(broadcast_start, bounds_shape)
else: # num == 0 degenerate case, match numpy behavior
empty_shape = list(lax.broadcast_shapes(shape(start), shape(stop)))
empty_shape.insert(axis, 0)
delta = nan
out = reshape(array([], dtype=dtype), empty_shape)
if retstep:
return lax.convert_element_type(out, dtype), delta
else:
return lax.convert_element_type(out, dtype)
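# Usage sketch (illustrative only; assumes `import jax.numpy as jnp`):
#
#   jnp.linspace(0., 1., num=5)                  # -> [0., 0.25, 0.5, 0.75, 1.]
#   jnp.linspace(0., 1., num=5, endpoint=False)  # -> [0., 0.2, 0.4, 0.6, 0.8]
#   jnp.linspace(0., 1., num=5, retstep=True)    # -> (values, 0.25)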
@_wraps(np.logspace)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0):
"""Implementation of logspace differentiable in start and stop args."""
lax._check_user_dtype_supported(dtype, "logspace")
dtype = dtype or result_type(start, stop, dtypes.canonicalize_dtype(float_))
computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))
start = asarray(start, dtype=computation_dtype)
stop = asarray(stop, dtype=computation_dtype)
lin = linspace(start, stop, num,
endpoint=endpoint, retstep=False, dtype=None, axis=axis)
return lax.convert_element_type(power(base, lin), dtype)
@_wraps(np.geomspace)
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
"""Implementation of geomspace differentiable in start and stop args."""
lax._check_user_dtype_supported(dtype, "geomspace")
dtype = dtype or result_type(start, stop, dtypes.canonicalize_dtype(float_))
computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))
start = asarray(start, dtype=computation_dtype)
stop = asarray(stop, dtype=computation_dtype)
# follow the numpy geomspace convention for negative and complex endpoints
signflip = 1 - (1 - sign(real(start))) * (1 - sign(real(stop))) // 2
res = signflip * logspace(log10(signflip * start),
log10(signflip * stop), num,
endpoint=endpoint, base=10.0,
dtype=computation_dtype, axis=0)
if axis != 0:
res = moveaxis(res, 0, axis)
return lax.convert_element_type(res, dtype)
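# Usage sketch (illustrative only; assumes `import jax.numpy as jnp`). The
# `signflip` factor above lets both endpoints be negative:
#
#   jnp.geomspace(1., 1000., num=4)      # -> [1., 10., 100., 1000.]
#   jnp.geomspace(-1., -1000., num=4)    # -> [-1., -10., -100., -1000.]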
@_wraps(np.meshgrid)
def meshgrid(*args, **kwargs):
indexing = kwargs.get("indexing", "xy")
sparse = kwargs.get("sparse", False)
copy = kwargs.get("copy", True)
if not copy:
raise ValueError("jax.numpy.meshgrid only supports copy=True")
args = list(args)
if indexing == "xy":
if len(args) >= 2:
args[0], args[1] = args[1], args[0]
elif indexing != "ij":
raise ValueError("Valid values for indexing are 'xy' and 'ij', got {}"
.format(indexing))
shape = []
for i, a in enumerate(args):
args[i] = a = asarray(a)
if len(a.shape) != 1:
msg = "Arguments to jax.numpy.meshgrid must be 1D, got shape {}"
raise ValueError(msg.format(a.shape))
shape.append(1 if sparse else a.shape[0])
output = []
for i, a in enumerate(args):
a = asarray(a)
s = shape
if sparse:
s = list(s)
s[i] = a.shape[0]
output.append(lax.broadcast_in_dim(a, s, (i,)))
if indexing == "xy" and len(args) >= 2:
output[0], output[1] = output[1], output[0]
return output
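# Usage sketch (illustrative only; assumes `import jax.numpy as jnp`):
#
#   x = jnp.array([1, 2, 3]); y = jnp.array([4, 5])
#   X, Y = jnp.meshgrid(x, y)                    # indexing='xy': both (2, 3)
#   Xi, Yi = jnp.meshgrid(x, y, indexing='ij')   # both (3, 2)
#   Xs, Ys = jnp.meshgrid(x, y, sparse=True)     # shapes (1, 3) and (2, 1)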
@_wraps(np.i0)
def i0(x):
x = lax.abs(*_promote_args_inexact("i0", x))
return lax.mul(lax.exp(x), lax.bessel_i0e(x))
@_wraps(np.ix_)
def ix_(*args):
n = len(args)
output = []
for i, a in enumerate(args):
a = asarray(a)
if len(a.shape) != 1:
msg = "Arguments to jax.numpy.ix_ must be 1-dimensional, got shape {}"
raise ValueError(msg.format(a.shape))
if _dtype(a) == bool_:
raise NotImplementedError(
"Boolean arguments to jax.numpy.ix_ are not implemented")
shape = [1] * n
shape[i] = a.shape[0]
if a.size == 0:
# Numpy uses an integer index type for empty arrays.
output.append(lax.full(shape, np.zeros((), np.intp)))
else:
output.append(lax.broadcast_in_dim(a, shape, (i,)))
return tuple(output)
@_wraps(np.indices)
def indices(dimensions, dtype=int32, sparse=False):
dimensions = tuple(
core.concrete_or_error(int, d, "dimensions argument of jnp.indices")
for d in dimensions)
N = len(dimensions)
output = []
s = dimensions
for i, dim in enumerate(dimensions):
idx = lax.iota(dtype, dim)
if sparse:
s = (1,)*i + (dim,) + (1,)*(N - i - 1)
output.append(lax.broadcast_in_dim(idx, s, (i,)))
if sparse:
return tuple(output)
return stack(output, 0) if output else array([], dtype=dtype)
_TOTAL_REPEAT_LENGTH_DOC = """\
Jax adds the optional `total_repeat_length` parameter which specifies the total
number of repeats, and defaults to sum(repeats). It must be specified for repeat
to be compilable. If `sum(repeats)` is larger than the specified
`total_repeat_length` the remaining values will be discarded. In the case of
`sum(repeats)` being smaller than the specified target length, the final value
will be repeated.
"""
@_wraps(np.repeat, lax_description=_TOTAL_REPEAT_LENGTH_DOC)
def repeat(a, repeats, axis=None, *, total_repeat_length=None):
_check_arraylike("repeat", a)
if axis is None:
a = ravel(a)
axis = 0
# If total_repeat_length is not given, can't compile, use a default.
if total_repeat_length is None:
repeats = core.concrete_or_error(np.array, repeats,
"When jit-compiling jnp.repeat, the total number of repeats must be static. "
"To fix this, either specify a static value for `repeats`, or pass a static "
"value to `total_repeat_length`.")
# Fast path for when repeats is a scalar.
if np.ndim(repeats) == 0 and ndim(a) != 0:
input_shape = a.shape
aux_axis = axis if axis < 0 else axis + 1
a = expand_dims(a, aux_axis)
reps = [1] * len(a.shape)
reps[aux_axis] = repeats
a = tile(a, reps)
result_shape = list(input_shape)
result_shape[axis] *= repeats
return reshape(a, result_shape)
repeats = np.ravel(repeats)
if ndim(a) != 0:
repeats = np.broadcast_to(repeats, [a.shape[axis]])
total_repeat_length = np.sum(repeats)
else:
repeats = ravel(repeats)
if ndim(a) != 0:
repeats = broadcast_to(repeats, [a.shape[axis]])
# Special case when a is a scalar.
if ndim(a) == 0:
if repeats.shape == (1,):
return full([total_repeat_length], a)
else:
raise ValueError('`repeat` with a scalar parameter `a` is only '
'implemented for scalar values of the parameter `repeats`.')
# Special case if total_repeat_length is zero.
if total_repeat_length == 0:
result_shape = list(a.shape)
result_shape[axis] = 0
return reshape(array([], dtype=a.dtype), result_shape)
# If repeats is on a zero sized axis, then return the array.
if a.shape[axis] == 0:
return a
  # This implementation of repeat avoids having to instantiate a large
  # intermediate tensor.
# Modify repeats from e.g. [1,2,0,5] -> [0,1,2,0] for exclusive repeat.
exclusive_repeats = roll(repeats, shift=1).at[0].set(0)
# Cumsum to get indices of new number in repeated tensor, e.g. [0, 1, 3, 3]
scatter_indices = cumsum(exclusive_repeats)
# Scatter these onto a zero buffer, e.g. [1,1,0,2,0,0,0,0]
block_split_indicators = ops.index_add(
x=zeros([total_repeat_length], dtype=int32),
idx=scatter_indices,
y=1)
# Cumsum again to get scatter indices for repeat, e.g. [0,1,1,3,3,3,3,3]
gather_indices = cumsum(block_split_indicators) - 1
return take(a, gather_indices, axis=axis)
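# Usage sketch (illustrative only; assumes `import jax.numpy as jnp`):
#
#   x = jnp.array([1, 2, 3])
#   jnp.repeat(x, 2)                            # -> [1, 1, 2, 2, 3, 3]
#   jnp.repeat(x, jnp.array([1, 2, 0]),
#              total_repeat_length=3)           # -> [1, 2, 2]
#   jnp.repeat(x, jnp.array([1, 2, 0]),
#              total_repeat_length=2)           # -> [1, 2]  (excess values dropped)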
@_wraps(np.tri)
def tri(N, M=None, k=0, dtype=None):
lax._check_user_dtype_supported(dtype, "tri")
M = M if M is not None else N
dtype = dtype or float32
return lax._tri(dtype, (N, M), k)
@_wraps(np.tril)
def tril(m, k=0):
_check_arraylike("tril", m)
m_shape = shape(m)
if len(m_shape) < 2:
raise ValueError("Argument to jax.numpy.tril must be at least 2D")
mask = tri(*m_shape[-2:], k=k, dtype=bool)
return lax.select(lax.broadcast(mask, m_shape[:-2]), m, zeros_like(m))
@_wraps(np.triu, update_doc=False)
def triu(m, k=0):
_check_arraylike("triu", m)
m_shape = shape(m)
if len(m_shape) < 2:
raise ValueError("Argument to jax.numpy.triu must be at least 2D")
mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)
return lax.select(lax.broadcast(mask, m_shape[:-2]), zeros_like(m), m)
@_wraps(np.trace)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
_check_arraylike("trace", a)
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.trace is not supported.")
lax._check_user_dtype_supported(dtype, "trace")
axis1 = _canonicalize_axis(axis1, ndim(a))
axis2 = _canonicalize_axis(axis2, ndim(a))
a_shape = shape(a)
if dtype is None:
dtype = _dtype(a)
if issubdtype(dtype, integer):
default_int = dtypes.canonicalize_dtype(np.int_)
if iinfo(dtype).bits < iinfo(default_int).bits:
dtype = default_int
  # Move the axis1 and axis2 dimensions to the end.
perm = [i for i in range(len(a_shape)) if i != axis1 and i != axis2]
perm = perm + [axis1, axis2]
a = lax.transpose(a, perm)
# Mask out the diagonal and reduce.
a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),
a, zeros_like(a))
return sum(a, axis=(-2, -1), dtype=dtype)
def _wrap_indices_function(f):
@_wraps(f, update_doc=False)
def wrapper(*args, **kwargs):
return tuple(asarray(x) for x in f(*args, **kwargs))
return wrapper
tril_indices = _wrap_indices_function(np.tril_indices)
triu_indices = _wrap_indices_function(np.triu_indices)
mask_indices = _wrap_indices_function(np.mask_indices)
@_wraps(np.triu_indices_from)
def triu_indices_from(arr, k=0):
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
@_wraps(np.tril_indices_from)
def tril_indices_from(arr, k=0):
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
@_wraps(np.diag_indices)
def diag_indices(n, ndim=2):
if n < 0:
raise ValueError("n argument to diag_indices must be nonnegative, got {}"
.format(n))
if ndim < 0:
raise ValueError("ndim argument to diag_indices must be nonnegative, got {}"
.format(ndim))
return (lax.iota(int_, n),) * ndim
@_wraps(np.diag_indices_from)
def diag_indices_from(arr):
_check_arraylike("diag_indices_from", arr)
if not arr.ndim >= 2:
raise ValueError("input array must be at least 2-d")
if len(set(arr.shape)) != 1:
raise ValueError("All dimensions of input must be of equal length")
return diag_indices(arr.shape[0], ndim=arr.ndim)
@_wraps(np.diagonal)
def diagonal(a, offset=0, axis1=0, axis2=1):
_check_arraylike("diagonal", a)
a_shape = shape(a)
a_ndims = len(a_shape)
# Move the two dimensions to the end.
axis1 = _canonicalize_axis(axis1, a_ndims)
axis2 = _canonicalize_axis(axis2, a_ndims)
perm = [i for i in range(a_ndims) if i != axis1 and i != axis2]
perm = perm + [axis1, axis2]
a = lax.transpose(a, perm)
# Mask out the diagonal and reduce over one of the axes
a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),
a, zeros_like(a))
reduce_axis = -2 if offset < 0 else -1
d = sum(a, axis=reduce_axis, dtype=_dtype(a))
# Slice out the correct diagonal size.
diag_size = _max(0, _min(a_shape[axis1] + _min(offset, 0),
a_shape[axis2] - _max(offset, 0)))
return lax.slice_in_dim(d, 0, diag_size, axis=-1)
@_wraps(np.diag)
def diag(v, k=0):
_check_arraylike("diag", v)
v_shape = shape(v)
if len(v_shape) == 1:
zero = lambda x: lax.full_like(x, shape=(), fill_value=0)
n = v_shape[0] + _abs(k)
v = lax.pad(v, zero(v), ((_max(0, k), _max(0, -k), 0),))
return where(eye(n, k=k, dtype=bool), v, zeros_like(v))
elif len(v_shape) == 2:
return diagonal(v, offset=k)
else:
raise ValueError("diag input must be 1d or 2d")
_SCALAR_VALUE_DOC="""\
This differs from np.diagflat for some scalar values of v:
jax always returns a two-dimensional array, whereas numpy may
return a scalar depending on the type of v.
"""
@_wraps(np.diagflat, lax_description=_SCALAR_VALUE_DOC)
def diagflat(v, k=0):
_check_arraylike("diagflat", v)
v = ravel(v)
v_length = len(v)
adj_length = v_length + _abs(k)
res = zeros(adj_length*adj_length, dtype=v.dtype)
i = arange(0, adj_length-_abs(k))
if (k >= 0):
fi = i+k+i*adj_length
else:
fi = i+(i-k)*adj_length
res = ops.index_update(res, ops.index[fi], v)
res = res.reshape(adj_length,adj_length)
return res
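# Usage sketch (illustrative only; assumes `import jax.numpy as jnp`):
#
#   jnp.diagflat(jnp.array([1, 2, 3]), k=1)   # 4x4 array with [1, 2, 3] on the
#                                             # first superdiagonal
#   jnp.diagflat(4)                           # scalar input -> [[4]] (always 2-D)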
@_wraps(np.polyval)
def polyval(p, x):
if isinstance(p, np.poly1d):
p = np.asarray(p)
if isinstance(x, np.poly1d):
y = 0
else:
y = zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
@_wraps(np.polyadd)
def polyadd(a1, a2):
a1 = asarray(a1)
a2 = asarray(a2)
if a2.shape[0] <= a1.shape[0]:
return a1.at[-a2.shape[0]:].add(a2)
else:
return a2.at[-a1.shape[0]:].add(a1)
@_wraps(np.polyder)
def polyder(p, m=1):
p = asarray(p)
if m < 0:
raise ValueError("Order of derivative must be positive")
if m == 0:
return p
if m % 1:
raise ValueError("m must be an integer")
coeff = (arange(len(p), m, -1) - 1 - arange(m)[:, newaxis]).prod(0)
return p[:-m] * coeff
@_wraps(np.trim_zeros)
def trim_zeros(filt, trim='fb'):
filt = core.concrete_or_error(asarray, filt,
"Error arose in the `filt` argument of trim_zeros()")
nz = asarray(filt) == 0
if all(nz):
return empty(0, _dtype(filt))
start = argmin(nz) if 'f' in trim.lower() else 0
end = argmin(nz[::-1]) if 'b' in trim.lower() else 0
return filt[start:len(filt) - end]
_LEADING_ZEROS_DOC="""\
Setting trim_leading_zeros=True makes the output match that of numpy,
but prevents the function from being used in compiled code.
"""
@_wraps(np.polymul, lax_description=_LEADING_ZEROS_DOC)
def polymul(a1, a2, *, trim_leading_zeros=False):
if isinstance(a1, np.poly1d):
a1 = asarray(a1)
if isinstance(a2, np.poly1d):
a2 = asarray(a2)
if trim_leading_zeros and (len(a1) > 1 or len(a2) > 1):
a1, a2 = trim_zeros(a1, trim='f'), trim_zeros(a2, trim='f')
if len(a1) == 0:
a1 = asarray([0.])
if len(a2) == 0:
a2 = asarray([0.])
val = convolve(a1, a2, mode='full')
return val
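# Usage sketch (illustrative only; assumes `import jax.numpy as jnp`):
#
#   p1 = jnp.array([0., 1., 2.]); p2 = jnp.array([0., 1.])
#   jnp.polymul(p1, p2)                            # -> [0., 0., 1., 2.]  (static length)
#   jnp.polymul(p1, p2, trim_leading_zeros=True)   # -> [1., 2.]          (matches numpy)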
@_wraps(np.polysub)
def polysub(a1, a2):
return polyadd(asarray(a1), -asarray(a2))
@_wraps(np.append)
def append(arr, values, axis=None):
if axis is None:
return concatenate([ravel(arr), ravel(values)], 0)
else:
return concatenate([arr, values], axis=axis)
@_wraps(np.apply_along_axis)
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
num_dims = ndim(arr)
axis = _canonicalize_axis(axis, num_dims)
func = lambda arr: func1d(arr, *args, **kwargs)
for i in range(1, num_dims - axis):
func = jax.vmap(func, in_axes=i, out_axes=-1)
for i in range(axis):
func = jax.vmap(func, in_axes=0, out_axes=0)
return func(arr)
@_wraps(np.apply_over_axes)
def apply_over_axes(func, a, axes):
for axis in axes:
b = func(a, axis=axis)
if b.ndim == a.ndim:
a = b
elif b.ndim == a.ndim - 1:
a = expand_dims(b, axis)
else:
raise ValueError("function is not returning an array of the correct shape")
return a
### Tensor contraction operations
@_wraps(np.dot, lax_description=_PRECISION_DOC)
def dot(a, b, *, precision=None): # pylint: disable=missing-docstring
_check_arraylike("dot", a, b)
a, b = _promote_dtypes(a, b)
a_ndim, b_ndim = ndim(a), ndim(b)
if a_ndim == 0 or b_ndim == 0:
return lax.mul(a, b)
if _max(a_ndim, b_ndim) <= 2:
return lax.dot(a, b, precision=precision)
if b_ndim == 1:
contract_dims = ((a_ndim - 1,), (0,))
else:
contract_dims = ((a_ndim - 1,), (b_ndim - 2,))
batch_dims = ((), ())
return lax.dot_general(a, b, (contract_dims, batch_dims), precision)
@_wraps(np.matmul, lax_description=_PRECISION_DOC)
def matmul(a, b, *, precision=None): # pylint: disable=missing-docstring
_check_arraylike("matmul", a, b)
for i, x in enumerate((a, b)):
if ndim(x) < 1:
msg = (f"matmul input operand {i} must have ndim at least 1, "
f"but it has ndim {ndim(x)}")
raise ValueError(msg)
a, b = _promote_dtypes(a, b)
a_is_mat, b_is_mat = (ndim(a) > 1), (ndim(b) > 1)
a_batch_dims = shape(a)[:-2] if a_is_mat else ()
b_batch_dims = shape(b)[:-2] if b_is_mat else ()
num_batch_dims = _max(len(a_batch_dims), len(b_batch_dims))
a_batch_dims = (None,) * (num_batch_dims - len(a_batch_dims)) + a_batch_dims
b_batch_dims = (None,) * (num_batch_dims - len(b_batch_dims)) + b_batch_dims
# Dimensions to squeeze from the inputs.
a_squeeze = []
b_squeeze = []
# Positions of batch dimensions in squeezed inputs.
a_batch = []
b_batch = []
# Desired index in final output of each kind of dimension, in the order that
# lax.dot_general will emit them.
idx_batch = []
idx_a_other = [] # other = non-batch, non-contracting.
idx_b_other = []
for i, (ba, bb) in enumerate(zip(a_batch_dims, b_batch_dims)):
if ba is None:
idx_b_other.append(i)
elif bb is None:
idx_a_other.append(i)
elif ba == 1:
idx_b_other.append(i)
a_squeeze.append(len(idx_batch) + len(idx_a_other) + len(a_squeeze))
elif bb == 1:
idx_a_other.append(i)
b_squeeze.append(len(idx_batch) + len(idx_b_other) + len(b_squeeze))
elif ba == bb:
a_batch.append(len(idx_batch) + len(idx_a_other))
b_batch.append(len(idx_batch) + len(idx_b_other))
idx_batch.append(i)
else:
raise ValueError("Incompatible shapes for matmul arguments: {} and {}"
.format(shape(a), shape(b)))
if a_is_mat: idx_a_other.append(num_batch_dims)
if b_is_mat: idx_b_other.append(num_batch_dims + a_is_mat)
perm = np.argsort(np.concatenate([idx_batch, idx_a_other, idx_b_other]))
a = lax.squeeze(a, tuple(a_squeeze))
b = lax.squeeze(b, tuple(b_squeeze))
out = lax.dot_general(
a, b, (((ndim(a) - 1,), (ndim(b) - 1 - b_is_mat,)), (a_batch, b_batch)),
precision=precision)
return lax.transpose(out, perm)
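# Usage sketch (illustrative only; assumes `import jax.numpy as jnp`). Leading
# batch dimensions broadcast, and a 1-D operand gets a dimension added and then
# removed again, as in numpy:
#
#   a = jnp.ones((2, 1, 4, 3)); b = jnp.ones((5, 3, 6))
#   jnp.matmul(a, b).shape            # -> (2, 5, 4, 6)
#   jnp.matmul(jnp.ones(3), b).shape  # -> (5, 6)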
@_wraps(np.vdot, lax_description=_PRECISION_DOC)
def vdot(a, b, *, precision=None):
_check_arraylike("vdot", a, b)
if issubdtype(_dtype(a), complexfloating):
a = conj(a)
return dot(a.ravel(), b.ravel(), precision=precision)
@_wraps(np.tensordot, lax_description=_PRECISION_DOC)
def tensordot(a, b, axes=2, *, precision=None):
_check_arraylike("tensordot", a, b)
a_ndim = ndim(a)
b_ndim = ndim(b)
a, b = _promote_dtypes(a, b)
if type(axes) is int:
if axes > _min(a_ndim, b_ndim):
msg = "Number of tensordot axes (axes {}) exceeds input ranks ({} and {})"
raise TypeError(msg.format(axes, a.shape, b.shape))
contracting_dims = tuple(range(a_ndim - axes, a_ndim)), tuple(range(axes))
elif type(axes) in (list, tuple) and len(axes) == 2:
ax1, ax2 = axes
if type(ax1) == type(ax2) == int:
contracting_dims = ((_canonicalize_axis(ax1, a_ndim),),
(_canonicalize_axis(ax2, b_ndim),))
elif type(ax1) in (list, tuple) and type(ax2) in (list, tuple):
if len(ax1) != len(ax2):
msg = "tensordot requires axes lists to have equal length, got {} and {}."
raise TypeError(msg.format(ax1, ax2))
contracting_dims = (tuple(_canonicalize_axis(i, a_ndim) for i in ax1),
tuple(_canonicalize_axis(i, b_ndim) for i in ax2))
else:
msg = ("tensordot requires both axes lists to be either ints, tuples or "
"lists, got {} and {}")
raise TypeError(msg.format(ax1, ax2))
else:
msg = ("tensordot axes argument must be an int, a pair of ints, or a pair "
"of lists/tuples of ints.")
raise TypeError(msg)
return lax.dot_general(a, b, (contracting_dims, ((), ())),
precision=precision)
@_wraps(np.einsum, lax_description=_PRECISION_DOC)
def einsum(*operands, out=None, optimize='greedy', precision=None):
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.einsum is not supported.")
optimize = 'greedy' if optimize is True else optimize
# using einsum_call=True here is an internal api for opt_einsum
operands, contractions = opt_einsum.contract_path(
*operands, einsum_call=True, use_blas=True, optimize=optimize)
contractions = tuple((a, frozenset(b), c) for a, b, c, *_ in contractions)
return _einsum(operands, contractions, precision)
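# Usage sketch (illustrative only; assumes `import jax.numpy as jnp`):
#
#   a = jnp.ones((2, 3)); b = jnp.ones((3, 4))
#   jnp.einsum('ij,jk->ik', a, b)     # matrix product, same as a @ b
#   jnp.einsum('ii->i', jnp.eye(3))   # main diagonal -> [1., 1., 1.]
#   jnp.einsum('ij->', a)             # sum over all elements -> 6.0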
@_wraps(np.einsum_path)
def einsum_path(subscripts, *operands, optimize='greedy'):
# using einsum_call=True here is an internal api for opt_einsum
return opt_einsum.contract_path(subscripts, *operands, optimize=optimize)
def _removechars(s, chars):
return s.translate(str.maketrans(dict.fromkeys(chars)))
@partial(jit, static_argnums=(1, 2))
def _einsum(operands: Sequence,
contractions: Sequence[Tuple[Tuple[int, ...], FrozenSet[str], str]],
precision):
operands = list(_promote_dtypes(*operands))
def sum(x, axes):
return lax.reduce(x, np.array(0, x.dtype),
lax.add if x.dtype != bool_ else lax.bitwise_or, axes)
def sum_uniques(operand, names, uniques):
if uniques:
axes = [names.index(name) for name in uniques]
operand = sum(operand, axes)
names = _removechars(names, uniques)
return operand, names
def sum_repeats(operand, names, counts, keep_names):
for name, count in counts.items():
if count > 1:
axes = [i for i, n in enumerate(names) if n == name]
eye = lax._delta(operand.dtype, operand.shape, axes)
if name not in keep_names:
operand = sum(operand * eye, axes)
names = names.replace(name, '')
else:
operand = sum(operand * eye, axes[:-1])
names = names.replace(name, '', count - 1)
return operand, names
def filter_singleton_dims(operand, names, other_shape, other_names):
s = shape(operand)
new_shape = []
new_names = []
for i, d in enumerate(names):
other_i = other_names.find(d)
if s[i] != 1 or other_i == -1 or other_shape[other_i] == 1:
new_shape.append(s[i])
new_names.append(d)
return reshape(operand, tuple(new_shape)), "".join(new_names)
for operand_indices, contracted_names_set, einstr in contractions:
contracted_names = sorted(contracted_names_set)
input_str, result_names = einstr.split('->')
input_names = input_str.split(',')
# switch on the number of operands to be processed in this loop iteration.
# every case here sets 'operand' and 'names'.
if len(operand_indices) == 1:
operand = operands.pop(operand_indices[0])
names, = input_names
counts = collections.Counter(names)
# sum out unique contracted indices with a single reduce-sum
uniques = [name for name in contracted_names if counts[name] == 1]
operand, names = sum_uniques(operand, names, uniques)
# for every repeated index, do a contraction against an identity matrix
operand, names = sum_repeats(operand, names, counts, result_names)
elif len(operand_indices) == 2:
lhs, rhs = map(operands.pop, operand_indices)
lhs_names, rhs_names = input_names
# handle cases where one side of a contracting or batch dimension is 1
# but its counterpart is not.
lhs, lhs_names = filter_singleton_dims(lhs, lhs_names, shape(rhs),
rhs_names)
rhs, rhs_names = filter_singleton_dims(rhs, rhs_names, shape(lhs),
lhs_names)
lhs_counts = collections.Counter(lhs_names)
rhs_counts = collections.Counter(rhs_names)
# sum out unique contracted indices in lhs and rhs
lhs_uniques = [name for name in contracted_names
if lhs_counts[name] == 1 and rhs_counts[name] == 0]
lhs, lhs_names = sum_uniques(lhs, lhs_names, lhs_uniques)
rhs_uniques = [name for name in contracted_names
if rhs_counts[name] == 1 and lhs_counts[name] == 0]
rhs, rhs_names = sum_uniques(rhs, rhs_names, rhs_uniques)
# for every repeated index, contract against an identity matrix
lhs, lhs_names = sum_repeats(lhs, lhs_names, lhs_counts,
result_names + rhs_names)
rhs, rhs_names = sum_repeats(rhs, rhs_names, rhs_counts,
result_names + lhs_names)
lhs_or_rhs_names = set(lhs_names) | set(rhs_names)
contracted_names = [x for x in contracted_names if x in lhs_or_rhs_names]
lhs_and_rhs_names = set(lhs_names) & set(rhs_names)
batch_names = [x for x in result_names if x in lhs_and_rhs_names]
lhs_batch, rhs_batch = unzip2((lhs_names.find(n), rhs_names.find(n))
for n in batch_names)
# NOTE(mattjj): this can fail non-deterministically in python3, maybe
# due to opt_einsum
assert _all(
name in lhs_names and name in rhs_names and
lhs.shape[lhs_names.index(name)] == rhs.shape[rhs_names.index(name)]
for name in contracted_names)
# contract using lax.dot_general
batch_names_str = ''.join(batch_names)
lhs_cont, rhs_cont = unzip2((lhs_names.index(n), rhs_names.index(n))
for n in contracted_names)
dimension_numbers = ((lhs_cont, rhs_cont), (lhs_batch, rhs_batch))
operand = lax.dot_general(lhs, rhs, dimension_numbers, precision)
deleted_names = batch_names_str + ''.join(contracted_names)
names = (batch_names_str + _removechars(lhs_names, deleted_names)
+ _removechars(rhs_names, deleted_names))
else:
raise NotImplementedError # if this is actually reachable, open an issue!
# the resulting 'operand' with axis labels 'names' should be a permutation
# of the desired result
assert len(names) == len(result_names) == len(set(names))
assert set(names) == set(result_names)
if names != result_names:
perm = tuple([names.index(name) for name in result_names])
operand = lax.transpose(operand, perm)
operands.append(operand) # used in next iteration
return operands[0]
def _movechars(s, src, dst):
"""Helper for einsum string munging, like moveaxis on identifier strings."""
chars = [c for i, c in enumerate(s) if i not in src]
for i, j in sorted(zip(dst, src)):
chars.insert(i, s[j])
return ''.join(chars)
@_wraps(np.inner, lax_description=_PRECISION_DOC)
def inner(a, b, *, precision=None):
if ndim(a) == 0 or ndim(b) == 0:
return a * b
return tensordot(a, b, (-1, -1), precision=precision)
@_wraps(np.outer)
def outer(a, b, out=None):
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.outer is not supported.")
a, b = _promote_dtypes(a, b)
return ravel(a)[:, None] * ravel(b)[None, :]
@partial(jit, static_argnums=(2, 3, 4))
def _cross(a, b, axisa, axisb, axisc):
a = moveaxis(a, axisa, -1)
b = moveaxis(b, axisb, -1)
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
raise ValueError("Dimension must be either 2 or 3 for cross product")
if a.shape[-1] == 2 and b.shape[-1] == 2:
return a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]
a0 = a[..., 0]
a1 = a[..., 1]
a2 = a[..., 2] if a.shape[-1] == 3 else zeros_like(a0)
b0 = b[..., 0]
b1 = b[..., 1]
b2 = b[..., 2] if b.shape[-1] == 3 else zeros_like(b0)
c = array([a1 * b2 - a2 * b1, a2 * b0 - a0 * b2, a0 * b1 - a1 * b0])
return moveaxis(c, 0, axisc)
@_wraps(np.cross)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
if axis is not None:
axisa = axis
axisb = axis
axisc = axis
return _cross(a, b, axisa, axisb, axisc)
@_wraps(np.kron)
def kron(a, b):
a, b = _promote_dtypes(a, b)
if ndim(a) < ndim(b):
a = reshape(a, (1,) * (ndim(b) - ndim(a)) + shape(a))
elif ndim(b) < ndim(a):
b = reshape(b, (1,) * (ndim(a) - ndim(b)) + shape(b))
a_reshaped = reshape(a, [i for d in shape(a) for i in (d, 1)])
b_reshaped = reshape(b, [i for d in shape(b) for i in (1, d)])
out_shape = tuple(np.multiply(shape(a), shape(b)))
return reshape(lax.mul(a_reshaped, b_reshaped), out_shape)
@_wraps(np.vander)
def vander(x, N=None, increasing=False):
x = asarray(x)
dtype = _dtype(x)
if ndim(x) != 1:
raise ValueError("x must be a one-dimensional array")
x_shape = shape(x)
N = N or x_shape[0]
if N < 0:
raise ValueError("N must be nonnegative")
iota = lax.iota(dtype, N)
if not increasing:
iota = lax.sub(lax._const(iota, N - 1), iota)
return power(x[..., None], iota)
### Misc
@_wraps(np.argwhere)
def argwhere(a):
result = transpose(vstack(nonzero(a)))
if ndim(a) == 0:
return result[:0].reshape(result.shape[0], 0)
return result.reshape(result.shape[0], ndim(a))
@_wraps(np.argmax)
def argmax(a, axis=None, out=None):
_check_arraylike("argmax", a)
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.argmax is not supported.")
if axis is None:
a = ravel(a)
axis = 0
if a.shape[axis] == 0:
raise ValueError("attempt to get argmax of an empty sequence")
return lax.argmax(a, _canonicalize_axis(axis, a.ndim), int64)
@_wraps(np.argmin)
def argmin(a, axis=None, out=None):
_check_arraylike("argmin", a)
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.argmin is not supported.")
if axis is None:
a = ravel(a)
axis = 0
if a.shape[axis] == 0:
raise ValueError("attempt to get argmin of an empty sequence")
return lax.argmin(a, _canonicalize_axis(axis, a.ndim), int64)
_NANARG_DOC = """\
Warning: jax.numpy.arg{} returns -1 for all-NaN slices and does not raise
an error.
"""
@_wraps(np.nanargmax, lax_description=_NANARG_DOC.format("max"))
def nanargmax(a, axis=None):
_check_arraylike("nanargmax", a)
if not issubdtype(_dtype(a), inexact):
return argmax(a, axis=axis)
nan_mask = isnan(a)
a = where(nan_mask, -inf, a)
res = argmax(a, axis=axis)
return where(all(nan_mask, axis=axis), -1, res)
@_wraps(np.nanargmin, lax_description=_NANARG_DOC.format("min"))
def nanargmin(a, axis=None):
_check_arraylike("nanargmin", a)
if not issubdtype(_dtype(a), inexact):
return argmin(a, axis=axis)
nan_mask = isnan(a)
a = where(nan_mask, inf, a)
res = argmin(a, axis=axis)
return where(all(nan_mask, axis=axis), -1, res)
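# Usage sketch (illustrative only; assumes `import jax.numpy as jnp`):
#
#   x = jnp.array([jnp.nan, 2., 1.])
#   jnp.nanargmax(x)                      # -> 1
#   jnp.nanargmin(x)                      # -> 2
#   jnp.nanargmax(jnp.array([jnp.nan]))   # -> -1 (all-NaN slice, no error raised)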
@_wraps(np.sort)
def sort(a, axis=-1, kind='quicksort', order=None):
_check_arraylike("sort", a)
if kind != 'quicksort':
warnings.warn("'kind' argument to sort is ignored.")
if order is not None:
raise ValueError("'order' argument to sort is not supported.")
if axis is None:
return lax.sort(a.ravel(), dimension=0)
else:
return lax.sort(a, dimension=_canonicalize_axis(axis, ndim(a)))
@_wraps(np.sort_complex)
def sort_complex(a):
_check_arraylike("sort_complex", a)
a = lax.sort(a, dimension=0)
return lax.convert_element_type(a, result_type(a, dtypes.canonicalize_dtype(complex_)))
@_wraps(np.lexsort)
def lexsort(keys, axis=-1):
keys = tuple(keys)
if len(keys) == 0:
raise TypeError("need sequence of keys with len > 0 in lexsort")
if len({shape(key) for key in keys}) > 1:
raise ValueError("all keys need to be the same shape")
if ndim(keys[0]) == 0:
return np.int64(0)
axis = _canonicalize_axis(axis, ndim(keys[0]))
iota = lax.broadcasted_iota(np.int64, shape(keys[0]), axis)
return lax.sort((*keys[::-1], iota), dimension=axis, num_keys=len(keys))[-1]
@_wraps(np.argsort)
def argsort(a, axis=-1, kind='quicksort', order=None):
_check_arraylike("argsort", a)
if kind != 'quicksort':
warnings.warn("'kind' argument to argsort is ignored.")
if order is not None:
raise ValueError("'order' argument to argsort is not supported.")
if axis is None:
return argsort(a.ravel(), 0)
else:
axis = _canonicalize_axis(axis, ndim(a))
iota = lax.broadcasted_iota(np.int64, shape(a), axis)
_, perm = lax.sort_key_val(a, iota, dimension=axis)
return perm
@_wraps(np.msort)
def msort(a):
return sort(a, axis=0)
@partial(jit, static_argnums=(2,))
def _roll(a, shift, axis):
a = asarray(a)
a_shape = shape(a)
if axis is None:
return lax.reshape(roll(ravel(a), shift, axis=0), a_shape)
a_ndim = len(a_shape)
shift = asarray(shift)
axis = np.asarray(axis)
b_shape = lax.broadcast_shapes(shift.shape, axis.shape, (1,))
if len(b_shape) != 1:
msg = "'shift' and 'axis' arguments to roll must be scalars or 1D arrays"
raise ValueError(msg)
for x, i in zip(broadcast_to(shift, b_shape),
np.broadcast_to(axis, b_shape)):
i = _canonicalize_axis(i, a_ndim)
x = remainder(x, (a_shape[i] or 1))
a = lax.concatenate((a, a), i)
a = lax.dynamic_slice_in_dim(a, a_shape[i] - x, a_shape[i], axis=i)
return a
@_wraps(np.roll)
def roll(a, shift, axis=None):
if isinstance(axis, list):
axis = tuple(axis)
return _roll(a, shift, axis)
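# Usage sketch (illustrative only; assumes `import jax.numpy as jnp`):
#
#   x = jnp.arange(5)                       # [0, 1, 2, 3, 4]
#   jnp.roll(x, 2)                          # -> [3, 4, 0, 1, 2]
#   jnp.roll(x.reshape(1, 5), 2, axis=1)    # -> [[3, 4, 0, 1, 2]]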
@_wraps(np.rollaxis)
def rollaxis(a, axis, start=0):
_check_arraylike("rollaxis", a)
a_ndim = ndim(a)
axis = _canonicalize_axis(axis, a_ndim)
if not (-a_ndim <= start <= a_ndim):
raise ValueError(f"start={start} must satisfy {-a_ndim}<=start<={a_ndim}")
if start < 0:
start += a_ndim
if start > axis:
start -= 1
return moveaxis(a, axis, start)
@_wraps(np.packbits)
def packbits(a, axis=None, bitorder='big'):
a = asarray(a)
if not (issubdtype(dtype(a), integer) or issubdtype(dtype(a), bool_)):
raise TypeError('Expected an input array of integer or boolean data type')
if bitorder not in ['little', 'big']:
raise ValueError("'order' must be either 'little' or 'big'")
a = (a > 0).astype('uint8')
bits = arange(8, dtype='uint8')
if bitorder == 'big':
bits = bits[::-1]
if axis is None:
a = ravel(a)
axis = 0
a = swapaxes(a, axis, -1)
remainder = a.shape[-1] % 8
if remainder:
a = pad(a, (a.ndim - 1) * [(0, 0)] + [(0, 8 - remainder)])
a = a.reshape(a.shape[:-1] + (a.shape[-1] // 8, 8))
packed = (a << bits).sum(-1).astype('uint8')
return swapaxes(packed, axis, -1)
@_wraps(np.unpackbits)
def unpackbits(a, axis=None, count=None, bitorder='big'):
a = asarray(a)
if dtype(a) != uint8:
raise TypeError("Expected an input array of unsigned byte data type")
if bitorder not in ['little', 'big']:
raise ValueError("'order' must be either 'little' or 'big'")
bits = asarray(1) << arange(8, dtype='uint8')
if bitorder == 'big':
bits = bits[::-1]
if axis is None:
a = a.ravel()
axis = 0
a = swapaxes(a, axis, -1)
unpacked = ((a[..., None] & bits) > 0).astype('uint8')
unpacked = unpacked.reshape(unpacked.shape[:-2] + (-1,))[..., :count]
return swapaxes(unpacked, axis, -1)
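# Usage sketch (illustrative only; assumes `import jax.numpy as jnp`):
#
#   bits = jnp.array([1, 0, 1], dtype=jnp.uint8)
#   packed = jnp.packbits(bits)        # zero-padded to 8 bits -> [160] (0b10100000)
#   jnp.unpackbits(packed, count=3)    # -> [1, 0, 1]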
@_wraps(np.take)
def take(a, indices, axis=None, out=None, mode=None):
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.take is not supported.")
a = asarray(a)
indices = asarray(indices)
if axis is None:
a = ravel(a)
axis = 0
axis = _canonicalize_axis(axis, ndim(a))
if mode == "raise":
# TODO(phawkins): we have no way to report out of bounds errors yet.
raise NotImplementedError("The 'raise' mode to jnp.take is not supported.")
elif mode == "wrap":
indices = mod(indices, _constant_like(indices, a.shape[axis]))
elif mode != "clip" and mode is not None:
raise ValueError("Invalid mode '{}' for np.take".format(mode))
index_dims = len(shape(indices))
slice_sizes = list(shape(a))
slice_sizes[axis] = _min(indices.size, 1)
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(
list(range(axis)) +
list(range(axis + index_dims, len(a.shape) + index_dims - 1))),
collapsed_slice_dims=(axis,),
start_index_map=(axis,))
return lax.gather(a, indices[..., None], dimension_numbers=dnums,
slice_sizes=tuple(slice_sizes))
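# Usage sketch (illustrative only; assumes `import jax.numpy as jnp`):
#
#   x = jnp.array([10, 20, 30])
#   jnp.take(x, jnp.array([0, 2]))                # -> [10, 30]
#   jnp.take(x, jnp.array([3, 4]), mode='wrap')   # indices reduced mod 3 -> [10, 20]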
def _normalize_index(index, axis_size):
"""Normalizes an index value in the range [-N, N) to the range [0, N)."""
if type(axis_size) is Poly:
return index + axis_size if index < 0 else index
return lax.select(
lax.lt(index, _constant_like(index, 0)),
lax.add(index, _constant_like(index, axis_size)),
index)
@partial(jit, static_argnums=(2,))
def _take_along_axis(arr, indices, axis):
if axis is None:
if ndim(indices) != 1:
msg = "take_along_axis indices must be 1D if axis=None, got shape {}"
raise ValueError(msg.format(indices.shape))
return take_along_axis(arr.ravel(), indices, 0)
rank = ndim(arr)
if rank != ndim(indices):
msg = "indices and arr must have the same number of dimensions; {} vs. {}"
raise ValueError(msg.format(ndim(indices), ndim(arr)))
axis = _canonicalize_axis(axis, rank)
def replace(tup, val):
lst = list(tup)
lst[axis] = val
return tuple(lst)
use_64bit_index = _any([type(d) is Poly or d >= (1 << 31) for d in arr.shape])
index_dtype = int64 if use_64bit_index else int32
indices = lax.convert_element_type(indices, index_dtype)
bcast_shape = lax.broadcast_shapes(replace(arr.shape, 1), replace(indices.shape, 1))
indices = broadcast_to(indices, replace(bcast_shape, indices.shape[axis]))
arr = broadcast_to(arr, replace(bcast_shape, arr.shape[axis]))
axis_size = arr.shape[axis]
arr_shape = replace(arr.shape, 1)
idx_shape = indices.shape
out_shape = lax.broadcast_shapes(idx_shape, arr_shape)
index_dims = [i for i, idx in enumerate(idx_shape) if i == axis or idx != 1]
gather_index_shape = tuple(np.array(out_shape)[index_dims]) + (1,)
gather_indices = []
slice_sizes = []
offset_dims = []
start_index_map = []
collapsed_slice_dims = []
j = 0
for i in range(rank):
if i == axis:
indices = _normalize_index(indices, axis_size)
gather_indices.append(lax.reshape(indices, gather_index_shape))
slice_sizes.append(1)
start_index_map.append(i)
collapsed_slice_dims.append(i)
j += 1
elif idx_shape[i] != 1:
iota = lax.iota(_dtype(indices), out_shape[i])
if not config.omnistaging_enabled:
iota = lax.tie_in(arr, iota)
iota = lax.broadcast_in_dim(iota, gather_index_shape, (j,))
gather_indices.append(iota)
slice_sizes.append(1)
start_index_map.append(i)
collapsed_slice_dims.append(i)
j += 1
else:
# If idx_shape[i] == 1, we can just take the entirety of the arr's axis
# and avoid forming an iota index.
offset_dims.append(i)
slice_sizes.append(arr_shape[i])
gather_indices = lax.concatenate(gather_indices, dimension=j)
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(offset_dims),
collapsed_slice_dims=tuple(collapsed_slice_dims),
start_index_map=tuple(start_index_map))
return lax.gather(arr, gather_indices, dnums, tuple(slice_sizes))
@_wraps(getattr(np, "take_along_axis", None), update_doc=False)
def take_along_axis(arr, indices, axis):
_check_arraylike("take_along_axis", arr)
return _take_along_axis(arr, indices, axis)
### SetOps
@partial(jit, static_argnums=1)
def _unique1d_sorted_mask(ar, optional_indices=False):
"""
Helper function for unique which is jit-able
"""
ar = asarray(ar).flatten()
if optional_indices:
perm = ar.argsort()
aux = ar[perm]
else:
aux = ar.sort()
mask = empty(aux.shape, dtype=bool_)
mask = ops.index_update(mask, ops.index[:1], True)
mask = ops.index_update(mask, ops.index[1:], aux[1:] != aux[:-1])
if optional_indices:
return aux, mask, perm
else:
return aux, mask
def _unique1d(ar, return_index=False, return_inverse=False,
return_counts=False):
"""
Find the unique elements of an array, ignoring shape.
"""
optional_indices = return_index or return_inverse
if optional_indices:
aux, mask, perm = _unique1d_sorted_mask(ar, optional_indices)
else:
aux, mask = _unique1d_sorted_mask(ar, optional_indices)
ret = (aux[mask],)
if return_index:
ret += (perm[mask],)
if return_inverse:
imask = cumsum(mask) - 1
inv_idx = zeros(mask.shape, dtype=dtypes.canonicalize_dtype(int_))
inv_idx = ops.index_update(inv_idx, perm, imask)
ret += (inv_idx,)
if return_counts:
idx = concatenate(nonzero(mask) + (array([mask.size]),))
ret += (diff(idx),)
return ret
@_wraps(np.unique)
def unique(ar, return_index=False, return_inverse=False,
return_counts=False, axis=None):
ar = core.concrete_or_error(asarray, ar, "The error arose in jnp.unique()")
if iscomplexobj(ar):
raise NotImplementedError(
"np.unique is not implemented for complex valued arrays")
if axis is None:
ret = _unique1d(ar, return_index, return_inverse, return_counts)
if len(ret) == 1:
return ret[0]
else:
return ret
raise NotImplementedError(
"np.unique is not implemented for the axis argument")
### Indexing
def _rewriting_take(arr, idx):
# Computes arr[idx].
# All supported cases of indexing can be implemented as an XLA gather,
# followed by an optional reverse and broadcast_in_dim.
arr = asarray(arr)
treedef, static_idx, dynamic_idx = _split_index_for_jit(idx)
return _gather(arr, treedef, static_idx, dynamic_idx)
# TODO(phawkins): re-enable jit after fixing excessive recompilation for
# slice indexes (e.g., slice(0, 5, None), slice(10, 15, None), etc.).
# @partial(jit, static_argnums=(1, 2))
def _gather(arr, treedef, static_idx, dynamic_idx):
idx = _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx)
indexer = _index_to_gather(shape(arr), idx) # shared with _scatter_update
y = arr
# Avoid calling gather if the slice shape is empty, both as a fast path and to
# handle cases like zeros(0)[array([], int32)].
if _prod(indexer.slice_shape) == 0:
return zeros(indexer.slice_shape, dtype=y.dtype)
# We avoid generating a gather when indexer.gather_indices.size is empty.
if indexer.gather_indices.size:
y = lax.gather(y, indexer.gather_indices, indexer.dnums,
indexer.gather_slice_shape)
# Reverses axes with negative strides.
if indexer.reversed_y_dims:
y = lax.rev(y, indexer.reversed_y_dims)
# This adds np.newaxis/None dimensions.
return expand_dims(y, indexer.newaxis_dims)
_Indexer = collections.namedtuple("_Indexer", [
# The expected shape of the slice output.
"slice_shape",
# The slice shape to pass to lax.gather().
"gather_slice_shape",
# The gather indices to use.
"gather_indices",
# A GatherDimensionNumbers object describing the gather to perform.
"dnums",
# Slice dimensions that have negative strides, and so must be reversed after
# the gather.
"reversed_y_dims",
# Keep track of any axes created by `newaxis`. These must be inserted for
# gathers and eliminated for scatters.
"newaxis_dims",
])
def _split_index_for_jit(idx):
"""Splits indices into necessarily-static and dynamic parts.
Used to pass indices into `jit`-ted function.
"""
  # Wrap a single non-sequence index in a tuple; non-tuple sequence indices
  # (deprecated by NumPy) raise a TypeError.
idx = _eliminate_deprecated_list_indexing(idx)
# Expand any (concrete) boolean indices. We can then use advanced integer
# indexing logic to handle them.
idx = _expand_bool_indices(idx)
leaves, treedef = tree_flatten(idx)
dynamic = [None] * len(leaves)
static = [None] * len(leaves)
for i, x in enumerate(leaves):
if x is Ellipsis:
static[i] = x
elif isinstance(x, slice):
# slice objects aren't hashable.
static[i] = (x.start, x.stop, x.step)
else:
dynamic[i] = x
return treedef, tuple(static), dynamic
def _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx):
"""Recombines indices that were split by _split_index_for_jit."""
idx = []
for s, d in zip(static_idx, dynamic_idx):
if d is not None:
idx.append(d)
elif isinstance(s, tuple):
idx.append(slice(s[0], s[1], s[2]))
else:
idx.append(s)
return treedef.unflatten(idx)
def _int(aval):
return not aval.shape and issubdtype(aval.dtype, integer)
def _index_to_gather(x_shape, idx):
# Remove ellipses and add trailing slice(None)s.
idx = _canonicalize_tuple_index(len(x_shape), idx)
# Check for advanced indexing:
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
# Do the advanced indexing axes appear contiguously? If not, NumPy semantics
# move the advanced axes to the front.
advanced_axes_are_contiguous = False
advanced_indexes = None
# The positions of the advanced indexing axes in `idx`.
idx_advanced_axes = []
  # The positions of the advanced indexes in x's (collapsed) shape, i.e. after
  # None axes have been removed. See below.
x_advanced_axes = None
if _is_advanced_int_indexer(idx):
idx_no_nones = [(i, d) for i, d in enumerate(idx) if d is not None]
advanced_pairs = (
(asarray(e), i, j) for j, (i, e) in enumerate(idx_no_nones)
if isscalar(e) or isinstance(e, (Sequence, ndarray)))
advanced_pairs = ((_normalize_index(e, x_shape[j]), i, j)
for e, i, j in advanced_pairs)
advanced_indexes, idx_advanced_axes, x_advanced_axes = zip(*advanced_pairs)
advanced_axes_are_contiguous = np.all(np.diff(idx_advanced_axes) == 1)
x_axis = 0 # Current axis in x.
y_axis = 0 # Current axis in y, before collapsing. See below.
collapsed_y_axis = 0 # Current axis in y, after collapsing.
# Scatter dimension numbers.
offset_dims = []
collapsed_slice_dims = []
start_index_map = []
use_64bit_index = _any([type(d) is Poly or d >= (1 << 31) for d in x_shape])
index_dtype = int64 if use_64bit_index else int32
gather_indices = np.zeros((0,), dtype=index_dtype) # use np to save a compilation
# We perform three transformations to y before the scatter op, in order:
  # First, y is broadcast to slice_shape. In general `y` only needs to be broadcast to
# the right shape.
slice_shape = []
# Next, y is squeezed to remove newaxis_dims. This removes np.newaxis/`None`
# indices, which the scatter cannot remove itself.
newaxis_dims = []
# Finally, we reverse reversed_y_dims to handle slices with negative strides.
reversed_y_dims = []
gather_slice_shape = []
for idx_pos, i in enumerate(idx):
# Handle the advanced indices here if:
    # * the advanced indices were not contiguous and we are at the start.
# * we are at the position of the first advanced index.
if (advanced_indexes is not None and
(advanced_axes_are_contiguous and idx_pos == idx_advanced_axes[0] or
not advanced_axes_are_contiguous and idx_pos == 0)):
advanced_indexes = broadcast_arrays(*advanced_indexes)
shape = advanced_indexes[0].shape
ndim = len(shape)
advanced_indexes = [
lax.convert_element_type(lax.reshape(a, shape + (1,)), index_dtype)
for a in advanced_indexes]
# Broadcast gather_indices from [..., k] to [..., 1, 1, ..., 1, k].
gather_indices = lax.broadcast_in_dim(
gather_indices, np.insert(gather_indices.shape, -1, shape),
tuple(range(gather_indices.ndim - 1)) + (gather_indices.ndim + ndim - 1,))
gather_indices = concatenate([gather_indices] + advanced_indexes, -1)
start_index_map.extend(x_advanced_axes)
collapsed_slice_dims.extend(x_advanced_axes)
slice_shape.extend(shape)
y_axis += ndim
collapsed_y_axis += ndim
# Per-index bookkeeping for advanced indexes.
if idx_pos in idx_advanced_axes:
x_axis += 1
gather_slice_shape.append(1)
continue
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
# Handle basic int indexes.
if isinstance(abstract_i, (ConcreteArray,ShapedArray)) and _int(abstract_i):
if x_shape[x_axis] == 0:
# XLA gives error when indexing into an axis of size 0
raise IndexError(f"index is out of bounds for axis {x_axis} with size 0")
i = _normalize_index(i, x_shape[x_axis])
if type(i) is Poly:
# dummy index if i is polynomial, doesn't matter for shape inference
# TODO(mattjj,j-towns,juliuskunze): revise this logic
i = 0
i = lax.convert_element_type(i, index_dtype)
i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))
gather_indices = concatenate((gather_indices, i), -1)
collapsed_slice_dims.append(x_axis)
gather_slice_shape.append(1)
start_index_map.append(x_axis)
x_axis += 1
# Handle np.newaxis (None)
elif i is None:
slice_shape.append(1)
newaxis_dims.append(y_axis)
y_axis += 1
# Handle slice(None)
elif _is_slice_none(i):
slice_shape.append(x_shape[x_axis])
gather_slice_shape.append(x_shape[x_axis])
offset_dims.append(collapsed_y_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
# Handle slice index (only static, otherwise an error is raised)
elif isinstance(i, slice):
if not _all(elt is None or type(elt) is Poly
or type(core.get_aval(elt)) is ConcreteArray
for elt in (i.start, i.stop, i.step)):
msg = ("Array slice indices must have static start/stop/step to be used "
"with NumPy indexing syntax. To index a statically sized "
"array at a dynamic position, try lax.dynamic_slice/"
"dynamic_update_slice (JAX does not support dynamically sized "
"arrays within JIT compiled functions).")
raise IndexError(msg)
start, limit, stride, needs_rev = _static_idx(i, x_shape[x_axis])
if needs_rev:
reversed_y_dims.append(collapsed_y_axis)
if stride == 1:
i = lax.convert_element_type(start, index_dtype)
i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))
gather_indices = concatenate((gather_indices, i), -1)
slice_shape.append(limit - start)
gather_slice_shape.append(limit - start)
offset_dims.append(collapsed_y_axis)
start_index_map.append(x_axis)
else:
i = arange(start, limit, stride, dtype=index_dtype)
size = i.shape[0]
slice_shape.append(size)
gather_slice_shape.append(1)
gather_indices_shape = tuple(gather_indices.shape[:-1]) + (size,)
i = lax.broadcast_in_dim(
i, shape=gather_indices_shape + (1,),
broadcast_dimensions=(len(gather_indices_shape) - 1,))
gather_indices = lax.broadcast_in_dim(
gather_indices,
shape=gather_indices_shape + (len(start_index_map),),
broadcast_dimensions=(
tuple(range(len(gather_indices_shape) - 1)) +
(len(gather_indices_shape),)))
gather_indices = concatenate(
(gather_indices, i), len(gather_indices_shape))
start_index_map.append(x_axis)
collapsed_slice_dims.append(x_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
else:
if (abstract_i is not None and
not (issubdtype(abstract_i.dtype, integer) or issubdtype(abstract_i.dtype, bool_))):
msg = ("Indexer must have integer or boolean type, got indexer "
"with type {} at position {}, indexer value {}")
raise TypeError(msg.format(abstract_i.dtype.name, idx_pos, i))
msg = "Indexing mode not yet supported. Open a feature request!\n{}"
raise IndexError(msg.format(idx))
dnums = lax.GatherDimensionNumbers(
offset_dims = tuple(offset_dims),
collapsed_slice_dims = tuple(sorted(collapsed_slice_dims)),
start_index_map = tuple(start_index_map)
)
return _Indexer(
slice_shape=slice_shape,
newaxis_dims=tuple(newaxis_dims),
gather_slice_shape=gather_slice_shape,
reversed_y_dims=reversed_y_dims,
dnums=dnums,
gather_indices=gather_indices)
def _should_unpack_list_index(x):
"""Helper for _eliminate_deprecated_list_indexing."""
return (isinstance(x, ndarray) and np.ndim(x) != 0
or isinstance(x, (Sequence, slice))
or x is Ellipsis or x is None)
def _eliminate_deprecated_list_indexing(idx):
# "Basic slicing is initiated if the selection object is a non-array,
# non-tuple sequence containing slice objects, [Ellipses, or newaxis
# objects]". Detects this and raises a TypeError.
if not isinstance(idx, tuple):
if isinstance(idx, Sequence) and not isinstance(idx, ndarray):
# As of numpy 1.16, some non-tuple sequences of indices result in a warning, while
# others are converted to arrays, based on a set of somewhat convoluted heuristics
# (See https://github.com/numpy/numpy/blob/v1.19.2/numpy/core/src/multiarray/mapping.c#L179-L343)
# In JAX, we raise an informative TypeError for *all* non-tuple sequences.
if _any(_should_unpack_list_index(i) for i in idx):
msg = ("Using a non-tuple sequence for multidimensional indexing is not allowed; "
"use `arr[tuple(seq)]` instead of `arr[seq]`. "
"See https://github.com/google/jax/issues/4564 for more information.")
else:
msg = ("Using a non-tuple sequence for multidimensional indexing is not allowed; "
"use `arr[array(seq)]` instead of `arr[seq]`. "
"See https://github.com/google/jax/issues/4564 for more information.")
raise TypeError(msg)
else:
idx = (idx,)
return idx
def _expand_bool_indices(idx):
"""Converts concrete bool indexes into advanced integer indexes."""
out = []
for i in idx:
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
if (isinstance(abstract_i, ShapedArray) and issubdtype(abstract_i.dtype, bool_)
or isinstance(i, list) and _all(not _shape(e) and issubdtype(_dtype(e), bool_)
for e in i)):
if isinstance(i, list):
i = array(i)
abstract_i = core.get_aval(i)
if not type(abstract_i) is ConcreteArray:
# TODO(mattjj): improve this error by tracking _why_ the indices are not
# concrete
raise IndexError("Array boolean indices must be concrete.")
else:
out.extend(np.where(i))
else:
out.append(i)
return tuple(out)
def _is_slice_none(idx):
"""Return True if idx is equal to slice(None), False otherwise."""
if isinstance(idx, slice):
return idx.start is None and idx.stop is None and idx.step is None
# TODO(mattjj): clean up this logic
def _is_advanced_int_indexer(idx):
"""Returns True if idx should trigger int array indexing, False otherwise."""
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
assert isinstance(idx, tuple)
if _all(np.ndim(elt) == 0 for elt in idx):
return False
return _all(e is None or e is Ellipsis or isinstance(e, slice)
or _is_int_arraylike(e) for e in idx)
def _is_int_arraylike(x):
"""Returns True if x is array-like with integer dtype, False otherwise."""
return (isinstance(x, int) and not isinstance(x, bool)
or issubdtype(getattr(x, "dtype", None), np.integer)
or isinstance(x, (list, tuple)) and _all(_is_int_arraylike(e) for e in x))
def _canonicalize_tuple_index(arr_ndim, idx):
"""Helper to remove Ellipsis and add in the implicit trailing slice(None)."""
len_without_none = _sum(1 for e in idx if e is not None and e is not Ellipsis)
if len_without_none > arr_ndim:
msg = "Too many indices for array: {} non-None/Ellipsis indices for dim {}."
raise IndexError(msg.format(len_without_none, arr_ndim))
ellipses = (i for i, elt in enumerate(idx) if elt is Ellipsis)
ellipsis_index = next(ellipses, None)
if ellipsis_index is not None:
if next(ellipses, None) is not None:
msg = "Multiple ellipses (...) not supported: {}."
raise IndexError(msg.format(list(map(type, idx))))
colons = (slice(None),) * (arr_ndim - len_without_none)
idx = idx[:ellipsis_index] + colons + idx[ellipsis_index + 1:]
elif len_without_none < arr_ndim:
colons = (slice(None),) * (arr_ndim - len_without_none)
idx = tuple(idx) + colons
return idx
def _polymorphic_slice_indices(idx: slice, size: Union[int, Poly]):
# like idx.indices(size), but allows for polymorphic indices and size
# see https://github.com/python/cpython/blob/6d6508765514c7c10719478a0430f5e47c9a96ac/Objects/sliceobject.c#L372
assert isinstance(idx, slice)
step = 1 if idx.step is None else idx.step
step_is_negative = step < 0
lower = -1 if step_is_negative else 0
upper = size + lower
def sanitize(index, default):
if index is None:
return default
elif type(index) is Poly:
return index
elif index < 0:
return _max(index + size, lower)
else:
return _min(index, upper)
start = sanitize(idx.start, default=upper if step_is_negative else lower)
stop = sanitize(idx.stop, default=lower if step_is_negative else upper)
return start, stop, step
def _static_idx(idx: slice, size: Union[int, Poly]):
"""Helper function to compute the static slice start/limit/stride values."""
if _any(type(s) is Poly for s in (idx.start, idx.stop, idx.step, size)):
start, stop, step = _polymorphic_slice_indices(idx, size)
elif isinstance(size, int):
start, stop, step = idx.indices(size)
else:
raise TypeError(size)
if type(start) is not Poly and type(stop) is not Poly:
if (step < 0 and stop >= start) or (step > 0 and start >= stop):
return 0, 0, 1, False # sliced to size zero
if step > 0:
return start, stop, step, False
else:
k = (start - stop - 1) % (-step)
return stop + k + 1, start + 1, -step, True
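# Worked example (illustrative only): a negative-stride slice such as x[7::-2]
# on an axis of size 8 maps to the forward slice start=1, limit=8, stride=2
# plus a reversal, i.e. _static_idx(slice(7, None, -2), 8) -> (1, 8, 2, True),
# which selects elements 7, 5, 3, 1 once the reverse is applied.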
blackman = _wrap_numpy_nullary_function(np.blackman)
bartlett = _wrap_numpy_nullary_function(np.bartlett)
hamming = _wrap_numpy_nullary_function(np.hamming)
hanning = _wrap_numpy_nullary_function(np.hanning)
# TODO: lower `kaiser` via lax to allow non-constant beta values.
kaiser = _wrap_numpy_nullary_function(np.kaiser)
def _gcd_cond_fn(xs):
x1, x2 = xs
return any(x2 != 0)
def _gcd_body_fn(xs):
x1, x2 = xs
x1, x2 = (where(x2 != 0, x2, x1),
where(x2 != 0, lax.rem(x1, x2), lax._const(x2, 0)))
return (where(x1 < x2, x2, x1), where(x1 < x2, x1, x2))
@_wraps(getattr(np, "gcd", None))
def gcd(x1, x2):
_check_arraylike("gcd", x1, x2)
if (not issubdtype(_dtype(x1), integer) or
not issubdtype(_dtype(x2), integer)):
raise ValueError("Arguments to jax.numpy.gcd must be integers.")
x1, x2 = _promote_dtypes(x1, x2)
x1, x2 = broadcast_arrays(x1, x2)
gcd, _ = lax.while_loop(_gcd_cond_fn, _gcd_body_fn, (abs(x1), abs(x2)))
return gcd
@_wraps(getattr(np, "lcm", None))
def lcm(x1, x2):
_check_arraylike("lcm", x1, x2)
x1, x2 = _promote_dtypes(x1, x2)
d = gcd(x1, x2)
return where(d == 0, lax._const(d, 0),
abs(multiply(x1, floor_divide(x2, d))))
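# Illustrative sketch (not part of the original module; the example helper below is
# hypothetical): gcd runs an elementwise Euclidean algorithm via lax.while_loop, and
# lcm is derived from it as abs(x1 * (x2 // gcd(x1, x2))), with 0 where the gcd is 0.
def _example_gcd_lcm():
  a = array([12, 20, 7])
  b = array([18, 8, 0])
  return gcd(a, b), lcm(a, b)  # -> [6, 4, 7] and [36, 40, 0]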
@_wraps(np.extract)
def extract(condition, arr):
return compress(ravel(condition), ravel(arr))
@_wraps(np.compress)
def compress(condition, a, axis=None, out=None):
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.compress is not supported.")
if ndim(condition) != 1:
raise ValueError("condition must be a 1D array")
condition = asarray(condition).astype(bool)
a = asarray(a)
if axis is None:
axis = 0
a = ravel(a)
else:
a = moveaxis(a, axis, 0)
condition, extra = condition[:a.shape[0]], condition[a.shape[0]:]
if any(extra):
raise ValueError("condition contains entries that are out of bounds")
a = a[:condition.shape[0]]
return moveaxis(a[condition], 0, axis)
@_wraps(np.cov)
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
if y is not None: raise NotImplementedError(
"jax.numpy.cov not implemented for nontrivial y. "
"Open a feature request at https://github.com/google/jax/issues !")
m, = _promote_args_inexact("cov", m)
if m.ndim > 2:
raise ValueError("m has more than 2 dimensions") # same as numpy error
X = atleast_2d(m)
if not rowvar and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return array([]).reshape(0, 0)
if ddof is None:
ddof = 1 if bias == 0 else 0
w = None
if fweights is not None:
_check_arraylike("cov", fweights)
if ndim(fweights) > 1:
raise RuntimeError("cannot handle multidimensional fweights")
if shape(fweights)[0] != X.shape[1]:
raise RuntimeError("incompatible numbers of samples and fweights")
if not issubdtype(_dtype(fweights), integer):
raise TypeError("fweights must be integer.")
# Ensure positive fweights; note that numpy raises an error on negative fweights.
w = asarray(abs(fweights))
if aweights is not None:
_check_arraylike("cov", aweights)
if ndim(aweights) > 1:
raise RuntimeError("cannot handle multidimensional aweights")
if shape(aweights)[0] != X.shape[1]:
raise RuntimeError("incompatible numbers of samples and aweights")
# Ensure positive aweights: note that numpy raises an error for negative aweights.
aweights = abs(aweights)
w = aweights if w is None else w * aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
if w is None:
f = X.shape[1] - ddof
elif ddof == 0:
f = w_sum
elif aweights is None:
f = w_sum - ddof
else:
f = w_sum - ddof * sum(w * aweights) / w_sum
X = X - avg[:, None]
X_T = X.T if w is None else (X * w).T
return true_divide(dot(X, X_T.conj()), f).squeeze()
@_wraps(np.corrcoef)
def corrcoef(x, y=None, rowvar=True):
_check_arraylike("corrcoef", x)
c = cov(x, y, rowvar)
if len(shape(c)) == 0:
# scalar - this should yield nan for values (nan/nan, inf/inf, 0/0), 1 otherwise
return divide(c, c)
d = diag(c)
stddev = sqrt(real(d))
c = divide(c, stddev[:,None])
c = divide(c, stddev[None,:])
real_part = clip(real(c), -1, 1)
if iscomplexobj(c):
complex_part = clip(imag(c), -1, 1)
c = lax.complex(real_part, complex_part)
else:
c = real_part
return c
@_wraps(getattr(np, "quantile", None))
def quantile(a, q, axis=None, out=None, overwrite_input=False,
interpolation="linear", keepdims=False):
_check_arraylike("quantile", a, q)
if overwrite_input or out is not None:
msg = ("jax.numpy.quantile does not support overwrite_input=True or "
"out != None")
raise ValueError(msg)
return _quantile(a, q, axis, interpolation, keepdims, False)
@_wraps(getattr(np, "nanquantile", None))
def nanquantile(a, q, axis=None, out=None, overwrite_input=False,
interpolation="linear", keepdims=False):
_check_arraylike("nanquantile", a, q)
if overwrite_input or out is not None:
msg = ("jax.numpy.nanquantile does not support overwrite_input=True or "
"out != None")
raise ValueError(msg)
return _quantile(a, q, axis, interpolation, keepdims, True)
@partial(jit, static_argnums=(2, 3, 4, 5))
def _quantile(a, q, axis, interpolation, keepdims, squash_nans):
if interpolation not in ["linear", "lower", "higher", "midpoint", "nearest"]:
raise ValueError("interpolation can only be 'linear', 'lower', 'higher', "
"'midpoint', or 'nearest'")
a = asarray(a, dtype=promote_types(_dtype(a), float32))
q = asarray(q, dtype=promote_types(_dtype(q), float32))
if axis is None:
a = ravel(a)
axis = 0
elif isinstance(axis, tuple):
raise NotImplementedError("Tuple values for axis are not implemented")
else:
axis = _canonicalize_axis(axis, ndim(a))
q_shape = shape(q)
q_ndim = ndim(q)
if q_ndim > 1:
raise ValueError("q must be have rank <= 1, got shape {}".format(shape(q)))
a_shape = shape(a)
a = lax.sort(a, dimension=axis)
if squash_nans:
counts = sum(logical_not(isnan(a)), axis=axis, dtype=q.dtype,
keepdims=keepdims)
shape_after_reduction = counts.shape
q = lax.expand_dims(
q, tuple(range(q_ndim, len(shape_after_reduction) + q_ndim)))
counts = lax.expand_dims(counts, tuple(range(q_ndim)))
q = lax.mul(q, lax.sub(counts, _constant_like(q, 1)))
low = lax.floor(q)
high = lax.ceil(q)
high_weight = lax.sub(q, low)
low_weight = lax.sub(_constant_like(high_weight, 1), high_weight)
low = lax.max(_constant_like(low, 0), lax.min(low, counts - 1))
high = lax.max(_constant_like(high, 0), lax.min(high, counts - 1))
low = lax.convert_element_type(low, int64)
high = lax.convert_element_type(high, int64)
out_shape = q_shape + shape_after_reduction
index = [lax.broadcasted_iota(int64, out_shape, dim + q_ndim)
for dim in range(len(shape_after_reduction))]
if keepdims:
index[axis] = low
else:
index.insert(axis, low)
low_value = a[tuple(index)]
index[axis] = high
high_value = a[tuple(index)]
else:
n = a_shape[axis]
q = lax.mul(q, _constant_like(q, n - 1))
low = lax.floor(q)
high = lax.ceil(q)
high_weight = lax.sub(q, low)
low_weight = lax.sub(_constant_like(high_weight, 1), high_weight)
low = lax.clamp(_constant_like(low, 0), low, _constant_like(low, n - 1))
high = lax.clamp(_constant_like(high, 0), high, _constant_like(high, n - 1))
low = lax.convert_element_type(low, int64)
high = lax.convert_element_type(high, int64)
slice_sizes = list(a_shape)
slice_sizes[axis] = 1
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(range(
q_ndim,
len(a_shape) + q_ndim if keepdims else len(a_shape) + q_ndim - 1)),
collapsed_slice_dims=() if keepdims else (axis,),
start_index_map=(axis,))
low_value = lax.gather(a, low[..., None], dimension_numbers=dnums,
slice_sizes=slice_sizes)
high_value = lax.gather(a, high[..., None], dimension_numbers=dnums,
slice_sizes=slice_sizes)
if q_ndim == 1:
low_weight = lax.broadcast_in_dim(low_weight, low_value.shape,
broadcast_dimensions=(0,))
high_weight = lax.broadcast_in_dim(high_weight, high_value.shape,
broadcast_dimensions=(0,))
if interpolation == "linear":
result = lax.add(lax.mul(low_value.astype(q.dtype), low_weight),
lax.mul(high_value.astype(q.dtype), high_weight))
elif interpolation == "lower":
result = low_value
elif interpolation == "higher":
result = high_value
elif interpolation == "nearest":
pred = lax.le(high_weight, _constant_like(high_weight, 0.5))
result = lax.select(pred, low_value, high_value)
elif interpolation == "midpoint":
result = lax.mul(lax.add(low_value, high_value), _constant_like(low_value, 0.5))
else:
raise ValueError(f"interpolation={interpolation!r} not recognized")
return lax.convert_element_type(result, a.dtype)
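# Illustrative arithmetic sketch (not part of the original module; the example helper
# below is hypothetical) of the "linear" branch above: a quantile q is mapped to the
# fractional position q * (n - 1) in the sorted data and the two neighbouring order
# statistics are blended by the fractional part.
def _example_quantile_interpolation():
  a_sorted = np.array([1., 2., 4., 8., 16.])  # n = 5
  q = 0.3
  pos = q * (a_sorted.size - 1)               # 1.2
  low, high = int(np.floor(pos)), int(np.ceil(pos))
  high_w = pos - low                          # 0.2
  low_w = 1. - high_w                         # 0.8
  return low_w * a_sorted[low] + high_w * a_sorted[high]  # 2.4, matching np.quantile(a_sorted, 0.3)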
@partial(jit, static_argnums=2)
@partial(vectorize, excluded={0, 2})
def _searchsorted(a, v, side):
if len(a) == 0:
return 0
op = operator.le if side == 'left' else operator.lt
def body_fun(i, state):
low, high = state
mid = (low + high) // 2
go_left = op(v, a[mid])
return (where(go_left, low, mid), where(go_left, mid, high))
n_levels = int(np.ceil(np.log2(len(a) + 1)))
return lax.fori_loop(0, n_levels, body_fun, (0, len(a)))[1]
@_wraps(np.searchsorted)
def searchsorted(a, v, side='left', sorter=None):
if side not in ['left', 'right']:
raise ValueError(f"{side!r} is an invalid value for keyword 'side'")
if sorter is not None:
raise NotImplementedError("sorter is not implemented")
a = asarray(a)
v = asarray(v)
if ndim(a) != 1:
raise ValueError("a should be 1-dimensional")
return _searchsorted(a, v, side)
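# Illustrative sketch (not part of the original module; the example helper below is
# hypothetical): _searchsorted is a branchless binary search running
# ceil(log2(len(a) + 1)) fori_loop steps, and the wrapper follows NumPy's 'left' /
# 'right' insertion-point semantics.
def _example_searchsorted():
  a = array([1, 2, 2, 3])
  return searchsorted(a, 2, side='left'), searchsorted(a, 2, side='right')  # -> 1, 3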
@_wraps(np.digitize)
def digitize(x, bins, right=False):
if len(bins) == 0:
return zeros(x, dtype=dtypes.canonicalize_dtype(int_))
side = 'right' if not right else 'left'
return where(
bins[-1] >= bins[0],
searchsorted(bins, x, side=side),
len(bins) - searchsorted(bins[::-1], x, side=side)
)
_PIECEWISE_DOC = """\
Unlike `np.piecewise`, :py:func:`jax.numpy.piecewise` requires functions in
`funclist` to be traceable by JAX, as it is implemented via :func:`jax.lax.switch`.
See the :func:`jax.lax.switch` documentation for more information.
"""
@_wraps(np.piecewise, lax_description=_PIECEWISE_DOC)
def piecewise(x, condlist, funclist, *args, **kw):
_check_arraylike("piecewise", x)
condlist = array(condlist, dtype=bool_)
nc, nf = len(condlist), len(funclist)
if nf == nc + 1:
funclist = funclist[-1:] + funclist[:-1]
elif nf == nc:
funclist = [0] + list(funclist)
else:
raise ValueError(f"with {nc} condition(s), either {nc} or {nc+1} functions are expected; got {nf}")
indices = argmax(cumsum(vstack([zeros_like(condlist[:1]), condlist]), 0), 0)
dtype = _dtype(x)
def _call(f):
return lambda x: f(x, *args, **kw).astype(dtype)
def _const(v):
return lambda x: full_like(x, v)
funclist = [_call(f) if callable(f) else _const(f) for f in funclist]
return vectorize(lax.switch, excluded=(1,))(indices, funclist, x)
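# Illustrative usage sketch (not part of the original module; the example helper below
# is hypothetical): each element of x is routed to the function of the first condition
# that holds; with one extra function it acts as the default, so abs(x) can be written
# as a two-piece function.
def _example_piecewise():
  x = array([-2., -1., 0., 1., 2.])
  return piecewise(x, [x < 0], [lambda v: -v, lambda v: v])  # -> [2., 1., 0., 1., 2.]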
@_wraps(np.percentile)
def percentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation="linear", keepdims=False):
_check_arraylike("percentile", a)
q = true_divide(asarray(q), float32(100.0))
return quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
interpolation=interpolation, keepdims=keepdims)
@_wraps(np.nanpercentile)
def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation="linear", keepdims=False):
_check_arraylike("nanpercentile", a)
q = true_divide(asarray(q), float32(100.0))
return nanquantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
interpolation=interpolation, keepdims=keepdims)
@_wraps(np.median)
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
_check_arraylike("median", a)
return quantile(a, 0.5, axis=axis, out=out, overwrite_input=overwrite_input,
keepdims=keepdims, interpolation='midpoint')
@_wraps(np.nanmedian)
def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False):
_check_arraylike("nanmedian", a)
return nanquantile(a, 0.5, axis=axis, out=out,
overwrite_input=overwrite_input, keepdims=keepdims,
interpolation='midpoint')
def _astype(arr, dtype):
lax._check_user_dtype_supported(dtype, "astype")
return lax.convert_element_type(arr, dtype)
def _nbytes(arr):
return size(arr) * _dtype(arr).itemsize
def _view(arr, dtype=None, type=None):
lax._check_user_dtype_supported(dtype, "view")
if type is not None:
raise NotImplementedError("`type` argument of array.view()")
if dtype is None:
return arr
arr_dtype = _dtype(arr)
if arr_dtype == dtype:
return arr
# bool is implemented as lax:PRED, which is not compatible with lax.bitcast_convert_type.
# We work around this by casting bool to uint8.
if arr_dtype == bool_:
arr = arr.astype(uint8)
nbits_in = 8 * arr_dtype.itemsize
nbits_out = 8 * _dtype(dtype).itemsize
if nbits_in == nbits_out:
if dtype == bool_:
return lax.bitcast_convert_type(arr, uint8).astype(dtype)
return lax.bitcast_convert_type(arr, dtype)
if nbits_out > nbits_in and (shape(arr)[-1] * nbits_in) % nbits_out != 0:
raise ValueError("When changing to a larger dtype, its size must be a divisor "
"of the total size in bytes of the last axis of the array.")
byte_dtypes = {8: uint8, 16: uint16, 32: uint32, 64: uint64}
if nbits_in not in byte_dtypes:
raise NotImplementedError(f"arr.view() for arr.dtype={arr_dtype}")
if nbits_out not in byte_dtypes:
raise NotImplementedError(f"arr.view(dtype) for dtype={dtype}")
dt_in = byte_dtypes[nbits_in]
dt_out = byte_dtypes[nbits_out]
arr_bytes = lax.bitcast_convert_type(arr, dt_in)
if nbits_in < nbits_out:
shifts = arange(0, nbits_out, nbits_in, dtype=dt_out)
arr_bytes = arr_bytes.reshape(arr.shape[:-1] + (-1, nbits_out // nbits_in)).astype(dt_out)
arr_bytes = (arr_bytes << shifts).sum(-1).astype(dt_out)
else:
shifts = arange(0, nbits_in, nbits_out, dtype=dt_in)
arr_bytes = ((arr_bytes[..., newaxis] >> shifts) & iinfo(dt_out).max).astype(dt_out)
arr_bytes = arr_bytes.reshape(arr_bytes.shape[:-2] + (-1,))
if dtype == bool_:
return lax.bitcast_convert_type(arr_bytes, uint8).astype(dtype)
return lax.bitcast_convert_type(arr_bytes, dtype)
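# Illustrative sketch (not part of the original module; the example helper below is
# hypothetical): view() reinterprets the underlying bits, so four uint8 values pack
# into one little-endian uint32, and float32 1.0 reads back as its IEEE-754 bit
# pattern 0x3f800000.
def _example_view():
  packed = array([0x78, 0x56, 0x34, 0x12], dtype=uint8).view(uint32)  # [0x12345678]
  bits = array([1.0], dtype=float32).view(uint32)                     # [0x3f800000]
  return packed, bits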
### track unimplemented functions
_NOT_IMPLEMENTED_DESC = """
*** This function is not yet implemented by jax.numpy, and will raise NotImplementedError ***
"""
def _not_implemented(fun):
@_wraps(fun, update_doc=False, lax_description=_NOT_IMPLEMENTED_DESC)
def wrapped(*args, **kwargs):
msg = "Numpy function {} not yet implemented"
raise NotImplementedError(msg.format(fun))
return wrapped
### add method and operator overloads to arraylike classes
# We add operator overloads to DeviceArray and ShapedArray. These method and
# operator overloads mainly just forward calls to the corresponding lax_numpy
# functions, which can themselves handle instances from any of these classes.
_scalar_types = (int, float, complex, np.generic)
def _defer_to_unrecognized_arg(binary_op):
# Ensure that other array types have the chance to override arithmetic.
def deferring_binary_op(self, other):
if not isinstance(other, _scalar_types + _arraylike_types + (core.Tracer,)):
return NotImplemented
return binary_op(self, other)
return deferring_binary_op
def _swap_args(f):
return lambda x, y: f(y, x)
def _unimplemented_setitem(self, i, x):
msg = ("'{}' object does not support item assignment. JAX arrays are "
"immutable; perhaps you want jax.ops.index_update or "
"jax.ops.index_add instead?")
raise TypeError(msg.format(type(self)))
def _operator_round(number, ndigits=None):
out = round(number, decimals=ndigits or 0)
# If `ndigits` is None, for a builtin float round(7.5) returns an integer.
return out.astype(int) if ndigits is None else out
_operators = {
"getitem": _rewriting_take,
"setitem": _unimplemented_setitem,
"neg": negative,
"pos": positive,
"eq": _defer_to_unrecognized_arg(equal),
"ne": _defer_to_unrecognized_arg(not_equal),
"lt": _defer_to_unrecognized_arg(less),
"le": _defer_to_unrecognized_arg(less_equal),
"gt": _defer_to_unrecognized_arg(greater),
"ge": _defer_to_unrecognized_arg(greater_equal),
"abs": abs,
"add": _defer_to_unrecognized_arg(add),
"radd": _defer_to_unrecognized_arg(add),
"sub": _defer_to_unrecognized_arg(subtract),
"rsub": _defer_to_unrecognized_arg(_swap_args(subtract)),
"mul": _defer_to_unrecognized_arg(multiply),
"rmul": _defer_to_unrecognized_arg(multiply),
"div": _defer_to_unrecognized_arg(divide),
"rdiv": _defer_to_unrecognized_arg(_swap_args(divide)),
"truediv": _defer_to_unrecognized_arg(true_divide),
"rtruediv": _defer_to_unrecognized_arg(_swap_args(true_divide)),
"floordiv": _defer_to_unrecognized_arg(floor_divide),
"rfloordiv": _defer_to_unrecognized_arg(_swap_args(floor_divide)),
"divmod": _defer_to_unrecognized_arg(divmod),
"rdivmod": _defer_to_unrecognized_arg(_swap_args(divmod)),
"mod": _defer_to_unrecognized_arg(mod),
"rmod": _defer_to_unrecognized_arg(_swap_args(mod)),
"pow": _defer_to_unrecognized_arg(power),
"rpow": _defer_to_unrecognized_arg(_swap_args(power)),
"matmul": _defer_to_unrecognized_arg(matmul),
"rmatmul": _defer_to_unrecognized_arg(_swap_args(matmul)),
"and": _defer_to_unrecognized_arg(bitwise_and),
"rand": _defer_to_unrecognized_arg(bitwise_and),
"or": _defer_to_unrecognized_arg(bitwise_or),
"ror": _defer_to_unrecognized_arg(bitwise_or),
"xor": _defer_to_unrecognized_arg(bitwise_xor),
"rxor": _defer_to_unrecognized_arg(bitwise_xor),
"invert": bitwise_not,
"lshift": _defer_to_unrecognized_arg(left_shift),
"rshift": _defer_to_unrecognized_arg(right_shift),
"rlshift": _defer_to_unrecognized_arg(_swap_args(left_shift)),
"rrshift": _defer_to_unrecognized_arg(_swap_args(right_shift)),
"round": _operator_round,
}
# These numpy.ndarray methods are just refs to an equivalent numpy function
_nondiff_methods = ["all", "any", "argmax", "argmin", "argpartition", "argsort",
"nonzero", "searchsorted", "round"]
_diff_methods = ["clip", "conj", "conjugate", "cumprod", "cumsum",
"diagonal", "dot", "max", "mean", "min", "prod", "ptp",
"ravel", "repeat", "sort", "squeeze", "std", "sum",
"swapaxes", "take", "tile", "trace", "transpose", "var"]
# These methods are mentioned explicitly by nondiff_methods, so we create
# _not_implemented implementations of them here rather than in __init__.py.
# TODO(phawkins): implement these.
argpartition = _not_implemented(np.argpartition)
_NOT_IMPLEMENTED = ['argpartition']
# Set up operator, method, and property forwarding on Tracer instances containing
# ShapedArray avals by following the forwarding conventions for Tracer.
# Forward operators using a single-underscore-prefix naming convention:
for operator_name, function in _operators.items():
setattr(ShapedArray, "_{}".format(operator_name), staticmethod(function))
# Forward methods and properties using core.aval_method and core.aval_property:
for method_name in _nondiff_methods + _diff_methods:
setattr(ShapedArray, method_name, core.aval_method(globals()[method_name]))
setattr(ShapedArray, "reshape", core.aval_method(_reshape_method))
setattr(ShapedArray, "flatten", core.aval_method(ravel))
setattr(ShapedArray, "T", core.aval_property(transpose))
setattr(ShapedArray, "real", core.aval_property(real))
setattr(ShapedArray, "imag", core.aval_property(imag))
setattr(ShapedArray, "astype", core.aval_method(_astype))
setattr(ShapedArray, "view", core.aval_method(_view))
setattr(ShapedArray, "nbytes", core.aval_property(_nbytes))
# Forward operators, methods, and properties on DeviceArray to lax_numpy
# functions (with no Tracers involved; this forwarding is direct)
for device_array in [_DeviceArray, _CppDeviceArray]:
for operator_name, function in _operators.items():
setattr(device_array, "__{}__".format(operator_name), function)
for method_name in _nondiff_methods + _diff_methods:
setattr(device_array, method_name, globals()[method_name])
setattr(device_array, "reshape", _reshape_method)
setattr(device_array, "flatten", ravel)
setattr(device_array, "T", property(transpose))
setattr(device_array, "real", property(real))
setattr(device_array, "imag", property(imag))
setattr(device_array, "astype", _astype)
setattr(device_array, "view", _view)
setattr(device_array, "nbytes", property(_nbytes))
# Experimental support for NumPy's module dispatch with NEP-37.
# Currently requires https://github.com/seberg/numpy-dispatch
_JAX_ARRAY_TYPES = (DeviceArray, core.Tracer)
_HANDLED_ARRAY_TYPES = _JAX_ARRAY_TYPES + (np.ndarray,)
def __array_module__(self, types):
if builtins.all(issubclass(t, _HANDLED_ARRAY_TYPES) for t in types):
return jax.numpy
else:
return NotImplemented
setattr(ShapedArray, "_array_module", staticmethod(__array_module__))
setattr(_DeviceArray, "__array_module__", __array_module__)
setattr(_CppDeviceArray, "__array_module__", __array_module__)
# Extra methods that are handy
setattr(ShapedArray, "broadcast", core.aval_method(lax.broadcast))
setattr(ShapedArray, "broadcast_in_dim", core.aval_method(lax.broadcast_in_dim))
setattr(ShapedArray, "split", core.aval_method(split))
for device_array in [_DeviceArray, _CppDeviceArray]:
setattr(device_array, "broadcast", lax.broadcast)
setattr(device_array, "broadcast_in_dim", lax.broadcast_in_dim)
setattr(device_array, "split", split)
def _compress_method(a, condition, axis=None, out=None):
return compress(condition, a, axis, out)
setattr(ShapedArray, "compress", _compress_method)
setattr(_DeviceArray, "compress", _compress_method)
setattr(_CppDeviceArray, "compress", _compress_method)
@partial(jit, static_argnums=(1,2,3))
def _multi_slice(arr,
start_indices: Tuple[Tuple[int, ...]],
limit_indices: Tuple[Tuple[int, ...]],
removed_dims: Tuple[Tuple[int, ...]]):
"""Extracts multiple slices from `arr`.
This is used to shard DeviceArray arguments to pmap. It's implemented as a
DeviceArray method here to avoid circular imports.
"""
results = []
for starts, limits, removed in safe_zip(start_indices, limit_indices, removed_dims):
sliced = lax.slice(arr, starts, limits)
if removed:
sliced = sliced.reshape(np.delete(sliced.shape, removed_dims))
results.append(sliced)
return results
setattr(_DeviceArray, "_multi_slice", _multi_slice)
setattr(_CppDeviceArray, "_multi_slice", _multi_slice)
# Syntactic sugar for scatter operations.
class _IndexUpdateHelper:
# Note: this docstring will appear as the docstring for the `at` property.
"""Indexable helper object to call indexed update functions.
The `at` property is syntactic sugar for calling the indexed update functions
defined in :mod:`jax.ops`, and acts as a pure equivalent of in-place
  modifications.
In particular:
- ``x = x.at[idx].set(y)`` is a pure equivalent of ``x[idx] = y``.
- ``x = x.at[idx].add(y)`` is a pure equivalent of ``x[idx] += y``.
- ``x = x.at[idx].mul(y)`` is a pure equivalent of ``x[idx] *= y``.
- ``x = x.at[idx].min(y)`` is a pure equivalent of
``x[idx] = minimum(x[idx], y)``.
- ``x = x.at[idx].max(y)`` is a pure equivalent of
``x[idx] = maximum(x[idx], y)``.
"""
__slots__ = ("array",)
def __init__(self, array):
self.array = array
def __getitem__(self, index):
return _IndexUpdateRef(self.array, index)
def __repr__(self):
return f"_IndexUpdateHelper({repr(self.array)})"
class _IndexUpdateRef:
"""Helper object to call indexed update functions for an (advanced) index.
This object references a source array and a specific indexer into that array.
Methods on this object return copies of the source array that have been
modified at the positions specified by the indexer.
"""
__slots__ = ("array", "index")
def __init__(self, array, index):
self.array = array
self.index = index
def __repr__(self):
return f"_IndexUpdateRef({repr(self.array)}, {repr(self.index)})"
def set(self, values, indices_are_sorted=False, unique_indices=False):
"""Pure equivalent of ``x[idx] = y``.
``x.at[idx].set(y)`` is syntactic sugar for
``jax.ops.index_update(x, jax.ops.index[idx], y)``, and
returns the value of ``x`` that would result from the NumPy-style
    :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] = y``.
See :mod:`jax.ops` for details.
"""
return ops.index_update(self.array, self.index, values,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
def add(self, values, indices_are_sorted=False, unique_indices=False):
"""Pure equivalent of ``x[idx] += y``.
``x.at[idx].add(y)`` is syntactic sugar for
``jax.ops.index_add(x, jax.ops.index[idx], y)``, and
returns the value of ``x`` that would result from the NumPy-style
    :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] += y``.
See :mod:`jax.ops` for details.
"""
return ops.index_add(self.array, self.index, values,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
def mul(self, values, indices_are_sorted=False, unique_indices=False):
"""Pure equivalent of ``x[idx] += y``.
``x.at[idx].mul(y)`` is syntactic sugar for
``jax.ops.index_mul(x, jax.ops.index[idx], y)``, and
returns the value of ``x`` that would result from the NumPy-style
    :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] *= y``.
See :mod:`jax.ops` for details.
"""
return ops.index_mul(self.array, self.index, values,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
def min(self, values, indices_are_sorted=False, unique_indices=False):
"""Pure equivalent of ``x[idx] = minimum(x[idx], y)``.
``x.at[idx].min(y)`` is syntactic sugar for
``jax.ops.index_min(x, jax.ops.index[idx], y)``, and
returns the value of ``x`` that would result from the NumPy-style
    :mod:`indexed assignment <numpy.doc.indexing>`
``x[idx] = minimum(x[idx], y)``.
See :mod:`jax.ops` for details.
"""
return ops.index_min(self.array, self.index, values,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
def max(self, values, indices_are_sorted=False, unique_indices=False):
"""Pure equivalent of ``x[idx] = maximum(x[idx], y)``.
``x.at[idx].max(y)`` is syntactic sugar for
``jax.ops.index_max(x, jax.ops.index[idx], y)``, and
returns the value of ``x`` that would result from the NumPy-style
    :mod:`indexed assignment <numpy.doc.indexing>`
``x[idx] = maximum(x[idx], y)``.
See :mod:`jax.ops` for details.
"""
return ops.index_max(self.array, self.index, values,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
setattr(_DeviceArray, "at", property(_IndexUpdateHelper))
setattr(_CppDeviceArray, "at", property(_IndexUpdateHelper))
setattr(ShapedArray, "at", core.aval_property(_IndexUpdateHelper))
|
[] |
[] |
["JAX_NUMPY_RANK_PROMOTION"] |
[]
|
["JAX_NUMPY_RANK_PROMOTION"]
|
python
| 1 | 0 | |
main.py
|
"""
main.py: Main code to drive LSC-CNN
Authors : svp & dbs
"""
import argparse
import random
from data_reader import DataReader
import matplotlib
from matplotlib import pyplot as plt
import cv2
import numpy as np
import os
import random, string
import math
import pickle
from collections import OrderedDict
import torch
from torch import nn as nn, optim as optim
from torch.autograd import Variable
import datetime
from error_function import offset_sum
from scipy.misc import imsave, imresize
from utils import apply_nms
from network import LSCCNN
from utils.logging_tools import *
from utils.loss_weights import *
################ Architecture Hyper-parameters ################
# PRED_DOWNSCALE_FACTORS is the set of integer factors indicating how much to
# downscale the dimensions of the ground truth prediction for each scale output.
# Note that the data reader under default settings creates prediction maps at
# one-half resolution (wrt input sizes) and hence PRED_DOWNSCALE_FACTORS =
# (8, 4, 2, 1) translates to 1/16, 1/8, 1/4 and 1/2 prediction sizes (s={0,1,2,3}).
PRED_DOWNSCALE_FACTORS = (8, 4, 2, 1)
# Size increments for the box sizes (\gamma) as mentioned in the paper.
GAMMA = [1, 1, 2, 4]
# Number of predefined boxes per scale (n_{\mathcal{B}}).
NUM_BOXES_PER_SCALE = 3
# Loss Weights (to be read from .npy file while training)
loss_weights = None
###############################################################
# ---- Computing predefined box sizes and global variables
BOX_SIZE_BINS = [1]
BOX_IDX = [0]
g_idx = 0
while len(BOX_SIZE_BINS) < NUM_BOXES_PER_SCALE * len(PRED_DOWNSCALE_FACTORS):
gamma_idx = len(BOX_SIZE_BINS) // (len(GAMMA)-1)
box_size = BOX_SIZE_BINS[g_idx] + GAMMA[gamma_idx]
box_idx = gamma_idx*(NUM_BOXES_PER_SCALE+1) + (len(BOX_SIZE_BINS) % (len(GAMMA)-1))
BOX_IDX.append(box_idx)
BOX_SIZE_BINS.append(box_size)
g_idx += 1
BOX_INDEX = dict(zip(BOX_SIZE_BINS, BOX_IDX))
SCALE_BINS_ON_BOX_SIZE_BINS = [NUM_BOXES_PER_SCALE * (s + 1) \
for s in range(len(GAMMA))]
BOX_SIZE_BINS_NPY = np.array(BOX_SIZE_BINS)
BOXES = np.reshape(BOX_SIZE_BINS_NPY, (4, 3))
BOXES = BOXES[::-1]
metrics = ['loss1', 'new_mae']
# ----
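# Illustrative note (not part of the original source): for the default
# hyper-parameters above (GAMMA = [1, 1, 2, 4], 3 boxes per scale, 4 scales) the
# loop works out to
#   BOX_SIZE_BINS = [1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 20, 24]
#   BOXES         = [[16, 20, 24], [8, 10, 12], [4, 5, 6], [1, 2, 3]]
# so heads with large nearest-neighbour distances receive the big boxes of the
# coarse 1/16 scale and tightly packed heads the small boxes of the fine 1/2 scale.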
matplotlib.use('Agg')
parser = argparse.ArgumentParser(description='PyTorch LSC-CNN Training')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--gpu', default=1, type=int,
help='GPU number')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts),\
0-indexed - so equal to the number of epochs completed \
in the last save-file')
parser.add_argument('-b', '--batch-size', default=4, type=int, metavar='N',
help='mini-batch size (default: 4),only used for train')
parser.add_argument('--patches', default=100, type=int, metavar='N',
help='number of patches per image')
parser.add_argument('--dataset', default="parta", type=str,
help='dataset to train on')
parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float,
metavar='M', help='momentum')
parser.add_argument('--threshold', default=-1.0, type=float,
metavar='M', help='fixed threshold to do NMS')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W',
help='weight decay (default: 1e-4)')
parser.add_argument('--mle', action='store_true',
help='calculate mle')
parser.add_argument('--lsccnn', action='store_true',
help='use the vgg_modified network')
parser.add_argument('--trained-model', default='', type=str, metavar='PATH', help='filename of model to load', nargs='+')
dataset_paths, model_save_dir, batch_size, crop_size, dataset = None, None, None, None, None
class networkFunctions():
def __init__(self):
self.train_funcs = []
self.test_funcs = None
self.optimizers = None
'''
Get N channel ground truth for each scale. (Here N = 4 except for WIDERFACE)
    B1, B2, B3, Z - Bi's are box GT maps and Z is the background channel,
    i.e. set where there is no GT head at that location in any of the scales.
Parameters
-----------
    Yss (list of torch cuda tensor) - point GT maps, one per scale
Returns
-------
Yss_out (list of torch cuda tensor)
'''
def get_box_gt(self, Yss):
Yss_out = []
for yss in Yss: # iterate over all scales!
# Make empty maps of shape gt_pred_map.shape for x, y, w, h
w_map = np.zeros((yss.shape[0], 4) + yss.shape[2:]) # (B,4,h,w)
w_map[:, 3] = 1 # Making Z initialized as 1's since they are in majority!
Yss_out.append(w_map)
assert(len(Yss_out) == 4)
# Get largest spatial gt
yss_np = Yss[0].cpu().data.numpy()
gt_ref_map = yss_np # (B, 1, h, w)
# For every gt patch from the gt_ref_map
for b in range(0, gt_ref_map.shape[0]):
y_idx, x_idx = np.where(gt_ref_map[b][0] > 0)
num_heads = y_idx.shape[0]
if num_heads > 1:
distances = (x_idx - x_idx[np.newaxis, :].T) ** 2 + (y_idx - y_idx[np.newaxis, :].T) ** 2
min_distances = np.sqrt(np.partition(distances, 1, axis=1)[:, 1])
                min_distances = np.minimum(min_distances, np.inf)  # upper clamp with inf is a no-op; distances are used as-is
box_inds = np.digitize(min_distances, BOX_SIZE_BINS_NPY, False)
box_inds = np.maximum(box_inds - 1, 0) # to make zero based indexing
elif num_heads == 1:
box_inds = np.array([BOX_SIZE_BINS_NPY.shape[0] - 1])
else:
box_inds = np.array([])
assert(np.all(box_inds < BOX_SIZE_BINS_NPY.shape[0]))
scale_inds = np.digitize(box_inds, SCALE_BINS_ON_BOX_SIZE_BINS, False)
# Assign the w_maps
check_sum = 0
for i, (yss, w_map) in enumerate(zip(Yss, Yss_out)):
scale_sel_inds = (scale_inds == i)
check_sum += np.sum(scale_sel_inds)
if scale_sel_inds.shape[0] > 0:
# find box index in the scale
sel_box_inds = box_inds[scale_sel_inds]
scale_box_inds = sel_box_inds % 3
heads_y = y_idx[scale_sel_inds] // PRED_DOWNSCALE_FACTORS[3-i]
heads_x = x_idx[scale_sel_inds] // PRED_DOWNSCALE_FACTORS[3-i]
Yss_out[i][b, scale_box_inds, heads_y, heads_x] = BOX_SIZE_BINS_NPY[sel_box_inds]
Yss_out[i][b, 3, heads_y, heads_x] = 0
assert(check_sum == torch.sum(Yss[0][b]).item() == len(y_idx))
Yss_out = [torch.cuda.FloatTensor(w_map) for w_map in Yss_out]
check_sum = 0
for yss_out in Yss_out:
yss_out_argmax, _ = torch.max(yss_out[:, 0:3], dim=1)
yss_out_argmax = (yss_out_argmax>0).type(torch.cuda.FloatTensor)
check_sum += torch.sum(yss_out_argmax).item()
yss = (Yss[0]>0).type(torch.cuda.FloatTensor)
assert(torch.sum(yss) == check_sum)
return Yss_out
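    # Illustrative worked example (not part of the original source): a head in the
    # 1/2-scale GT whose nearest neighbour is 7 pixels away digitizes into
    # BOX_SIZE_BINS_NPY at index 5 (box size 6), and digitizing that index against
    # SCALE_BINS_ON_BOX_SIZE_BINS = [3, 6, 9, 12] gives scale index 1, so the box is
    # written into the 1/4-scale map Yss_out[1] at the head location divided by 2.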
'''
    This function upsamples a given tensor by a factor but makes sure there is no repetition
of values. Basically when upsampling by a factor of 2, there are 3 new places created. This fn.
instead of repeating the values, marks them 1.
    Caveat : this function currently supports upsampling by factor=2 only. For higher
    powers of 2, apply it multiple times. Factors other than powers of 2 are not supported.
Input - input (torch tensor) - A binary map denoting where the head is present. (Bx4xHxW)
factor (int) - factor by which you need to upsample
Output - output (torch tensor) - Upsampled and non-repeated output (Bx4xH'xW')
H' - upsampled height
W' - upsampled width
'''
def upsample_single(self, input_, factor=2):
channels = input_.size(1)
indices = torch.nonzero(input_)
indices_up = indices.clone()
# Corner case!
if indices_up.size(0) == 0:
return torch.zeros(input_.size(0),input_.size(1), input_.size(2)*factor, input_.size(3)*factor).cuda()
indices_up[:, 2] *= factor
indices_up[:, 3] *= factor
output = torch.zeros(input_.size(0),input_.size(1), input_.size(2)*factor, input_.size(3)*factor).cuda()
output[indices_up[:, 0], indices_up[:, 1], indices_up[:, 2], indices_up[:, 3]] = input_[indices[:, 0], indices[:, 1], indices[:, 2], indices[:, 3]]
output[indices_up[:, 0], channels-1, indices_up[:, 2]+1, indices_up[:, 3]] = 1.0
output[indices_up[:, 0], channels-1, indices_up[:, 2], indices_up[:, 3]+1] = 1.0
output[indices_up[:, 0], channels-1, indices_up[:, 2]+1, indices_up[:, 3]+1] = 1.0
output_check = nn.functional.max_pool2d(output, kernel_size=2)
return output
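    # Illustrative worked example (not part of the original source): for an input of
    # shape (1, 4, 2, 2) with a single box value 5.0 at channel 0, position (1, 1),
    # upsample_single(factor=2) returns a (1, 4, 4, 4) tensor with 5.0 at (0, 0, 2, 2)
    # and 1.0 written into the background channel at (0, 3, 2, 3), (0, 3, 3, 2) and
    # (0, 3, 3, 3); the box value itself is never repeated.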
'''
This function implements the GWTA loss in which it
divides the pred and gt into grids and calculates
loss on each grid and returns the maximum of the losses.
input : pred (torch.cuda.FloatTensor) - Bx4xHxW - prediction from the network
gt (torch.cuda.FloatTensor) - BxHxW - Ground truth points
criterion - criterion to take the loss between pred and gt
    grid_factor (int) - the prediction is divided into grid_factor**2 patches for taking the WTA loss
output : max_loss (torch.FloatTensor) - Maximum of the grid losses
'''
def gwta_loss(self, pred, gt, criterion, grid_factor=2):
num_grids = np.square(grid_factor)
patch_size = pred.size(3) / grid_factor
stride = int(patch_size.item())
pred_re = pred.unfold(2, int(patch_size.item()), stride).unfold(3, int(patch_size.item()), stride)
gt_re = gt.unfold(1, int(patch_size.item()), stride).unfold(2, int(patch_size.item()), stride)
pred_re = torch.reshape(pred, (pred.size(0), pred.size(1), num_grids, patch_size, patch_size)) # Bx4xnum_gridxH'XW'
gt_re = torch.reshape(gt, (gt.size(0), num_grids, patch_size, patch_size)) # Bxnum_gridxH'XW'
max_loss = -float("inf")
for ng in range(num_grids):
out = pred_re[:, :, ng]
yss = gt_re[:, ng]
curr_loss = criterion(out, yss)
if curr_loss > max_loss:
max_loss = curr_loss
return max_loss
'''
Create network functions i.e train and test functions
for LSC-CNN.
Parameters
-----------
network: (torch model)torch model to train.
Here len(network == 1)
Returns
---------
train_funcs: list of train function for each of the network in
network
test_funcs: list of test function for each of the network in
network
'''
def create_network_functions(self, network):
self.optimizers = optim.SGD(filter(lambda p: p.requires_grad, network.parameters()),
lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
'''
Train function for LSC-CNN, with GWTA Loss
and scale-wise weighting.
Parameters
-----------
Xs - (ndarray) Batched images
Ys - (ndarray) Batched Ground truth of largest scale
Returns
---------
losses: (list of float) list of loss values of each scale.
hist_boxes: (list) histogram of boxes of predictions
hist_boxes_gt: (list) histogram of boxes of gt.
'''
def train_function(Xs, Ys, hist_boxes, hist_boxes_gt, loss_weights, network):
Ys = (Ys>0).astype(np.float32)
network = network.cuda()
self.optimizers.zero_grad()
if torch.cuda.is_available():
X = torch.autograd.Variable(torch.from_numpy(Xs)).cuda()
Y = torch.autograd.Variable(torch.FloatTensor(Ys)).cuda()
Yss = [Y]
else:
assert(0)
for s in range(0, 3):
Yss.append(torch.nn.functional.avg_pool2d(Yss[s], (2, 2)) * 4)
output_vars = [network(X, None)]
outputs_1 = [out for out in output_vars[0]]
Yss_out = self.get_box_gt(Yss) # Making 4 channel ground truth
Yss = Yss[::-1] # Reverse GT for uniformity of having lowest scale in the beginning
            Yss_out = Yss_out[::-1] # Reverse box GT for uniformity of having lowest scale in the beginning
# Put outputs in list
outputs = [out for out in output_vars[0]]
losses = []
sums = []
Yss_argmax = [torch.argmax(yss, dim=1) for yss in Yss_out]
alpha1 = torch.cuda.FloatTensor(loss_weights[3]) # 1/16 scale
alpha2 = torch.cuda.FloatTensor(loss_weights[2]) # 1/8 scale
alpha3 = torch.cuda.FloatTensor(loss_weights[1]) # 1/4 scale
alpha4 = torch.cuda.FloatTensor(loss_weights[0]) # 1/2 scale
m_1 = nn.CrossEntropyLoss(size_average=True, weight=alpha1)
m_2 = nn.CrossEntropyLoss(size_average=True, weight=alpha2)
m_3 = nn.CrossEntropyLoss(size_average=True, weight=alpha3)
m_4 = nn.CrossEntropyLoss(size_average=True, weight=alpha4)
loss = 0.0
'''
GWTA Loss
'''
for idx, (m, out, yss) in enumerate(zip([m_1, m_2, m_3, m_4], outputs, Yss_argmax)):
if idx != 0:
loss_ = self.gwta_loss(out, yss, m, grid_factor=np.power(2, idx))
else:
loss_ = m(out, yss)
loss += loss_
losses.append(loss_.item())
loss.backward()
self.optimizers.step()
# -- Histogram of boxes for weighting --
for out_idx, (out, yss) in enumerate(zip(outputs[::-1], Yss_out[::-1])):
out_argmax = torch.argmax(out, dim=1)
bin_ = np.bincount(out_argmax.cpu().data.numpy().flatten())
ii = np.nonzero(bin_)[0]
hist_boxes[ii+4*out_idx] += bin_[ii]
Yss_argmax = torch.argmax(yss, dim=1)
bin_gt = np.bincount(Yss_argmax.cpu().data.numpy().flatten())
ii_gt = np.nonzero(bin_gt)[0]
hist_boxes_gt[ii_gt+4*out_idx] += bin_gt[ii_gt]
return losses, hist_boxes, hist_boxes_gt
'''
Test function for LSC-CNN.
Parameters
-----------
X - (np.ndarray) Image patches (Bx3XHxW)
Y - (np.ndarray) Ground truth in highest scale (BX1XHXW)
Returns
---------
losses: (list of float) list of loss values of each scale.
upsample_pred: (list) list of torch tensor predictions for each scale ([Bx4xHxW] * number of scales)
upscaled to the prediction scale
upsample_gt: (list) list of torch tensor gt for each scale ([Bx4xHxW] * number of scales)
upscaled to the prediction scale
NOTE: Here 4 denotes the number of channels in prediction. In LSC-CNN 4 represents
[b_1, b_2, b_3, z] where b_i are boxes and z is the background.
'''
def test_function(X, Y, loss_weights, network):
Y = (Y>0).astype(np.float32)
if torch.cuda.is_available():
X = torch.autograd.Variable(torch.from_numpy(X)).cuda()
X_clone = X.clone()
Y = torch.autograd.Variable(torch.from_numpy(Y)).cuda()
Yss = [Y]
else:
assert(0)
network = network.cuda()
output = network(X, None)
for s in range(0, 3):
Yss.append(torch.nn.functional.avg_pool2d(Yss[s], (2, 2)) * 4)
assert(torch.sum(Yss[0]) == torch.sum(Yss[1]))
# Making 4 channel ground truth
Yss_out = self.get_box_gt(Yss)
Yss = Yss[::-1]
Yss_out = Yss_out[::-1]
Yss_argmax = [torch.argmax(yss, dim=1) for yss in Yss_out]
alpha1 = torch.cuda.FloatTensor(loss_weights[3]) # 1/16 scale
alpha2 = torch.cuda.FloatTensor(loss_weights[2]) # 1/8 scale
alpha3 = torch.cuda.FloatTensor(loss_weights[1]) # 1/4 scale
alpha4 = torch.cuda.FloatTensor(loss_weights[0]) # 1/2 scale
m_1 = nn.CrossEntropyLoss(size_average=True, weight=alpha1)
m_2 = nn.CrossEntropyLoss(size_average=True, weight=alpha2)
m_3 = nn.CrossEntropyLoss(size_average=True, weight=alpha3)
m_4 = nn.CrossEntropyLoss(size_average=True, weight=alpha4)
loss = 0.0
for (out, yss, m) in zip(output, Yss_argmax, [m_1, m_2, m_3, m_4]):
loss += m(out, yss)
out_softmax = [nn.functional.softmax(o, dim=1) for o in output]
out_argmax = [torch.argmax(o, dim=1) for o in out_softmax]
upsample_max = int(np.log2(16 // output_downscale))
upsample_gt = []
upsample_pred = []
for idx, (yss_out, out) in enumerate(zip(Yss_out, output)):
out = nn.functional.softmax(out, dim=1)
upsample_yss_out = yss_out
upsample_out = out
for n in range(upsample_max-idx):
upsample_yss_out = self.upsample_single(upsample_yss_out, factor=2)
upsample_out = self.upsample_single(upsample_out, factor=2)
upsample_gt.append(upsample_yss_out.cpu().data.numpy())
upsample_pred.append(upsample_out.cpu().data.numpy())
return loss.data, upsample_pred, upsample_gt
self.train_funcs.append(train_function)
self.test_funcs = test_function
return self.train_funcs, self.test_funcs
'''
This loads the model for training from ImageNet weights
initialization for VGG backbone.
Parameters
-----------
net: (torch model) network
dont_load: (list) list of layers, for which weights
should not be loaded.
Returns
---------
Returns nothing. The weights are replaced inplace.
'''
def load_model(net, dont_load=[]):
if 'scale_4' in net.name:
cfg = OrderedDict()
cfg['conv1_1'] = 0
cfg['conv1_2'] = 2
cfg['conv2_1'] = 5
cfg['conv2_2'] = 7
cfg['conv3_1'] = 10
cfg['conv3_2'] = 12
cfg['conv3_3'] = 14
cfg['conv4_1'] = 17
cfg['conv4_2'] = 19
cfg['conv4_3'] = 22
cfg['conv_middle_1'] = 'conv4_1'
cfg['conv_middle_2'] = 'conv4_2'
cfg['conv_middle_3'] = 'conv4_3'
cfg['conv_lowest_1'] = 'conv3_1'
cfg['conv_lowest_2'] = 'conv3_2'
cfg['conv_lowest_3'] = 'conv3_3'
cfg['conv_scale1_1'] = 'conv2_1'
cfg['conv_scale1_2'] = 'conv2_2'
print ('loading model ', net.name)
base_dir = "../imagenet_vgg_weights/"
layer_copy_count = 0
for layer in cfg.keys():
if layer in dont_load:
print (layer, 'skipped.')
continue
print ("Copying ", layer)
for name, module in net.named_children():
if layer == name and (not layer.startswith("conv_middle_")) and (not layer.startswith("conv_lowest_") and (not layer.startswith("conv_scale1_"))):
lyr = module
W = np.load(base_dir + layer + "W.npy")
b = np.load(base_dir + layer + "b.npy")
lyr.weight.data.copy_(torch.from_numpy(W))
lyr.bias.data.copy_(torch.from_numpy(b))
layer_copy_count += 1
elif (layer.startswith("conv_middle_") or layer.startswith("conv_lowest_")) and name == layer:
lyr = module
W = np.load(base_dir + cfg[layer] + "W.npy")
b = np.load(base_dir + cfg[layer] + "b.npy")
lyr.weight.data.copy_(torch.from_numpy(W))
lyr.bias.data.copy_(torch.from_numpy(b))
layer_copy_count += 1
print(layer_copy_count, "Copy count")
assert layer_copy_count == 16
print ('Done.')
'''
Function to get localization error (alias offset error)
Parameters
-----------
x_pred: (list) list of x-coordinates of prediction
y_pred: (list) list of y-coordinates of prediction
x_true: (list) list of x-coordinates of gt
y_true: (list) list of y-coordinates of gt
output_downscale: (int) scale in which LSC-CNN predicts
max_dist: (int, default=16) maximum distance beyond
which there's a penalty
NOTE: MLE is ALWAYS calculated in 1x scale i.e
scale of the input image and hence multiplication
with "output_downscale"
Returns
----------
off_err: (float) localization error
avg_precision: (float) average precision
avg_recall: (float) average recall
'''
def get_offset_error(x_pred, y_pred, x_true, y_true, output_downscale, max_dist=16):
if max_dist is None:
max_dist = 16
n = len(x_true)
m = len(x_pred)
if m == 0 or n == 0:
        return 0, 0., 0.
x_true *= output_downscale
y_true *= output_downscale
x_pred *= output_downscale
y_pred *= output_downscale
dx = np.expand_dims(x_true, 1) - x_pred
dy = np.expand_dims(y_true, 1) - y_pred
d = np.sqrt(dx ** 2 + dy ** 2)
assert d.shape == (n, m)
sorted_idx = np.asarray(np.unravel_index(np.argsort(d.ravel()), d.shape))
# Need to divide by n for average error
hit_thresholds = np.arange(12, -1, -1)
off_err, num_hits, fn = offset_sum(sorted_idx, d, n, m, max_dist, hit_thresholds, len(hit_thresholds))
off_err /= n
precisions = np.asarray(num_hits, dtype='float32') / m
recall = np.asarray(num_hits, dtype='float32') / ( np.asarray(num_hits, dtype='float32') + np.asarray(fn, dtype='float32'))
avg_precision = precisions.mean()
avg_recall = recall.mean()
return off_err, avg_precision, avg_recall
'''
Draws bounding box on predictions of LSC-CNN
Parameters
----------
image: (ndarray:HXWX3) input image
h_map: (HXW) map denoting height of the box
w_map: (HXW) map denoting width of the box
gt_pred_map: (HXW) binary map denoting points of prediction
prediction_downscale: (int) scale in which LSC-CNN predicts.
thickness: (int) thickness of bounding box
multi_colours: (bool) If True, plots different colours for different scales
Returns
----------
boxed_img: image with bounding boxes plotted
'''
def get_boxed_img(image, h_map, w_map, gt_pred_map, prediction_downscale, thickness=1, multi_colours=False):
if multi_colours:
        colours = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255)] # colours for the [1/16, 1/8, 1/4, 1/2] scales
if image.shape[2] != 3:
boxed_img = image.astype(np.uint8).transpose((1, 2, 0)).copy()
else:
boxed_img = image.astype(np.uint8).copy()
head_idx = np.where(gt_pred_map > 0)
H, W = boxed_img.shape[:2]
Y, X = head_idx[-2] , head_idx[-1]
for y, x in zip(Y, X):
h, w = h_map[y, x]*prediction_downscale, w_map[y, x]*prediction_downscale
if multi_colours:
selected_colour = colours[(BOX_SIZE_BINS.index(h // prediction_downscale)) // 3]
else:
selected_colour = (0, 255, 0)
if h//2 in BOXES[3] or h//2 in BOXES[2]:
t = 1
else:
t = thickness
cv2.rectangle(boxed_img, (max(int(prediction_downscale * x - w / 2), 0), max(int(prediction_downscale * y - h / 2), 0)),
(min(int(prediction_downscale * x + w - w / 2), W), min(int(prediction_downscale * y + h - h / 2), H)), selected_colour, t)
return boxed_img.transpose((2, 0, 1))
'''
Testing function for LSC-CNN.
Parameters
-----------
test_funcs: (python function) function to test the images
(returns 4 channel output [b_1, b_2, b_3, z] for gt and prediction)
dataset: (Object) DataReader Object
set_name: (string) sets the name for dataset to test on - either test or train
print_output: (bool) Dumps gt and predictions if True
Returns
----------
metrics_test: (dict) Dictionary of metrics
txt: (string) metrics in string format to log
'''
def test_lsccnn(test_funcs, dataset, set_name, network, print_output=False, thresh=0.2):
test_functions = []
global test_loss
global counter
test_loss = 0.
counter = 0.
metrics_test = {}
metrics_ = ['new_mae', 'mle', 'mse', 'loss1']
for k in metrics_:
metrics_test[k] = 0.0
loss_weights = np.ones((len(PRED_DOWNSCALE_FACTORS), NUM_BOXES_PER_SCALE+1))
def test_function(img_batch, gt_batch, roi_batch):
global test_loss
global counter
gt_batch = (gt_batch > 0).astype(np.float32)
loss, pred_batch, gt_batch = test_funcs(img_batch, gt_batch, loss_weights, network)
test_loss += loss
counter += 1
        return (*pred_batch, *gt_batch)
if isinstance(print_output, str):
print_path = print_output
elif isinstance(print_output, bool) and print_output:
print_path = './models/dump'
else:
print_path = None
e = dataset.iterate_over_test_data(test_function, set_name)
for e_idx, e_iter in enumerate(e):
image_split = e_iter[1].split('/')
image_name = image_split[len(image_split)-1]
image = cv2.imread(e_iter[1])
maps = [(image, {}),
(e_iter[2], {'cmap': 'jet', 'vmin': 0., 'vmax': 1.})]
pred_dot_map, pred_box_map = get_box_and_dot_maps(e_iter[0][0:4], thresh=thresh) # prediction_downscale
# -- Plotting boxes
boxed_image_pred = get_boxed_img(image, pred_box_map, pred_box_map, \
pred_dot_map, prediction_downscale=2, \
thickness=2, multi_colours=False)
boxed_image_pred_path = os.path.join(print_path, image_name + '_boxed_image.png')
cv2.imwrite(boxed_image_pred_path, boxed_image_pred.astype(np.uint8).transpose((1, 2, 0)))
print_graph(maps, "", os.path.join(print_path, image_name))
# -- Calculate metrics
metrics_test = calculate_metrics(pred_dot_map, e_iter[2], metrics_test)
for m in metrics_:
metrics_test[m] /= float(e_idx+1)
metrics_test['mse'] = np.sqrt(metrics_test['mse'])
metrics_test['loss1'] = test_loss / float(counter)
txt = ''
for metric in metrics_test.keys():
if metric == "mle" and (args.mle == False):
continue
txt += '%s: %s ' % (metric, metrics_test[metric])
return metrics_test, txt
'''
This function calculates the various counting and localization metrics
Parameters
----------
pred: dot map prediction of LSC-CNN (HxW)
true: ground truth map (HxW)
metrics_test: dictionary of metrics
Returns
----------
metrics_test: updated dictionary of metrics
'''
def calculate_metrics(pred, true, metrics_test):
pred_count = np.sum(pred)
true_count = np.sum(true)
head_x_true, head_y_true = np.where(pred > 0)[-2:]
head_x_pred, head_y_pred = np.where(true > 0)[-2:]
if args.mle:
if len(head_x_pred) == 0:
off = 16*len(head_y_pred)
else:
off, _, _ = get_offset_error(head_x_pred, head_y_pred, head_x_true, head_y_true, output_downscale)
metrics_test['mle'] += off
metrics_test['new_mae'] += np.abs(true_count - pred_count)
metrics_test['mse'] += (true_count - pred_count) ** 2
return metrics_test
'''
This function finds the optimal threshold on the validation set.
Parameters
----------
f: (file object) file writer
iters: Number of iterations to run the binary search
test_funcs: lsccnn test function
splits: number of splits to the range of thresholds
beg: beginning threshold
end: ending threshold
Returns
----------
optimal_threshold: optimal threshold where the mae is
lowest on validation set.
'''
def find_class_threshold(f, iters, test_funcs, network, splits=10, beg=0.0, end=0.3):
for li_idx in range(iters):
avg_errors = []
threshold = list(np.arange(beg, end, (end - beg) / splits))
log(f, 'threshold:'+str(threshold))
for class_threshold in threshold:
avg_error = test_lsccnn(test_funcs, dataset, 'test_valid', network, True, thresh=class_threshold)
avg_errors.append(avg_error[0]['new_mae'])
log(f, "class threshold: %f, avg_error: %f" % (class_threshold, avg_error[0]['new_mae']))
mid = np.asarray(avg_errors).argmin()
beg = threshold[max(mid - 2, 0)]
end = threshold[min(mid + 2, splits - 1)]
log(f, "Best threshold: %f" % threshold[mid])
optimal_threshold = threshold[mid]
return optimal_threshold
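# Illustrative worked example (not part of the original source): with the defaults
# beg=0.0, end=0.3, splits=10, the candidate thresholds are 0.00, 0.03, ..., 0.27.
# If the validation MAE is lowest at index 4 (threshold 0.12) and iters > 1, the
# next pass narrows the search to [threshold[2], threshold[6]] = [0.06, 0.18].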
'''
This function performs box NMS on the predictions of the net.
Parameters
----------
predictions: multiscale predictions - list of numpy maps
each map is of size 4 x H x W
Returns
----------
nms_out: Binary map marking where a person is predicted
box_out: Size of the box at the predicted dot
NOTE: count(nms_out) == count(box_out)
'''
def box_NMS(predictions, thresh):
Scores = []
Boxes = []
for k in range(len(BOXES)):
scores = np.max(predictions[k], axis=0)
boxes = np.argmax(predictions[k], axis=0)
# index the boxes with BOXES to get h_map and w_map (both are the same for us)
mask = (boxes<3) # removing Z
boxes = (boxes+1) * mask
scores = (scores * mask) # + 100 # added 100 since we take logsoftmax and it's negative!!
boxes = (boxes==1)*BOXES[k][0] + (boxes==2)*BOXES[k][1] + (boxes==3)*BOXES[k][2]
Scores.append(scores)
Boxes.append(boxes)
x, y, h, w, scores = apply_nms.apply_nms(Scores, Boxes, Boxes, 0.5, thresh=thresh)
nms_out = np.zeros((predictions[0].shape[1], predictions[0].shape[2])) # since predictions[0] is of size 4 x H x W
box_out = np.zeros((predictions[0].shape[1], predictions[0].shape[2])) # since predictions[0] is of size 4 x H x W
for (xx, yy, hh) in zip(x, y, h):
nms_out[yy, xx] = 1
box_out[yy, xx] = hh
assert(np.count_nonzero(nms_out) == len(x))
return nms_out, box_out
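# Illustrative sketch (not part of the original source; the example helper below is
# hypothetical): at a single pixel of scale k, the 4-channel prediction is decoded by
# taking the channel argmax; channel 3 means background (no box), otherwise the pixel
# keeps its confidence and the corresponding box size from BOXES[k], before all
# scales are passed to apply_nms.
def _example_per_pixel_box_decode(probs=(0.1, 0.7, 0.05, 0.15), k=0):
    p = np.asarray(probs)
    c = int(np.argmax(p))
    if c == 3:                       # background wins -> no box at this pixel
        return 0.0, 0
    return float(p[c]), int(BOXES[k][c])  # -> (0.7, 20) for the defaults above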
"""
A function to return dotmaps and box maps of either gt
or predictions. In case of predictions, it would be NMSed
output and in case of gt maps, it would be from each
individual scale.
Parameters
----------
pred: list of ndarray (currently MUST be of length 4
- each for one scale)
Returns
----------
nms_out: dot map of NMSed output of the given predictions.
h: box map of NMSed output
"""
def get_box_and_dot_maps(pred, thresh):
assert(len(pred) == 4)
all_dot_maps = []
all_box_maps = []
# NMS on the multi-scale outputs
nms_out, h = box_NMS(pred, thresh)
return nms_out, h
'''
Main training code for LSC-CNN.
Parameters
-----------
network : (torch model) network. In this case len(network) == 1
dataset: (class object) data_reader class object
network_function: (class) network_functions() class object to get test and train
functions.
log_path: (str) path to log losses and stats.
Returns
----------
This method does not return anything. It directly logs all the losses,
metrics and statistics of training/validation/testing stages.
'''
def train_networks(network, dataset, network_functions, log_path):
snapshot_path = os.path.join(log_path, 'snapshots')
f = open(os.path.join(log_path, 'train0.log'), 'w')
# -- Logging Parameters
log(f, 'args: ' + str(args))
log(f, 'model: ' + str(network), False)
log(f, 'Training0...')
log(f, 'LR: %.12f.' % (args.lr))
log(f, 'Classification Model')
# -- Get train, test functions
train_funcs, test_funcs = network_functions.create_network_functions(network)
start_epoch = args.start_epoch
num_epochs = args.epochs
valid_losses = {}
test_losses = {}
train_losses = {}
for metric in ['loss1', 'new_mae']:
valid_losses[metric] = []
test_losses[metric] = []
for metric in ['loss1']:
train_losses[metric] = []
batch_size = args.batch_size
num_train_images = len(dataset.dataset_files['train'])
num_patches_per_image = args.patches
num_batches_per_epoch = num_patches_per_image * num_train_images // batch_size
if start_epoch > 0:
with open(os.path.join(snapshot_path, 'losses.pkl'), 'rb') as lossfile:
train_losses, valid_losses, test_losses = pickle.load(lossfile, encoding='latin1')
print ('loaded prev losses')
for metric in metrics:
try:
valid_losses[metric] = valid_losses[metric][:start_epoch]
except:
pass
test_losses[metric] = test_losses[metric][:start_epoch]
for metric in train_losses.keys():
train_losses[metric] = train_losses[metric][:start_epoch]
network, _= load_net(network,
network_functions, 0,
snapshot_path,
get_filename(\
network.name,
start_epoch))
# -- Main Training Loop
if os.path.isfile("loss_weights.npy"):
loss_weights = np.load('loss_weights.npy')
else:
loss_weights = np.ones((4, 4))
HIST_GT = []
for e_i, epoch in enumerate(range(start_epoch, num_epochs)):
avg_loss = [0.0 for _ in range(1)]
hist_boxes = np.zeros((16,))
hist_boxes_gt = np.zeros((16,))
# b_i - batch index
for b_i in range(num_batches_per_epoch):
# Generate next training sample
Xs, Ys, _ = dataset.train_get_batch()
losses, hist_boxes, hist_boxes_gt = train_funcs[0](Xs, Ys, hist_boxes, hist_boxes_gt, loss_weights, network)
for scale_idx in range(1):
avg_loss[scale_idx] = avg_loss[scale_idx] + losses[scale_idx]
# Logging losses after 1k iterations.
if b_i % 1000 == 0:
log(f, 'Epoch %d [%d]: %s loss: %s.' % (epoch, b_i, [network.name], losses))
log(f, 'hist_boxes %s.' % (np.array_str(np.int32(hist_boxes))))
log(f, 'hist_boxes_gt %s.' % (np.array_str(np.int32(hist_boxes_gt))))
hist_boxes = np.zeros((16,))
hist_boxes_gt = np.zeros((16,))
HIST_GT.append(hist_boxes_gt)
if np.all(loss_weights == 1):
HIST_GT = np.asarray(HIST_GT)
HIST_GT = np.sum(HIST_GT, axis=0)
HIST_GT = np.reshape(HIST_GT, (4, 4))
loss_weights = compute_box_weights(HIST_GT)
np.save('loss_weights.npy', loss_weights)
print("Saving loss weights!! PLEASE re-run the code for training/testing")
exit()
# -- Stats update
avg_loss = [al / num_batches_per_epoch for al in avg_loss]
avg_loss = [av for av in avg_loss]
train_losses['loss1'].append(avg_loss)
epoch_test_losses, txt = test_lsccnn(test_funcs, dataset, 'test', network, True)
log(f, 'TEST epoch: ' + str(epoch) + ' ' + txt)
epoch_val_losses, txt = test_lsccnn(test_funcs, dataset, 'test_valid', network, True)
log(f, 'TEST valid epoch: ' + str(epoch) + ' ' + txt)
for metric in ['loss1', 'new_mae']:
valid_losses[metric].append(epoch_val_losses[metric])
test_losses[metric].append(epoch_test_losses[metric])
# Save networks
save_checkpoint({
'epoch': epoch + 1,
'state_dict': network.state_dict(),
'optimizer': network_functions.optimizers.state_dict(),
}, snapshot_path, get_filename(network.name, epoch + 1))
print ('saving graphs...')
with open(os.path.join(snapshot_path, 'losses.pkl'), 'wb') as lossfile:
pickle.dump((train_losses, valid_losses, test_losses), lossfile, protocol=2)
for metric in train_losses.keys():
if "maxima_split" not in metric:
if isinstance(train_losses[metric][0], list):
for i in range(len(train_losses[metric][0])):
plt.plot([a[i] for a in train_losses[metric]])
plt.savefig(os.path.join(snapshot_path, 'train_%s_%d.png' % (metric, i)))
plt.clf()
plt.close()
print(metric, "METRIC", train_losses[metric])
plt.plot(train_losses[metric])
plt.savefig(os.path.join(snapshot_path, 'train_%s.png' % metric))
plt.clf()
plt.close()
for metric in valid_losses.keys():
if isinstance(valid_losses[metric][0], list):
for i in range(len(valid_losses[metric][0])):
plt.plot([a[i] for a in valid_losses[metric]])
plt.savefig(os.path.join(snapshot_path, 'valid_%s_%d.png' % (metric, i)))
plt.clf()
plt.close()
plt.plot(valid_losses[metric])
plt.savefig(os.path.join(snapshot_path, 'valid_%s.png' % metric))
plt.clf()
plt.close()
for metric in test_losses.keys():
if isinstance(test_losses[metric][0], list):
for i in range(len(test_losses[metric][0])):
plt.plot([a[i] for a in test_losses[metric]])
plt.savefig(os.path.join(snapshot_path, 'test_%s_%d.png' % (metric, i)))
plt.clf()
plt.close()
plt.plot(test_losses[metric])
plt.savefig(os.path.join(snapshot_path, 'test_%s.png' % metric))
plt.clf()
plt.close()
# -- Finding best NMS Threshold
if args.threshold == -1:
threshold = find_class_threshold(f, 1, test_funcs, network)
log(f, "Best Threshold is", threshold)
else:
threshold = args.threshold
# Test the latest model and the best model
try:
min_epoch = np.argmin(valid_losses['new_mae'])
log(f, 'Done Training.\n Minimum loss %s at epoch %s' % (valid_losses['new_mae'][min_epoch], min_epoch))
except:
pass
log(f, '\nTesting ...')
_, txt = test_lsccnn(test_funcs, dataset, 'test', network, './models/dump_test', thresh=threshold)
log(f, 'TEST epoch: ' + str(num_epochs - 1) + ' ' + txt)
log(f, 'Exiting train...')
f.close()
return
"""
This method dumps the dataset (if it has not been created yet) and calls
`train_networks` which consists of training, validation
and testing steps.
Basically, this is a wrapper around the main training stage.
"""
def train():
global dataset_paths, model_save_dir, batch_size, crop_size, dataset
print(dataset_paths, dataset)
if not dataset.dataset_ready:
print ('CREATING DATASET...')
dataset.create_dataset_files(dataset_paths,
image_crop_size=crop_size,
image_roi_size=80,
image_roi_stride=72,
prediction_downscale_factor=output_downscale,
valid_set_size=validation_set,
use_rgb=True,
test_batch_size=4)
exit(0)
print ('test,train: ',len(dataset.dataset_files['test']), \
len(dataset.dataset_files['train']))
dataset.test_batch_size = 8
global network
network = LSCCNN(args, nofreeze=True, name='scale_4', output_downscale=4)
load_model(network)
model_save_path = os.path.join(model_save_dir, 'train2')
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
os.makedirs(os.path.join(model_save_path, 'snapshots'))
train_networks(network=network,
dataset=dataset,
network_functions=networkFunctions(),
log_path=model_save_path)
print('\n-------\nDONE.')
if __name__ == '__main__':
args = parser.parse_args()
# -- Assign GPU
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
# -- Assertions
assert(args.dataset)
assert len(args.trained_model) in [0, 1]
# -- Setting seeds for reproducibility
np.random.seed(11)
random.seed(11)
torch.manual_seed(11)
torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.cuda.manual_seed(11)
torch.cuda.manual_seed_all(11)
# -- Dataset paths
if args.dataset == "parta":
dataset_paths = {'test': ['../dataset/ST_partA/test_data/images',
'../dataset/ST_partA/test_data/ground_truth'],
'train': ['../dataset/ST_partA/train_data/images',
'../dataset/ST_partA/train_data/ground_truth']}
validation_set = 30
path = '../dataset/stparta_dotmaps_predscale0.5_rgb_ddcnn++_test_val_30'
output_downscale = 2
elif args.dataset == "partb":
dataset_paths = {'test': ['../dataset/ST_partA/test_data/images',
'../dataset/ST_partA/test_data/ground_truth'],
'train': ['../dataset/ST_partA/train_data/images',
'../dataset/ST_partA/train_data/ground_truth']}
validation_set = 80
output_downscale = 2
path = "../dataset/stpartb_dotmaps_predscale0.5_rgb_ddcnn++_test/"
elif args.dataset == "ucfqnrf":
dataset_paths = {'test': ['../dataset/UCF-QNRF_ECCV18/Test/images',
'../dataset/UCF-QNRF_ECCV18/Test/ground_truth'],
'train': ['../dataset/UCF-QNRF_ECCV18/Train/images',
'../dataset/UCF-QNRF_ECCV18/Train/ground_truth']}
validation_set = 240
output_downscale = 2
path = '../dataset/qnrf_dotmaps_predictionScale_'+str(output_downscale)
model_save_dir = './models'
batch_size = args.batch_size
crop_size = 224
dataset = DataReader(path)
# -- Train the model
train()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
pkg/out/ddb.go
|
package out
import (
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
)
func (d *Dynamo) checkPartial(inc *Incident) (bool, string, error) {
fmt.Printf("\nlooking up an existing record with id: %v\n", inc.Identifier)
partial := &dynamodb.QueryInput{
TableName: aws.String(os.Getenv("TABLE_NAME")),
KeyConditionExpression: aws.String("id = :id"),
ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
":id": {
S: aws.String(inc.Identifier),
},
},
}
resp, err := d.DynamoDB.Query(partial)
if err != nil {
return false, "", fmt.Errorf("could not get item: %v", err)
}
if int(*resp.Count) != 0 {
var pld Incident
err = dynamodbattribute.UnmarshalMap(resp.Items[0], &pld)
if err != nil {
return false, "", fmt.Errorf("could not unmarshal item: %v", err)
}
if pld.IntID != "" {
return true, pld.IntID, nil
}
return false, "", fmt.Errorf("partial entry has no internal identifier")
}
fmt.Println("no partial match found")
return false, "", nil
}
func (d *Dynamo) checkExact(inc *Incident) (bool, error) {
fmt.Printf("\nlooking up an existing record with id: %v and comment id: %v\n", inc.Identifier, inc.CommentID)
exact := &dynamodb.GetItemInput{
TableName: aws.String(os.Getenv("TABLE_NAME")),
Key: map[string]*dynamodb.AttributeValue{
"id": {
S: aws.String(inc.Identifier),
},
"comment_sysid": {
S: aws.String(inc.CommentID),
},
},
}
resp, err := d.DynamoDB.GetItem(exact)
if err != nil {
return false, fmt.Errorf("could not get item: %v", err)
}
if resp.Item != nil {
var pld Incident
err = dynamodbattribute.UnmarshalMap(resp.Item, &pld)
if err != nil {
return false, fmt.Errorf("could not unmarshal item: %v", err)
}
if pld.IntID != "" {
return true, nil
}
return false, fmt.Errorf("exact entry has no internal identifier")
}
fmt.Println("no exact match found")
return false, nil
}
func (d *Dynamo) writeItem(inc *Incident) error {
item, err := dynamodbattribute.MarshalMap(inc)
if err != nil {
return fmt.Errorf("could not marshal db record: %s", err)
}
input := &dynamodb.PutItemInput{
TableName: aws.String(os.Getenv("TABLE_NAME")),
Item: item,
}
_, err = d.DynamoDB.PutItem(input)
if err != nil {
return err
}
fmt.Printf("\nitem added to db with identifier: %v\n", inc.Identifier)
return nil
}
|
[
"\"TABLE_NAME\"",
"\"TABLE_NAME\"",
"\"TABLE_NAME\""
] |
[] |
[
"TABLE_NAME"
] |
[]
|
["TABLE_NAME"]
|
go
| 1 | 0 | |
wifi.go
|
package main
import (
"context"
"fmt"
"log"
"os"
"time"
"github.com/chromedp/cdproto/page"
"github.com/chromedp/chromedp"
)
// DisableWiFi : turns WiFi on or off
func DisableWiFi(disable bool) {
var buttonQuerySelector string
if disable {
buttonQuerySelector = ".enable-disable-on#tf1_wifiEnable2point4Gh"
} else {
buttonQuerySelector = ".enable-disable-off#tf1_wifiEnable2point4Gh"
}
options := []chromedp.ExecAllocatorOption{
chromedp.Flag("headless", false),
}
options = append(chromedp.DefaultExecAllocatorOptions[:], options...)
allocCtx, cancel := chromedp.NewExecAllocator(context.Background(), options...)
defer cancel()
ctx, cancel := chromedp.NewContext(
allocCtx,
chromedp.WithLogf(log.Printf),
)
defer cancel()
ctx, cancel = context.WithTimeout(ctx, 3500*time.Second)
defer cancel()
res := ""
err := chromedp.Run(ctx,
chromedp.ActionFunc(func(ctx context.Context) error {
chromedp.ListenTarget(ctx, func(ev interface{}) {
if _, ok := ev.(*page.EventJavascriptDialogOpening); ok { // page loaded
t := page.HandleJavaScriptDialog(true)
go func() {
if err := chromedp.Run(ctx, t); err != nil {
fmt.Println(err)
}
fmt.Print(ev.(*page.EventJavascriptDialogOpening).Message) // holds msg!
}()
}
})
return nil
}),
chromedp.Navigate(os.Getenv("DWR_URL")),
chromedp.SendKeys(
"#tf1_usrName",
os.Getenv("DWR_USERNAME"),
),
chromedp.SendKeys(
"#tf1_password",
os.Getenv("DWR_PASSWORD"),
),
chromedp.Click("#btSave"), // login button
chromedp.Click(".wifiIconn"),
chromedp.Click(buttonQuerySelector),
chromedp.EvaluateAsDevTools(`document.querySelector('.btnSubmit').click()`, &res),
)
if err != nil {
log.Fatal(err)
}
log.Printf("[+] Done! completed the script")
}
|
[
"\"DWR_URL\"",
"\"DWR_USERNAME\"",
"\"DWR_PASSWORD\""
] |
[] |
[
"DWR_USERNAME",
"DWR_PASSWORD",
"DWR_URL"
] |
[]
|
["DWR_USERNAME", "DWR_PASSWORD", "DWR_URL"]
|
go
| 3 | 0 | |
app/job/main/creative/dao/academy/dao_test.go
|
package academy
import (
"flag"
"go-common/app/job/main/creative/conf"
"os"
"testing"
)
var (
d *Dao
)
func TestMain(m *testing.M) {
if os.Getenv("DEPLOY_ENV") != "" {
flag.Set("app_id", "main.archive.creative-job")
flag.Set("conf_token", "43943fda0bb311e8865c66d44b23cda7")
flag.Set("tree_id", "16037")
flag.Set("conf_version", "docker-1")
flag.Set("deploy_env", "uat")
flag.Set("conf_host", "config.bilibili.co")
flag.Set("conf_path", "/tmp")
flag.Set("region", "sh")
flag.Set("zone", "sh001")
} else {
flag.Set("conf", "../../cmd/creative-job.toml")
}
flag.Parse()
if err := conf.Init(); err != nil {
panic(err)
}
d = New(conf.Conf)
os.Exit(m.Run())
}
|
[
"\"DEPLOY_ENV\""
] |
[] |
[
"DEPLOY_ENV"
] |
[]
|
["DEPLOY_ENV"]
|
go
| 1 | 0 | |
src/testcases/CWE134_Uncontrolled_Format_String/s01/CWE134_Uncontrolled_Format_String__Environment_printf_04.java
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE134_Uncontrolled_Format_String__Environment_printf_04.java
Label Definition File: CWE134_Uncontrolled_Format_String.label.xml
Template File: sources-sinks-04.tmpl.java
*/
/*
* @description
* CWE: 134 Uncontrolled Format String
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded string
* Sinks: printf
* GoodSink: dynamic printf format with string defined
* BadSink : dynamic printf without validation
* Flow Variant: 04 Control flow: if(PRIVATE_STATIC_FINAL_TRUE) and if(PRIVATE_STATIC_FINAL_FALSE)
*
* */
package testcases.CWE134_Uncontrolled_Format_String.s01;
import testcasesupport.*;
public class CWE134_Uncontrolled_Format_String__Environment_printf_04 extends AbstractTestCase
{
/* The two variables below are declared "final", so a tool should
* be able to identify that reads of these will always return their
* initialized values.
*/
private static final boolean PRIVATE_STATIC_FINAL_TRUE = true;
private static final boolean PRIVATE_STATIC_FINAL_FALSE = false;
public void bad() throws Throwable
{
String data;
if (PRIVATE_STATIC_FINAL_TRUE)
{
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
data = System.getenv("ADD");
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = null;
}
if (PRIVATE_STATIC_FINAL_TRUE)
{
if (data != null)
{
/* POTENTIAL FLAW: uncontrolled string formatting */
System.out.printf(data);
}
}
}
/* goodG2B1() - use goodsource and badsink by changing first PRIVATE_STATIC_FINAL_TRUE to PRIVATE_STATIC_FINAL_FALSE */
private void goodG2B1() throws Throwable
{
String data;
if (PRIVATE_STATIC_FINAL_FALSE)
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = null;
}
else
{
/* FIX: Use a hardcoded string */
data = "foo";
}
if (PRIVATE_STATIC_FINAL_TRUE)
{
if (data != null)
{
/* POTENTIAL FLAW: uncontrolled string formatting */
System.out.printf(data);
}
}
}
/* goodG2B2() - use goodsource and badsink by reversing statements in first if */
private void goodG2B2() throws Throwable
{
String data;
if (PRIVATE_STATIC_FINAL_TRUE)
{
/* FIX: Use a hardcoded string */
data = "foo";
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = null;
}
if (PRIVATE_STATIC_FINAL_TRUE)
{
if (data != null)
{
/* POTENTIAL FLAW: uncontrolled string formatting */
System.out.printf(data);
}
}
}
/* goodB2G1() - use badsource and goodsink by changing second PRIVATE_STATIC_FINAL_TRUE to PRIVATE_STATIC_FINAL_FALSE */
private void goodB2G1() throws Throwable
{
String data;
if (PRIVATE_STATIC_FINAL_TRUE)
{
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
data = System.getenv("ADD");
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = null;
}
if (PRIVATE_STATIC_FINAL_FALSE)
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
IO.writeLine("Benign, fixed string");
}
else
{
if (data != null)
{
/* FIX: explicitly defined string formatting */
System.out.printf("%s%n", data);
}
}
}
/* goodB2G2() - use badsource and goodsink by reversing statements in second if */
private void goodB2G2() throws Throwable
{
String data;
if (PRIVATE_STATIC_FINAL_TRUE)
{
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
data = System.getenv("ADD");
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = null;
}
if (PRIVATE_STATIC_FINAL_TRUE)
{
if (data != null)
{
/* FIX: explicitly defined string formatting */
System.out.printf("%s%n", data);
}
}
}
public void good() throws Throwable
{
goodG2B1();
goodG2B2();
goodB2G1();
goodB2G2();
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
|
[
"\"ADD\"",
"\"ADD\"",
"\"ADD\""
] |
[] |
[
"ADD"
] |
[]
|
["ADD"]
|
java
| 1 | 0 | |
libs/geometry/doc/make_qbk.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# ===========================================================================
# Copyright (c) 2007-2012 Barend Gehrels, Amsterdam, the Netherlands.
# Copyright (c) 2008-2012 Bruno Lalande, Paris, France.
# Copyright (c) 2009-2012 Mateusz Loskot ([email protected]), London, UK
#
# Use, modification and distribution is subject to the Boost Software License,
# Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# ============================================================================
import os, sys
os.chdir(os.path.dirname(sys.argv[0]))
if 'DOXYGEN' in os.environ:
doxygen_cmd = os.environ['DOXYGEN']
else:
doxygen_cmd = 'doxygen'
if 'DOXYGEN_XML2QBK' in os.environ:
doxygen_xml2qbk_cmd = os.environ['DOXYGEN_XML2QBK']
else:
doxygen_xml2qbk_cmd = 'doxygen_xml2qbk'
cmd = doxygen_xml2qbk_cmd
cmd = cmd + " --xml doxy/doxygen_output/xml/%s.xml"
cmd = cmd + " --start_include boost/geometry/"
cmd = cmd + " --convenience_header_path ../../../boost/geometry/"
cmd = cmd + " --convenience_headers geometry.hpp,geometries/geometries.hpp,multi/multi.hpp"
cmd = cmd + " --skip_namespace boost::geometry::"
cmd = cmd + " --copyright src/copyright_block.qbk"
cmd = cmd + " > generated/%s.qbk"
def call_doxygen():
os.chdir("doxy");
os.system(doxygen_cmd)
os.chdir("..")
def group_to_quickbook(section):
os.system(cmd % ("group__" + section.replace("_", "__"), section))
def model_to_quickbook(section):
os.system(cmd % ("classboost_1_1geometry_1_1model_1_1" + section.replace("_", "__"), section))
def model_to_quickbook2(classname, section):
os.system(cmd % ("classboost_1_1geometry_1_1model_1_1" + classname, section))
def struct_to_quickbook(section):
os.system(cmd % ("structboost_1_1geometry_1_1" + section.replace("_", "__"), section))
def class_to_quickbook(section):
os.system(cmd % ("classboost_1_1geometry_1_1" + section.replace("_", "__"), section))
def strategy_to_quickbook(section):
p = section.find("::")
ns = section[:p]
strategy = section[p+2:]
os.system(cmd % ("classboost_1_1geometry_1_1strategy_1_1"
+ ns.replace("_", "__") + "_1_1" + strategy.replace("_", "__"),
ns + "_" + strategy))
def cs_to_quickbook(section):
os.system(cmd % ("structboost_1_1geometry_1_1cs_1_1" + section.replace("_", "__"), section))
call_doxygen()
algorithms = ["append", "assign", "make", "clear"
, "area", "buffer", "centroid", "convert", "correct", "covered_by"
, "convex_hull", "difference", "disjoint", "distance"
, "envelope", "equals", "expand", "for_each", "intersection", "intersects"
, "length", "num_geometries", "num_interior_rings", "num_points"
, "overlaps", "perimeter", "reverse", "simplify", "sym_difference"
, "touches", "transform", "union", "unique", "within"]
access_functions = ["get", "set", "exterior_ring", "interior_rings"
, "num_points", "num_interior_rings", "num_geometries"]
coordinate_systems = ["cartesian", "geographic", "polar", "spherical", "spherical_equatorial"]
core = ["closure", "coordinate_system", "coordinate_type", "cs_tag"
, "dimension", "exception", "interior_type"
, "degree", "radian"
, "is_radian", "point_order"
, "point_type", "ring_type", "tag", "tag_cast" ]
exceptions = ["exception", "centroid_exception"];
iterators = ["circular_iterator", "closing_iterator"
, "ever_circling_iterator"]
models = ["point", "linestring", "box"
, "polygon", "segment", "ring"
, "multi_linestring", "multi_point", "multi_polygon", "referring_segment"]
strategies = ["distance::pythagoras", "distance::haversine"
, "distance::cross_track", "distance::projected_point"
, "within::winding", "within::franklin", "within::crossings_multiply"
, "area::surveyor", "area::huiller"
, "centroid::bashein_detmer", "centroid::average"
, "convex_hull::graham_andrew"
, "simplify::douglas_peucker"
, "side::side_by_triangle", "side::side_by_cross_track", "side::spherical_side_formula"
, "transform::inverse_transformer", "transform::map_transformer"
, "transform::rotate_transformer", "transform::scale_transformer"
, "transform::translate_transformer", "transform::ublas_transformer"
]
views = ["box_view", "segment_view"
, "closeable_view", "reversible_view", "identity_view"]
for i in algorithms:
group_to_quickbook(i)
for i in access_functions:
group_to_quickbook(i)
for i in coordinate_systems:
cs_to_quickbook(i)
for i in core:
struct_to_quickbook(i)
for i in exceptions:
class_to_quickbook(i)
for i in iterators:
struct_to_quickbook(i)
for i in models:
model_to_quickbook(i)
for i in strategies:
strategy_to_quickbook(i)
for i in views:
struct_to_quickbook(i)
model_to_quickbook2("d2_1_1point__xy", "point_xy")
group_to_quickbook("arithmetic")
group_to_quickbook("register")
group_to_quickbook("enum")
# Use either bjam or b2 or ../../../b2 (the last should be done on Release branch)
os.system("bjam")
|
[] |
[] |
[
"DOXYGEN",
"DOXYGEN_XML2QBK"
] |
[]
|
["DOXYGEN", "DOXYGEN_XML2QBK"]
|
python
| 2 | 0 | |
examples/db/dropTable/go/dropTable.go
|
package example
import (
"fmt"
"os"
"github.com/micro/services/clients/go/db"
)
// Drop a table in the DB
func DropTable() {
dbService := db.NewDbService(os.Getenv("MICRO_API_TOKEN"))
rsp, err := dbService.DropTable(&db.DropTableRequest{
Table: "users",
})
fmt.Println(rsp, err)
}
|
[
"\"MICRO_API_TOKEN\""
] |
[] |
[
"MICRO_API_TOKEN"
] |
[]
|
["MICRO_API_TOKEN"]
|
go
| 1 | 0 | |
run/run.go
|
// Copyright (c) 2019-2020 Siemens AG
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// Author(s): Jonas Plum
package run
import (
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
"runtime"
"runtime/debug"
"time"
"github.com/forensicanalysis/artifactcollector/collection"
"github.com/forensicanalysis/artifactlib/goartifacts"
)
// Collection is the output of a run; it can be used to further process the
// results (e.g. send the output to an SFTP server).
type Collection struct {
Name string
Path string
}
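// Editor's sketch (not part of the original file): a caller in another package
// could consume the returned Collection roughly like this, where uploadSFTP is
// a hypothetical helper:
//
//	if c := run.Run(cfg, definitions, embedded); c != nil {
//		uploadSFTP(c.Path, c.Name) // e.g. push the forensicstore and log to an SFTP server
//	}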
// Run performs the full artifact collection process.
func Run(config *collection.Configuration, artifactDefinitions []goartifacts.ArtifactDefinition, embedded map[string][]byte) (c *Collection) { //nolint:gocyclo,funlen
if len(config.Artifacts) == 0 {
fmt.Println("No artifacts selected in config")
return nil
}
var outputDirFlag string
flag.StringVar(&outputDirFlag, "o", "", "Output directory for forensicstore and log file")
flag.Parse()
cwd, _ := os.Getwd()
windowsZipTempDir := regexp.MustCompile(`(?i)C:\\Windows\\system32`)
sevenZipTempDir := regexp.MustCompile(`(?i)C:\\Users\\.*\\AppData\\Local\\Temp\\.*`)
// output dir order:
// 1. -o flag given
// 2. implemented in config
// 3.1. running from zip -> Desktop
// 3.2. otherwise -> current directory
switch {
case outputDirFlag != "":
config.OutputDir = outputDirFlag
case config.OutputDir != "":
case windowsZipTempDir.MatchString(cwd) || sevenZipTempDir.MatchString(cwd):
fmt.Println("Running from zip, results will be available on Desktop")
config.OutputDir = filepath.Join(homeDir(), "Desktop")
default:
config.OutputDir = "" // current directory
}
// setup
hostname, err := os.Hostname()
if err != nil {
hostname = "artifactcollector"
}
if config.Case != "" {
hostname = config.Case + "-" + hostname
}
collectionName := fmt.Sprintf("%s_%s", hostname, time.Now().UTC().Format("2006-01-02T15-04-05"))
// setup logging
log.SetFlags(log.LstdFlags | log.Lshortfile)
logfilePath := filepath.Join(config.OutputDir, collectionName+".log")
logfile, logfileError := os.OpenFile(logfilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
if logfileError != nil {
log.Printf("Could not open logfile %s\n", logfileError)
} else {
log.SetOutput(logfile)
defer logfile.Close()
}
defer func() {
if r := recover(); r != nil {
logPrint("A critical error occurred: ", r, string(debug.Stack()))
c = nil
}
}()
// start running
logPrint("Start to collect forensic artifacts. This might take a while.")
start := time.Now()
// unpack internal files
tempDir, err := unpack(embedded)
if err != nil {
logPrint(err)
return nil
}
defer os.RemoveAll(tempDir) // clean up
// enforce admin rights
if err = enforceAdmin(!config.User); err != nil {
return nil
}
// select from entrypoint
filteredArtifactDefinitions := artifactDefinitions
if config.Artifacts != nil {
filteredArtifactDefinitions = goartifacts.FilterName(config.Artifacts, artifactDefinitions)
}
// create store
collectionPath := filepath.Join(config.OutputDir, collectionName)
storeName, store, teardown, err := createStore(collectionPath, config, filteredArtifactDefinitions)
if err != nil {
logPrint(err)
return nil
}
if config.FS != nil {
store.SetFS(config.FS)
}
// add store as log writer
storeLogger, storeLoggerError := newStoreLogger(store)
if storeLoggerError != nil {
log.Printf("Could not setup logging to forensicstore: %s", storeLoggerError)
}
switch {
case logfileError == nil && storeLoggerError == nil:
log.SetOutput(io.MultiWriter(logfile, storeLogger))
case storeLoggerError == nil:
log.SetOutput(storeLogger)
}
collector, err := collection.NewCollector(store, tempDir, artifactDefinitions)
if err != nil {
logPrint(fmt.Errorf("LiveCollector creation failed: %w", err))
return nil
}
i, total := 1, len(filteredArtifactDefinitions)
// collect artifacts
for _, artifactDefinition := range filteredArtifactDefinitions {
startArtifact := time.Now()
logPrint(fmt.Sprintf("Collecting %s (%d/%d)", artifactDefinition.Name, i, total))
i++
for _, source := range artifactDefinition.Sources {
collector.Collect(artifactDefinition.Name, source)
}
log.Printf("Collected %s in %.1f seconds\n", artifactDefinition.Name, time.Since(startArtifact).Seconds())
}
log.Printf("Collected artifacts in %.1f seconds\n", time.Since(start).Seconds())
// remove store logger
if logfileError == nil {
log.SetOutput(logfile)
} else {
log.SetOutput(ioutil.Discard)
}
err = teardown()
if err != nil {
logPrint(fmt.Sprintf("Close Store failed: %s", err))
return nil
}
logPrint("Collection done.")
return &Collection{
Name: collectionName,
Path: storeName,
}
}
func homeDir() string {
if runtime.GOOS == "windows" {
os.Getenv("USERPROFILE")
}
return os.Getenv("HOME")
}
func unpack(embedded map[string][]byte) (tempDir string, err error) {
tempDir, err = ioutil.TempDir("", "ac")
if err != nil {
return tempDir, err
}
for path, content := range embedded {
if err := os.MkdirAll(filepath.Join(tempDir, filepath.Dir(path)), 0700); err != nil {
return tempDir, err
}
if err := ioutil.WriteFile(filepath.Join(tempDir, path), content, 0644); err != nil {
return tempDir, err
}
log.Printf("Unpacking %s", path)
}
return tempDir, nil
}
func enforceAdmin(forceAdmin bool) error {
switch {
case !forceAdmin:
return nil
case runtime.GOOS == "windows":
_, err := os.Open("\\\\.\\PHYSICALDRIVE0")
if err != nil {
logPrint("Need to be windows admin")
return os.ErrPermission
}
return nil
case os.Getgid() != 0:
logPrint("need to be root")
return os.ErrPermission
default:
return nil
}
}
func logPrint(a ...interface{}) {
log.Println(a...)
fmt.Println(a...)
}
|
[
"\"USERPROFILE\"",
"\"HOME\""
] |
[] |
[
"HOME",
"USERPROFILE"
] |
[]
|
["HOME", "USERPROFILE"]
|
go
| 2 | 0 | |
testing/testing.go
|
package testing
import (
"crypto/tls"
"fmt"
"net/http"
"os"
"strconv"
"strings"
stripe "github.com/HRInnovationLab/stripe-go/v72"
"github.com/HRInnovationLab/stripe-go/v72/form"
"golang.org/x/net/http2"
)
// This file should contain any testing helpers that should be commonly
// available across all tests in the Stripe package.
//
// There's not much in here because it's a relatively recent addition to the
// package, but should be used as appropriate for any new changes.
const (
// MockMinimumVersion is the minimum acceptable version for stripe-mock.
// It's here so that if the library depends on new endpoints or features
// added in a more recent version of stripe-mock, we can show people a
// better error message instead of the test suite crashing with a bunch of
// confusing 404 errors or the like.
MockMinimumVersion = "0.106.0"
// TestMerchantID is a token that can be used to represent a merchant ID in
// simple tests.
TestMerchantID = "acct_123"
)
func init() {
// Enable strict mode on form encoding so that we'll panic if any kind of
// malformed param struct is detected
form.Strict = true
port := os.Getenv("STRIPE_MOCK_PORT")
if port == "" {
port = "12112"
}
// stripe-mock's certificate for localhost is self-signed so configure a
// specialized client that skips the certificate authority check.
transport := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
// Go can often enable HTTP/2 automatically if it's supported, but
// confusingly, if you set `TLSClientConfig`, it disables it and you have
// to explicitly invoke http2's `ConfigureTransport` to get it back.
//
// See the incorrectly closed bug report here:
//
// https://github.com/golang/go/issues/20645
//
err := http2.ConfigureTransport(transport)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to initialize HTTP/2 transport: %v\n", err)
os.Exit(1)
}
httpClient := &http.Client{
Transport: transport,
}
resp, err := httpClient.Get("https://localhost:" + port)
if err != nil {
fmt.Fprintf(os.Stderr, "Couldn't reach stripe-mock at `localhost:%s` (%v). Is "+
"it running? Please see README for setup instructions.\n", port, err)
os.Exit(1)
}
version := resp.Header.Get("Stripe-Mock-Version")
if version != "master" && compareVersions(version, MockMinimumVersion) > 0 {
fmt.Fprintf(os.Stderr, "Your version of stripe-mock (%s) is too old. The "+
"minimum version to run this test suite is %s. Please see its "+
"repository for upgrade instructions.\n", version, MockMinimumVersion)
os.Exit(1)
}
stripe.Key = "sk_test_myTestKey"
// Configure a backend for stripe-mock and set it for both the API and
// Uploads (unlike the real Stripe API, stripe-mock supports both these
// backends).
stripeMockBackend := stripe.GetBackendWithConfig(
stripe.APIBackend,
&stripe.BackendConfig{
URL: stripe.String("https://localhost:" + port),
HTTPClient: httpClient,
LeveledLogger: stripe.DefaultLeveledLogger,
},
)
stripe.SetBackend(stripe.APIBackend, stripeMockBackend)
stripe.SetBackend(stripe.UploadsBackend, stripeMockBackend)
}
// compareVersions compares two semantic version strings. We need this because
// with more complex double-digit numbers, lexical comparison breaks down.
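// Illustrative example (editor's note, based on the implementation below): a
// lexical comparison would rank "0.99.0" above "0.106.0" because '9' > '1',
// even though 106 > 99 numerically. Comparing each dot-separated component as
// an integer gives the intended ordering:
//
//	compareVersions("0.106.0", "0.99.0") // -1: the first argument is newer
//	compareVersions("0.99.0", "0.106.0") // 1: the first argument is older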
func compareVersions(a, b string) (ret int) {
as := strings.Split(a, ".")
bs := strings.Split(b, ".")
loopMax := len(bs)
if len(as) > len(bs) {
loopMax = len(as)
}
for i := 0; i < loopMax; i++ {
var x, y string
if len(as) > i {
x = as[i]
}
if len(bs) > i {
y = bs[i]
}
xi, _ := strconv.Atoi(x)
yi, _ := strconv.Atoi(y)
if xi > yi {
ret = -1
} else if xi < yi {
ret = 1
}
if ret != 0 {
break
}
}
return
}
|
[
"\"STRIPE_MOCK_PORT\""
] |
[] |
[
"STRIPE_MOCK_PORT"
] |
[]
|
["STRIPE_MOCK_PORT"]
|
go
| 1 | 0 | |
tests/ci/pvs_check.py
|
#!/usr/bin/env python3
import subprocess
import os
import json
import logging
from github import Github
from report import create_test_html_report
from s3_helper import S3Helper
from pr_info import PRInfo
import shutil
import sys
from get_robot_token import get_best_robot_token, get_parameter_from_ssm
NAME = 'PVS Studio (actions)'
LICENCE_NAME = 'Free license: ClickHouse, Yandex'
HTML_REPORT_FOLDER = 'pvs-studio-html-report'
TXT_REPORT_NAME = 'pvs-studio-task-report.txt'
def process_logs(s3_client, additional_logs, s3_path_prefix):
additional_urls = []
for log_path in additional_logs:
if log_path:
additional_urls.append(
s3_client.upload_test_report_to_s3(
log_path,
s3_path_prefix + "/" + os.path.basename(log_path)))
return additional_urls
def _process_txt_report(path):
warnings = []
errors = []
with open(path, 'r') as report_file:
for line in report_file:
if 'viva64' in line:
continue
elif 'warn' in line:
warnings.append(':'.join(line.split('\t')[0:2]))
elif 'err' in line:
errors.append(':'.join(line.split('\t')[0:2]))
return warnings, errors
def get_commit(gh, commit_sha):
repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse"))
commit = repo.get_commit(commit_sha)
return commit
def upload_results(s3_client, pr_number, commit_sha, test_results, additional_files):
s3_path_prefix = str(pr_number) + "/" + commit_sha + "/" + NAME.lower().replace(' ', '_')
additional_urls = process_logs(s3_client, additional_files, s3_path_prefix)
branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master"
branch_name = "master"
if pr_number != 0:
branch_name = "PR #{}".format(pr_number)
branch_url = "https://github.com/ClickHouse/ClickHouse/pull/" + str(pr_number)
commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{commit_sha}"
task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}"
raw_log_url = additional_urls[0]
additional_urls.pop(0)
html_report = create_test_html_report(NAME, test_results, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls)
with open('report.html', 'w') as f:
f.write(html_report)
url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html")
logging.info("Search result in url %s", url)
return url
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
repo_path = os.path.join(os.getenv("REPO_COPY", os.path.abspath("../../")))
temp_path = os.path.join(os.getenv("RUNNER_TEMP", os.path.abspath("./temp")), 'pvs_check')
with open(os.getenv('GITHUB_EVENT_PATH'), 'r') as event_file:
event = json.load(event_file)
pr_info = PRInfo(event)
# this check modifies the repository, so copy it to the temp directory
logging.info("Repo copy path %s", repo_path)
gh = Github(get_best_robot_token())
images_path = os.path.join(temp_path, 'changed_images.json')
docker_image = 'clickhouse/pvs-test'
if os.path.exists(images_path):
logging.info("Images file exists")
with open(images_path, 'r') as images_fd:
images = json.load(images_fd)
logging.info("Got images %s", images)
if 'clickhouse/pvs-test' in images:
docker_image += ':' + images['clickhouse/pvs-test']
logging.info("Got docker image %s", docker_image)
s3_helper = S3Helper('https://s3.amazonaws.com')
licence_key = get_parameter_from_ssm('pvs_studio_key')
cmd = f"docker run -u $(id -u ${{USER}}):$(id -g ${{USER}}) --volume={repo_path}:/repo_folder --volume={temp_path}:/test_output -e LICENCE_NAME='{LICENCE_NAME}' -e LICENCE_KEY='{licence_key}' {docker_image}"
commit = get_commit(gh, pr_info.sha)
try:
subprocess.check_output(cmd, shell=True)
except:
commit.create_status(context=NAME, description='PVS report failed to build', state='failure', target_url=f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}")
sys.exit(1)
try:
s3_path_prefix = str(pr_info.number) + "/" + pr_info.sha + "/" + NAME.lower().replace(' ', '_')
html_urls = s3_helper.upload_test_folder_to_s3(os.path.join(temp_path, HTML_REPORT_FOLDER), s3_path_prefix)
index_html = None
for url in html_urls:
if 'index.html' in url:
index_html = '<a href="{}">HTML report</a>'.format(url)
break
if not index_html:
commit.create_status(context=NAME, description='PVS report failed to build', state='failure', target_url=f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}")
sys.exit(1)
txt_report = os.path.join(temp_path, TXT_REPORT_NAME)
warnings, errors = _process_txt_report(txt_report)
errors = errors + warnings
status = 'success'
test_results = [(index_html, "Look at the report"), ("Errors count not checked", "OK")]
description = "Total errors {}".format(len(errors))
additional_logs = [txt_report, os.path.join(temp_path, 'pvs-studio.log')]
report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, additional_logs)
print("::notice ::Report url: {}".format(report_url))
commit = get_commit(gh, pr_info.sha)
commit.create_status(context=NAME, description=description, state=status, target_url=report_url)
except Exception as ex:
print("Got an exception", ex)
sys.exit(1)
|
[] |
[] |
[
"GITHUB_RUN_ID",
"RUNNER_TEMP",
"GITHUB_REPOSITORY",
"REPO_COPY",
"GITHUB_EVENT_PATH"
] |
[]
|
["GITHUB_RUN_ID", "RUNNER_TEMP", "GITHUB_REPOSITORY", "REPO_COPY", "GITHUB_EVENT_PATH"]
|
python
| 5 | 0 | |
01_Language/01_Functions/go/src/functions/get_include_path.go
|
package functions
import "os"
func GetIncludePath() string {
return os.Getenv("GOPATH")
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
tests/api/conftest.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CESNET.
#
# OARepo-Communities is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Pytest configuration.
See https://pytest-invenio.readthedocs.io/ for documentation on which test
fixtures are available.
"""
import logging
import os
import uuid
import pytest
from flask import Blueprint
from invenio_access import ActionRoles, ActionUsers
from invenio_accounts.models import User
from invenio_accounts.proxies import current_datastore
from invenio_accounts.testutils import create_test_user
from invenio_app.factory import create_api
from invenio_search import current_search
from oarepo_communities.views import blueprint
from oarepo_enrollments.config import allow_all
from oarepo_communities.api import OARepoCommunity
from oarepo_communities.constants import COMMUNITY_REQUEST_APPROVAL, COMMUNITY_APPROVE, COMMUNITY_REQUEST_CHANGES, \
COMMUNITY_REVERT_APPROVE, COMMUNITY_PUBLISH, COMMUNITY_UNPUBLISH, STATE_EDITING, STATE_PENDING_APPROVAL, \
STATE_APPROVED, STATE_PUBLISHED
from oarepo_communities.handlers import CommunityHandler
from oarepo_communities.proxies import current_oarepo_communities
from oarepo_communities.search import CommunitySearch
from tests.api.helpers import gen_rest_endpoint, make_sample_record, LiteEntryPoint, _test_login_factory
logging.basicConfig()
logging.getLogger('elasticsearch').setLevel(logging.DEBUG)
@pytest.fixture(scope='module')
def create_app():
return create_api
@pytest.fixture(scope='module')
def app_config(app_config):
app_config = dict(
TESTING=True,
APPLICATION_ROOT='/',
WTF_CSRF_ENABLED=False,
CACHE_TYPE='simple',
SERVER_NAME='localhost',
DEBUG=False,
PREFERRED_URL_SCHEME='https',
FLASK_ENV='development',
PIDSTORE_RECID_FIELD='id',
EMAIL_BACKEND='flask_email.backends.locmem.Mail',
SECRET_KEY='TEST',
SQLALCHEMY_DATABASE_URI=os.getenv('SQLALCHEMY_DATABASE_URI',
'sqlite://'),
SECURITY_DEPRECATED_PASSWORD_SCHEMES=[],
SQLALCHEMY_TRACK_MODIFICATIONS=True,
SECURITY_PASSWORD_HASH='plaintext',
SECURITY_PASSWORD_SCHEMES=['plaintext'],
APP_ALLOWED_HOSTS=['localhost'],
USERPROFILES_EXTEND_SECURITY_FORMS=True,
RATELIMIT_ENABLED=False,
RECORDS_REST_ENDPOINTS={
'recid': gen_rest_endpoint('recid',
CommunitySearch,
'tests.api.helpers.TestRecord',
custom_read_permission_factory=allow_all)
},
OAREPO_COMMUNITIES_ENDPOINTS=['recid']
)
app_config.pop('RATELIMIT_STORAGE_URL', None)
return app_config
@pytest.fixture(scope='module')
def app(base_app):
"""Flask application fixture."""
# OARepoEnrollmentsExt(base_app)
# OARepoCommunities(base_app)
# Register blueprints here
# base_app.register_blueprint(create_blueprint_from_app(base_app))
base_app.register_blueprint(blueprint)
return base_app
def extra_entrypoints(app, group=None, name=None):
data = {
'oarepo_enrollments.enrollments': [
LiteEntryPoint('communities', CommunityHandler),
],
}
names = data.keys() if name is None else [name]
for key in names:
for entry_point in data[key]:
yield entry_point
@pytest.fixture(scope='module')
def users(base_app):
yield [create_test_user('user{}@inveniosoftware.org'.format(i)) for i in range(3)]
@pytest.fixture
def authenticated_user(db):
"""Authenticated user."""
yield create_test_user('[email protected]')
@pytest.fixture
def community_member(db, community):
user = create_test_user('[email protected]')
role = OARepoCommunity.get_role(community[1], 'member')
current_datastore.add_role_to_user(user, role)
user = User.query.get(user.id)
yield user
@pytest.fixture
def community_curator(db, community):
"""Curator user."""
user = create_test_user('[email protected]')
member = OARepoCommunity.get_role(community[1], 'member')
curator = OARepoCommunity.get_role(community[1], 'curator')
current_datastore.add_role_to_user(user, member)
current_datastore.add_role_to_user(user, curator)
yield user
@pytest.fixture
def community_publisher(db, community):
"""Curator user."""
user = create_test_user('[email protected]')
member = OARepoCommunity.get_role(community[1], 'member')
publisher = OARepoCommunity.get_role(community[1], 'publisher')
current_datastore.add_role_to_user(user, member)
current_datastore.add_role_to_user(user, publisher)
yield publisher
@pytest.fixture
def community_ext_groups():
return {
'A': {
'members_id': str(uuid.uuid4()),
'curators_id': str(uuid.uuid4()),
'publishers_id': str(uuid.uuid4()),
},
'B': {
'members_id': str(uuid.uuid4()),
'curators_id': str(uuid.uuid4()),
'publishers_id': str(uuid.uuid4()),
}
}
@pytest.fixture
def community(db):
"""Community fixture."""
comid = 'comtest'
community = OARepoCommunity.create(
{'description': 'Community description'},
title='Title',
id_=comid)
db.session.commit()
yield comid, community
@pytest.fixture()
def sample_records(app, db, es_clear):
try:
current_search.client.indices.delete('records-record-v1.0.0')
except:
pass
if 'records-record-v1.0.0' not in current_search.mappings:
current_search.register_mappings('records', 'tests.api.mappings')
list(current_search.delete())
list(current_search.create())
records = {
'A': [
make_sample_record(db, 'Test 1 in community A', 'A', 'published'),
make_sample_record(db, 'Test 2 in community A', 'A'),
make_sample_record(db, 'Test 3 in community A', 'A')
],
'B': [
make_sample_record(db, 'Test 4 in community B', 'B', 'published'),
make_sample_record(db, 'Test 5 in community B', 'B'),
make_sample_record(db, 'Test 6 in community B', 'B', 'published', secondary=['C']),
],
'comtest': [
make_sample_record(db, 'Test 4 in community comid', 'comtest', secondary=['B']),
{
STATE_EDITING: make_sample_record(db, 'Test 4 in community comid', 'comtest',
state=STATE_EDITING, secondary=['B']),
STATE_PENDING_APPROVAL: make_sample_record(db, 'Test 4 in community comid', 'comtest',
state=STATE_PENDING_APPROVAL, secondary=['B']),
STATE_APPROVED: make_sample_record(db, 'Test 4 in community comid', 'comtest',
state=STATE_APPROVED, secondary=['B']),
STATE_PUBLISHED: make_sample_record(db, 'Test 4 in community comid', 'comtest',
state=STATE_PUBLISHED, secondary=['B']),
}
]
}
current_search.flush_and_refresh('records-record-v1.0.0')
return records
@pytest.yield_fixture()
def permissions(db, community, sample_records):
"""Permission for users."""
users = {None: None}
user_roles = ['author', 'curator', 'publisher', 'member']
community_roles = {r.name.split(':')[-1]: r for r in community[1].roles}
for role in user_roles:
users[role] = create_test_user(
email='{0}@invenio-software.org'.format(role),
password='pass1',
active=True
)
if role == 'author':
current_datastore.add_role_to_user(users[role], community_roles['member'])
else:
current_datastore.add_role_to_user(users[role], community_roles['member'])
current_datastore.add_role_to_user(users[role], community_roles[role])
perms = [
(COMMUNITY_REQUEST_APPROVAL, ['author']),
(COMMUNITY_APPROVE, ['curator']),
(COMMUNITY_REQUEST_CHANGES, ['curator']),
(COMMUNITY_REVERT_APPROVE, ['curator', 'publisher']),
(COMMUNITY_PUBLISH, ['publisher']),
(COMMUNITY_UNPUBLISH, ['publisher'])
]
for action, roles in perms:
for r in roles:
if r == 'author':
db.session.add(ActionUsers(
action=action,
argument=community[1].id,
user=users[r]))
else:
role_name = current_oarepo_communities.role_name_factory(community[1], r)['name']
role = current_datastore.find_role(role_name)
db.session.add(ActionRoles(action=action, argument=community[1].id, role=role))
db.session.commit()
yield users
@pytest.fixture()
def test_blueprint(db, users, community_member, base_app):
"""Test blueprint with dynamically added testing endpoints."""
blue = Blueprint(
'_tests',
__name__,
url_prefix='/_tests/'
)
if blue.name in base_app.blueprints:
del base_app.blueprints[blue.name]
db.session.commit()
for user in User.query.all():
if base_app.view_functions.get('_tests.test_login_{}'.format(user.id)) is not None:
del base_app.view_functions['_tests.test_login_{}'.format(user.id)]
blue.add_url_rule('_login_{}/{}'.format(user.id, user.id), view_func=_test_login_factory(user))
base_app.register_blueprint(blue)
return blue
|
[] |
[] |
[
"SQLALCHEMY_DATABASE_URI"
] |
[]
|
["SQLALCHEMY_DATABASE_URI"]
|
python
| 1 | 0 | |
formatters/sumologic.go
|
package formatters
import (
"bytes"
"fmt"
"os"
"strings"
"github.com/behance/go-logrus"
)
// SumologicFormatter - takes entries and flattens them into a K=V format
// with an additional APP_NAME key
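// As a rough sketch (editor's note; the exact key=value portion is produced by
// KVEntryString in kv.go, which is not shown here), an info-level entry with a
// field foo=bar would render along the lines of:
//
//	APP_NAME='GolangApp' SEVERITY='INFO' foo=bar
//
// where APP_NAME comes from the LOG_APP_NAME environment variable when set.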
type SumologicFormatter struct{}
// Format - See logrus.Formatter.Format for docs
func (f SumologicFormatter) Format(entry *logrus.Entry) ([]byte, error) {
b := &bytes.Buffer{}
fmt.Fprintf(
b,
"APP_NAME='%s' SEVERITY='%s' ",
appName(),
strings.ToUpper(entry.Level.String()),
)
// KVEntryString in the kv.go file
fmt.Fprint(b, KVEntryString(entry))
fmt.Fprintln(b)
return b.Bytes(), nil
}
func appName() string {
appname := os.Getenv("LOG_APP_NAME")
if appname == "" {
return "GolangApp"
}
return appname
}
|
[
"\"LOG_APP_NAME\""
] |
[] |
[
"LOG_APP_NAME"
] |
[]
|
["LOG_APP_NAME"]
|
go
| 1 | 0 | |
test/unit_minimal_test.go
|
package test
import (
"os"
"testing"
"github.com/gruntwork-io/terratest/modules/terraform"
)
func TestUnitMinimal(t *testing.T) {
t.Parallel()
// environment variables are passed to terraform only if they are set to non-empty values
vars := map[string]interface{}{
"gcp_project": os.Getenv("TEST_GCP_PROJECT"),
"gcp_org_domain": os.Getenv("TEST_GCP_ORG_DOMAIN"),
}
for key, value := range vars {
if value == "" {
delete(vars, key)
}
}
terraformOptions := &terraform.Options{
TerraformDir: "unit-minimal",
Vars: vars,
Upgrade: true,
}
defer terraform.Destroy(t, terraformOptions)
terraform.InitAndPlan(t, terraformOptions)
// terraform.ApplyAndIdempotent(t, terraformOptions)
// Replace ApplyAndIdempotent() check with below code if provider and terraform report output changes that
// can not be prevented due to some bugs in this feature
// terraform.Apply(t, terraformOptions)
// stdout := terraform.Plan(t, terraformOptions)
// resourceCount := terraform.GetResourceCount(t, stdout)
// assert.Equal(t, 0, resourceCount.Add, "No resources should have been created. Found %d instead.", resourceCount.Add)
// assert.Equal(t, 0, resourceCount.Change, "No resources should have been changed. Found %d instead.", resourceCount.Change)
// assert.Equal(t, 0, resourceCount.Destroy, "No resources should have been destroyed. Found %d instead.", resourceCount.Destroy)
}
|
[
"\"TEST_GCP_PROJECT\"",
"\"TEST_GCP_ORG_DOMAIN\""
] |
[] |
[
"TEST_GCP_PROJECT",
"TEST_GCP_ORG_DOMAIN"
] |
[]
|
["TEST_GCP_PROJECT", "TEST_GCP_ORG_DOMAIN"]
|
go
| 2 | 0 | |
proxysql_exporter.go
|
// Copyright 2016-2017 Percona LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"os"
_ "github.com/go-sql-driver/mysql"
"github.com/percona/exporter_shared"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/log"
"github.com/prometheus/common/version"
)
const (
program = "proxysql_exporter"
defaultDataSource = "stats:stats@tcp(localhost:6032)/"
)
var (
versionF = flag.Bool("version", false, "Print version information and exit.")
listenAddressF = flag.String("web.listen-address", ":42004", "Address to listen on for web interface and telemetry.")
telemetryPathF = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.")
mysqlStatusF = flag.Bool("collect.mysql_status", true, "Collect from stats_mysql_global (SHOW MYSQL STATUS).")
mysqlConnectionPoolF = flag.Bool("collect.mysql_connection_pool", true, "Collect from stats_mysql_connection_pool.")
mysqlConnectionListF = flag.Bool("collect.mysql_connection_list", true, "Collect connection list from stats_mysql_processlist.")
mysqlDetailedConnectionListF = flag.Bool("collect.detailed.stats_mysql_processlist", false, "Collect detailed connection list from stats_mysql_processlist.")
mysqlCommandCounter = flag.Bool("collect.stats_command_counter", false, "Collect histograms over command latency")
mysqlRuntimeServers = flag.Bool("collect.runtime_mysql_servers", false, "Collect from runtime_mysql_servers.")
memoryMetricsF = flag.Bool("collect.stats_memory_metrics", false, "Collect memory metrics from stats_memory_metrics.")
)
func main() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "%s %s exports various ProxySQL metrics in Prometheus format.\n", os.Args[0], version.Version)
fmt.Fprintf(os.Stderr, "It uses DATA_SOURCE_NAME environment variable with following format: https://github.com/go-sql-driver/mysql#dsn-data-source-name\n")
fmt.Fprintf(os.Stderr, "Default value is %q.\n\n", defaultDataSource)
fmt.Fprintf(os.Stderr, "Usage: %s [flags]\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, "Flags:\n")
flag.PrintDefaults()
}
flag.Parse()
if *versionF {
fmt.Println(version.Print(program))
os.Exit(0)
}
dsn := os.Getenv("DATA_SOURCE_NAME")
if dsn == "" {
dsn = defaultDataSource
}
log.Infof("Starting %s %s for %s", program, version.Version, dsn)
exporter := NewExporter(dsn, *mysqlStatusF, *mysqlConnectionPoolF, *mysqlConnectionListF, *mysqlDetailedConnectionListF,
*mysqlRuntimeServers, *memoryMetricsF, *mysqlCommandCounter)
prometheus.MustRegister(exporter)
exporter_shared.RunServer("ProxySQL", *listenAddressF, *telemetryPathF, promhttp.Handler())
}
|
[
"\"DATA_SOURCE_NAME\""
] |
[] |
[
"DATA_SOURCE_NAME"
] |
[]
|
["DATA_SOURCE_NAME"]
|
go
| 1 | 0 | |
test/e2e/batcher/test_raw_batcher.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
from kubernetes import client
from kserve import KServeClient
from kserve import constants
from kserve import V1beta1PredictorSpec
from kserve import V1beta1Batcher
from kserve import V1beta1SKLearnSpec
from kserve import V1beta1InferenceServiceSpec
from kserve import V1beta1InferenceService
from kubernetes.client import V1ResourceRequirements
from ..common.utils import predict_str
from ..common.utils import KSERVE_TEST_NAMESPACE
from concurrent import futures
kserve_client = KServeClient(config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))
input_file = open('./data/iris_batch_input.json')
json_array = json.load(input_file)
def test_batcher_raw():
service_name = 'isvc-raw-sklearn-batcher'
annotations = dict()
annotations['serving.kserve.io/deploymentMode'] = 'RawDeployment'
predictor = V1beta1PredictorSpec(
batcher=V1beta1Batcher(
max_batch_size=32,
max_latency=5000,
),
min_replicas=1,
sklearn=V1beta1SKLearnSpec(
storage_uri="gs://kfserving-examples/models/sklearn/1.0/model",
resources=V1ResourceRequirements(
requests={"cpu": "100m", "memory": "256Mi"},
limits={"cpu": "100m", "memory": "256Mi"},
),
),
)
isvc = V1beta1InferenceService(api_version=constants.KSERVE_V1BETA1,
kind=constants.KSERVE_KIND,
metadata=client.V1ObjectMeta(
name=service_name, namespace=KSERVE_TEST_NAMESPACE,
annotations=annotations,
), spec=V1beta1InferenceServiceSpec(predictor=predictor),
)
kserve_client.create(isvc)
try:
kserve_client.wait_isvc_ready(service_name, namespace=KSERVE_TEST_NAMESPACE)
except RuntimeError as e:
print(kserve_client.api_instance.get_namespaced_custom_object("serving.knative.dev", "v1",
KSERVE_TEST_NAMESPACE,
"services", service_name + "-predictor-default"))
pods = kserve_client.core_api.list_namespaced_pod(KSERVE_TEST_NAMESPACE,
label_selector='serving.kserve.io/inferenceservice={}'.
format(service_name))
for pod in pods.items:
print(pod)
raise e
with futures.ThreadPoolExecutor(max_workers=4) as executor:
future_res = [
executor.submit(lambda: predict_str(service_name, json.dumps(item))) for item in json_array
]
results = [
f.result()["batchId"] for f in future_res
]
assert (all(x == results[0] for x in results))
kserve_client.delete(service_name, KSERVE_TEST_NAMESPACE)
|
[] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
python
| 1 | 0 | |
src/main.go
|
package main
import (
"ledger-api/src/config"
"ledger-api/src/router"
"log"
"os"
)
func main() {
r := router.Route()
var port string
if port = os.Getenv("PORT"); port == "" {
port = config.All["port"]
}
log.Println("API listening at http://localhost:" + port)
r.Run(":" + port)
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
vendor/github.com/mailhog/mhsendmail/cmd/cmd.go
|
package cmd
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"net/mail"
"net/smtp"
"os"
"os/user"
)
import flag "github.com/spf13/pflag"
// Go runs the MailHog sendmail replacement.
func Go() {
host, err := os.Hostname()
if err != nil {
host = "localhost"
}
username := "nobody"
user, err := user.Current()
if err == nil && user != nil && len(user.Username) > 0 {
username = user.Username
}
fromAddr := username + "@" + host
smtpAddr := "localhost:1025"
var recip []string
// defaults from envars if provided
if len(os.Getenv("MH_SENDMAIL_SMTP_ADDR")) > 0 {
smtpAddr = os.Getenv("MH_SENDMAIL_SMTP_ADDR")
}
if len(os.Getenv("MH_SENDMAIL_FROM")) > 0 {
fromAddr = os.Getenv("MH_SENDMAIL_FROM")
}
var verbose bool
// override defaults from cli flags
flag.StringVar(&smtpAddr, "smtp-addr", smtpAddr, "SMTP server address")
flag.StringVarP(&fromAddr, "from", "f", fromAddr, "SMTP sender")
flag.BoolP("long-i", "i", true, "Ignored. This flag exists for sendmail compatibility.")
flag.BoolP("long-t", "t", true, "Ignored. This flag exists for sendmail compatibility.")
flag.BoolVarP(&verbose, "verbose", "v", false, "Verbose mode (sends debug output to stderr)")
flag.Parse()
// allow recipient to be passed as an argument
recip = flag.Args()
if verbose {
fmt.Fprintln(os.Stderr, smtpAddr, fromAddr)
}
body, err := ioutil.ReadAll(os.Stdin)
if err != nil {
fmt.Fprintln(os.Stderr, "error reading stdin")
os.Exit(11)
}
msg, err := mail.ReadMessage(bytes.NewReader(body))
if err != nil {
fmt.Fprintln(os.Stderr, "error parsing message body")
os.Exit(11)
}
if len(recip) == 0 {
// We only need to parse the message to get a recipient if none were
// provided on the command line.
recip = append(recip, msg.Header.Get("To"))
}
err = smtp.SendMail(smtpAddr, nil, fromAddr, recip, body)
if err != nil {
fmt.Fprintln(os.Stderr, "error sending mail")
log.Fatal(err)
}
}
|
[
"\"MH_SENDMAIL_SMTP_ADDR\"",
"\"MH_SENDMAIL_SMTP_ADDR\"",
"\"MH_SENDMAIL_FROM\"",
"\"MH_SENDMAIL_FROM\""
] |
[] |
[
"MH_SENDMAIL_SMTP_ADDR",
"MH_SENDMAIL_FROM"
] |
[]
|
["MH_SENDMAIL_SMTP_ADDR", "MH_SENDMAIL_FROM"]
|
go
| 2 | 0 | |
models/accounts.go
|
package models
import (
u "go-contacts/utils"
"os"
"strings"
"github.com/dgrijalva/jwt-go"
"github.com/jinzhu/gorm"
"golang.org/x/crypto/bcrypt"
)
/*
JWT claims struct
*/
type Token struct {
UserId uint
jwt.StandardClaims
}
//a struct to rep user account
type Account struct {
gorm.Model
Email string `json:"email"`
Password string `json:"password"`
Token string `json:"token" sql:"-"`
}
//Validate incoming user details...
func (account *Account) Validate() (map[string]interface{}, bool) {
if !strings.Contains(account.Email, "@") {
return u.Message(false, "Email address is required"), false
}
if len(account.Password) < 6 {
return u.Message(false, "Password is required"), false
}
//Email must be unique
temp := &Account{}
//check for errors and duplicate emails
err := GetDB().Table("accounts").Where("email = ?", account.Email).First(temp).Error
if err != nil && err != gorm.ErrRecordNotFound {
return u.Message(false, "Connection error. Please retry"), false
}
if temp.Email != "" {
return u.Message(false, "Email address already in use by another user."), false
}
return u.Message(false, "Requirement passed"), true
}
func (account *Account) Create() map[string]interface{} {
if resp, ok := account.Validate(); !ok {
return resp
}
hashedPassword, _ := bcrypt.GenerateFromPassword([]byte(account.Password), bcrypt.DefaultCost)
account.Password = string(hashedPassword)
GetDB().Create(account)
if account.ID <= 0 {
return u.Message(false, "Failed to create account, connection error.")
}
//Create new JWT token for the newly registered account
tk := &Token{UserId: account.ID}
token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), tk)
tokenString, _ := token.SignedString([]byte(os.Getenv("token_password")))
account.Token = tokenString
account.Password = "" //delete password
response := u.Message(true, "Account has been created")
response["account"] = account
return response
}
func Login(email, password string) map[string]interface{} {
account := &Account{}
err := GetDB().Table("accounts").Where("email = ?", email).First(account).Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return u.Message(false, "Email address not found")
}
return u.Message(false, "Connection error. Please retry")
}
err = bcrypt.CompareHashAndPassword([]byte(account.Password), []byte(password))
if err != nil && err == bcrypt.ErrMismatchedHashAndPassword { //Password does not match!
return u.Message(false, "Invalid login credentials. Please try again")
}
//Worked! Logged In
account.Password = ""
//Create JWT token
tk := &Token{UserId: account.ID}
token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), tk)
tokenString, _ := token.SignedString([]byte(os.Getenv("token_password")))
account.Token = tokenString //Store the token in the response
resp := u.Message(true, "Logged In")
resp["account"] = account
return resp
}
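// GetUser fetches an account by primary key, returning nil when no matching row exists; the password hash is cleared before returning.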
func GetUser(u uint) *Account {
acc := &Account{}
GetDB().Table("accounts").Where("id = ?", u).First(acc)
if acc.Email == "" { //User not found!
return nil
}
acc.Password = ""
return acc
}
|
[
"\"token_password\"",
"\"token_password\""
] |
[] |
[
"token_password"
] |
[]
|
["token_password"]
|
go
| 1 | 0 | |
accounts/settings.py
|
import os
from django.conf import settings
USER_SETTINGS = getattr(settings, "PLATFORM_ACCOUNTS", {})
DEFAULTS = {
"CLIENT_ID": os.environ.get("LABS_CLIENT_ID"),
"CLIENT_SECRET": os.environ.get("LABS_CLIENT_SECRET"),
"REDIRECT_URI": os.environ.get("LABS_REDIRECT_URI"),
"SCOPE": ["read", "introspection"],
"PLATFORM_URL": "https://platform.pennlabs.org",
"ADMIN_PERMISSION": "example_admin",
"CUSTOM_ADMIN": True,
}
class AccountsSettings(object):
"""
Based on https://github.com/encode/django-rest-framework/blob/master/rest_framework/settings.py
"""
def __init__(self, settings=None, defaults=None):
self.settings = settings or {}
self.defaults = defaults or {}
def __getattr__(self, attr):
if attr not in self.defaults:
raise AttributeError("Invalid Penn Labs accounts setting: %s" % attr)
try:
val = self.settings[attr]
except KeyError:
val = self.defaults[attr]
setattr(self, attr, val)
return val
accounts_settings = AccountsSettings(USER_SETTINGS, DEFAULTS)
|
[] |
[] |
[
"LABS_CLIENT_SECRET",
"LABS_CLIENT_ID",
"LABS_REDIRECT_URI"
] |
[]
|
["LABS_CLIENT_SECRET", "LABS_CLIENT_ID", "LABS_REDIRECT_URI"]
|
python
| 3 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# reddit_min directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, "reddit_min"))
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/veGiantModel/engine/p2p.py
|
# Copyright (c) 2021, ByteDance Inc. All rights reserved.
'''
Copyright 2019 The Microsoft DeepSpeed Team
'''
import os
import torch
import torch.distributed as dist
from deepspeed.utils import logger, log_dist
ENABLE_PYTORCH_BROADCAST = os.environ.get("ENABLE_PYTORCH_BROADCAST", "0") != "0"
try:
if not ENABLE_PYTORCH_BROADCAST:
import byteps.torch as bps
else:
print("BytePS import is disabled", flush=True)
bps = None
except ImportError:
print("BytePS is not installed")
bps = None
_groups = None
_grid = None
DS_PIPE_VERBOSE = os.environ.get('DS_PIPE_VERBOSE', "0") != "0"
did_recv = False
send_stream = None
recv_stream = None
bps_send_handles = {}
bps_recv_handles = {}
#initializes adjacent process groups
#run this only after torch.distributed.init_process_group() has been called
def init_process_groups(grid):
global _groups, _grid
_grid = grid
assert _grid.pipe_parallel_size > 1, "There is no model parallelism"
_groups = [dist.new_group(ranks=group) for group in _grid.p2p_groups]
def _is_valid_send_recv(src_stage, dest_stage):
first_stage = 0
last_stage = _grid.pipe_parallel_size - 1
assert abs(src_stage-dest_stage) == 1 or \
(src_stage == first_stage and dest_stage == last_stage) or \
(src_stage == last_stage and dest_stage == first_stage), \
"Functionality currently limited to send and receive between adjacent ranks only"
def send(tensor, dest_stage, async_op=False):
global _groups
async_op = False
src_stage = _grid.get_stage_id()
_is_valid_send_recv(src_stage, dest_stage)
group = _get_send_recv_group(src_stage, dest_stage)
src_rank = _grid.stage_to_global(stage_id=src_stage)
if tensor.dtype != torch.float32 and DS_PIPE_VERBOSE:
print('warning: p2p send', tensor.dtype, tensor.shape, flush=True)
return _send(tensor, src_rank, group, async_op)
def _bps_get_name(src, dest, name, suffix):
return "_".join([str(src), str(dest), str(name), str(suffix)])
def bps_send(tensor, dest_stage, name, index, async_op=True):
global bps_send_handles
src_stage = _grid.get_stage_id()
_is_valid_send_recv(src_stage, dest_stage)
src_rank = _grid.stage_to_global(stage_id=src_stage)
dest_rank = _grid.stage_to_global(stage_id=dest_stage)
name = _bps_get_name(src_rank, dest_rank, name, index)
if name not in bps_send_handles:
# XXX hard-code max number of tensors for this name
bps_send_handles[name] = [None] * 10
else:
handle = bps_send_handles[name][index]
if handle is not None:
bps.synchronize(handle)
handle = bps.send_async(tensor, dest_rank, name=name)
# XXX
if not async_op:
bps.synchronize(handle)
else:
bps_send_handles[name][index] = handle
return tensor
def bps_sync(src_stage, name, index=0):
dest_stage = _grid.get_stage_id()
_is_valid_send_recv(src_stage, dest_stage)
src_rank = _grid.stage_to_global(stage_id=src_stage)
dest_rank = _grid.stage_to_global(stage_id=dest_stage)
name = _bps_get_name(src_rank, dest_rank, name, index)
if name in bps_recv_handles:
handle = bps_recv_handles[name][index]
if handle is not None:
bps.synchronize(handle)
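# bps_sync_all blocks until every outstanding BytePS send and receive handle has completed.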
def bps_sync_all():
for name, handles in bps_send_handles.items():
for handle in handles:
if handle is not None:
bps.synchronize(handle)
for name, handles in bps_recv_handles.items():
for handle in handles:
if handle is not None:
bps.synchronize(handle)
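# bps_recv posts an asynchronous BytePS receive into the given tensor, with the same per-slot handle bookkeeping as bps_send.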
def bps_recv(tensor, src_stage, name, index=0, async_op=True):
global bps_recv_handles
dest_stage = _grid.get_stage_id()
_is_valid_send_recv(src_stage, dest_stage)
src_rank = _grid.stage_to_global(stage_id=src_stage)
dest_rank = _grid.stage_to_global(stage_id=dest_stage)
name = _bps_get_name(src_rank, dest_rank, name, index)
if name not in bps_recv_handles:
# XXX hard-code max number of tensors for this name
bps_recv_handles[name] = [None] * 10
else:
handle = bps_recv_handles[name][index]
if handle is not None:
bps.synchronize(handle)
handle = bps.recv_async(tensor, src_rank, name=name)
if not async_op:
bps.synchronize(handle)
else:
bps_recv_handles[name][index] = handle
return tensor
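# _send and _recv implement point-to-point transfer as a broadcast within a two-rank process group; _recv also records that a receive occurred.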
def _send(tensor, src_rank, group, async_op):
global did_recv
return dist.broadcast(tensor, src_rank, group=group, async_op=async_op)
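# send_grads broadcasts gradient tensors to the adjacent stage; only the designated source rank of the gradient group issues the broadcast.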
def send_grads(tensor, grid, async_op=False):
async_op = False
if grid.send_grads_src_rank == grid.global_rank:
# print(f'start rank: {grid.global_rank}, stage_id: {grid.stage_id}, mp_id: {grid.model_parallel_id}, _send_grad_src_rank: {grid.send_grads_src_rank}, send group: {grid.send_grads_group}, send_grad_groups: {grid.send_grads_proc_group}', flush=True)
_send(tensor, grid.send_grads_src_rank, grid.send_grads_proc_group, async_op)
# print(f'finis rank: {grid.global_rank}, stage_id: {grid.stage_id}, mp_id: {grid.model_parallel_id}, _send_grad_src_rank: {grid.send_grads_src_rank}, send group: {grid.send_grads_group}', flush=True)
else:
# print(f'finish fast rank: {grid.global_rank}, stage_id: {grid.stage_id}, mp_id: {grid.model_parallel_id}, _send_grad_src_rank: {grid.send_grads_src_rank}, send group: {grid.send_grads_group}', flush=True)
pass
def _recv(tensor, src_rank, group, async_op):
global did_recv
tensor = dist.broadcast(tensor, src_rank, group=group, async_op=async_op)
did_recv = True
return tensor
def recv_grads(tensor, grid, async_op=False):
async_op = False
# print(f'start rank: {grid.global_rank}, stage_id: {grid.stage_id}, mp_id: {grid.model_parallel_id}, _recv_grad_src_rank: {grid.recv_grads_src_rank}, recv group: {grid.recv_grads_group}, recv_grad_groups: {grid.recv_grads_proc_group}', flush=True)
_recv(tensor, grid.recv_grads_src_rank, grid.recv_grads_proc_group, async_op)
# print(f'finish rank: {grid.global_rank}, stage_id: {grid.stage_id}, mp_id: {grid.model_parallel_id}, _recv_grad_src_rank: {grid.recv_grads_src_rank}, recv group: {grid.recv_grads_group}', flush=True)
def send_activations(tensor, grid, async_op=False):
async_op = False
if grid.send_activation_src_rank == grid.global_rank:
# print(f'start rank: {grid.global_rank}, stage_id: {grid.stage_id}, mp_id: {grid.model_parallel_id}, _send_grad_src_rank: {grid.send_grads_src_rank}, send group: {grid.send_grads_group}, send_grad_groups: {grid.send_grads_proc_group}', flush=True)
_send(tensor, grid.send_activation_src_rank, grid.send_activation_proc_group, async_op)
# print(f'finis rank: {grid.global_rank}, stage_id: {grid.stage_id}, mp_id: {grid.model_parallel_id}, _send_grad_src_rank: {grid.send_grads_src_rank}, send group: {grid.send_grads_group}', flush=True)
else:
# print(f'finish fast rank: {grid.global_rank}, stage_id: {grid.stage_id}, mp_id: {grid.model_parallel_id}, _send_grad_src_rank: {grid.send_grads_src_rank}, send group: {grid.send_grads_group}', flush=True)
pass
def recv_activations(tensor, grid, async_op=False):
async_op = False
# print(f'start rank: {grid.global_rank}, stage_id: {grid.stage_id}, mp_id: {grid.model_parallel_id}, _recv_grad_src_rank: {grid.recv_grads_src_rank}, recv group: {grid.recv_grads_group}, recv_grad_groups: {grid.recv_grads_proc_group}', flush=True)
_recv(tensor, grid.recv_activation_src_rank, grid.recv_activation_proc_group, async_op)
# print(f'finish rank: {grid.global_rank}, stage_id: {grid.stage_id}, mp_id: {grid.model_parallel_id}, _recv_grad_src_rank: {grid.recv_grads_src_rank}, recv group: {grid.recv_grads_group}', flush=True)
def recv(tensor, src_stage, async_op=False):
global _groups
global did_recv
async_op = False
dest_stage = _grid.get_stage_id()
_is_valid_send_recv(src_stage, dest_stage)
group = _get_send_recv_group(src_stage, dest_stage)
src_rank = _grid.stage_to_global(stage_id=src_stage)
return _recv(tensor, src_rank, group, async_op)
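# barrier synchronizes the process group associated with the given stage, logging entry and exit for debugging.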
def barrier(stage_id):
global _groups, _grid
group_id = _grid.stage_to_global(stage_id=stage_id)
if (dist.get_rank() >= 0):
print("Barrier Group ID", group_id)
print("Barrier Group", _grid.p2p_groups[group_id])
dist.barrier(group=_groups[group_id])
if (dist.get_rank() >= 0):
print("Exiting Barrier ", group_id)
def _get_send_recv_group(src_stage, dest_stage):
'''the group id is always the smaller rank unless its a wrap around'''
stage_id = None
first_stage = 0
last_stage = _grid.pipe_parallel_size - 1
if (src_stage == first_stage and dest_stage == last_stage
or dest_stage == first_stage and src_stage == last_stage):
stage_id = last_stage
elif src_stage > dest_stage:
stage_id = dest_stage
else:
stage_id = src_stage
'''group_id corresponds to group of [group_id, group_id+1]
unless group_id is the rank of the last stage
in which case group_id corresponds to group[group_id-num_stages+1, group_id]
'''
group_id = _grid.stage_to_global(stage_id=stage_id)
return _groups[group_id]
|
[] |
[] |
[
"ENABLE_PYTORCH_BROADCAST",
"DS_PIPE_VERBOSE"
] |
[]
|
["ENABLE_PYTORCH_BROADCAST", "DS_PIPE_VERBOSE"]
|
python
| 2 | 0 | |
examples/http/app/app.go
|
package main
import (
"context"
"flag"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"strings"
"sync"
"github.com/johnsiilver/serveonssh"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
)
var (
endpoint = flag.String("endpoint", "", "The host:port we are connecting to")
socket = flag.String("socket", "", "The Unix socket on the REMOTE side to connect to")
keyFile = flag.String("key", "", "The SSH key to use. If not provided, attempts to use the SSH agent.")
pass = flag.String("pass", "", "File containing a password to use for SSH. If not provided tries --key and then the SSH agent.")
user = flag.String("user", os.Getenv("USER"), "The user to SSH as, set to your logged in user")
)
func main() {
flag.Parse()
log.SetFlags(log.LstdFlags | log.Lshortfile)
auths, err := getAuthFromFlags()
if err != nil {
log.Fatalf("auth failure: %s", err)
}
f, err := serveonssh.New(
*endpoint,
*socket,
&ssh.ClientConfig{
User: *user,
Auth: auths,
HostKeyCallback: ssh.InsecureIgnoreHostKey(), // Don't do this in real life
},
)
if err != nil {
panic(err)
}
defer f.Close()
httpc := http.Client{
Transport: &http.Transport{
DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
return f.Dialer()()
},
},
}
wg := sync.WaitGroup{}
for i := 0; i < 100; i++ {
i := i
wg.Add(1)
go func() {
defer wg.Done()
resp, err := httpc.Get("http://unix" + *socket)
if err != nil {
panic(err)
}
b, err := io.ReadAll(resp.Body)
if err != nil {
panic(err)
}
if string(b) != "Hello" {
log.Println("server returned: ", string(b))
}
log.Printf("attempt(%d) was successful", i)
}()
}
wg.Wait()
}
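// getAuthFromFlags builds the SSH auth method list from the --key and --pass flags and appends SSH agent auth when an agent is reachable.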
func getAuthFromFlags() ([]ssh.AuthMethod, error) {
auths := []ssh.AuthMethod{}
if *keyFile != "" {
a, err := publicKey(*keyFile)
if err != nil {
return nil, err
}
auths = append(auths, a)
}
if *pass != "" {
b, err := os.ReadFile(*pass)
if err != nil {
return nil, fmt.Errorf("pasword file(%s) had error: %s", *pass, err)
}
auths = append(auths, ssh.Password(strings.TrimSpace(string(b))))
}
if a, err := agentAuth(); err == nil {
auths = append(auths, a)
}
return auths, nil
}
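// agentAuth dials the local SSH agent over the SSH_AUTH_SOCK Unix socket and exposes its signers as an auth method.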
func agentAuth() (ssh.AuthMethod, error) {
conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
if err != nil {
return nil, err
}
client := agent.NewClient(conn)
return ssh.PublicKeysCallback(client.Signers), nil
}
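// publicKey reads and parses the given private key file and wraps it as a public-key auth method.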
func publicKey(privateKeyFile string) (ssh.AuthMethod, error) {
k, err := os.ReadFile(privateKeyFile)
if err != nil {
return nil, err
}
signer, err := ssh.ParsePrivateKey(k)
if err != nil {
return nil, err
}
return ssh.PublicKeys(signer), nil
}
|
[
"\"USER\"",
"\"SSH_AUTH_SOCK\""
] |
[] |
[
"USER",
"SSH_AUTH_SOCK"
] |
[]
|
["USER", "SSH_AUTH_SOCK"]
|
go
| 2 | 0 | |
tests/test_other.py
|
import multiprocessing, os, re, shutil, subprocess, sys
import tools.shared
from tools.shared import *
from runner import RunnerCore, path_from_root, get_bullet_library, nonfastcomp
class other(RunnerCore):
def get_zlib_library(self):
if WINDOWS:
return self.get_library('zlib', os.path.join('libz.a'), configure=['emconfigure.bat'], configure_args=['cmake', '.', '-DBUILD_SHARED_LIBS=OFF'], make=['mingw32-make'], make_args=[])
else:
return self.get_library('zlib', os.path.join('libz.a'), make_args=['libz.a'])
def test_emcc(self):
for compiler in [EMCC, EMXX]:
shortcompiler = os.path.basename(compiler)
suffix = '.c' if compiler == EMCC else '.cpp'
# --version
output = Popen([PYTHON, compiler, '--version'], stdout=PIPE, stderr=PIPE).communicate()
output = output[0].replace('\r', '')
self.assertContained('''emcc (Emscripten GCC-like replacement)''', output)
self.assertContained('''Copyright (C) 2014 the Emscripten authors (see AUTHORS.txt)
This is free and open source software under the MIT license.
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
''', output)
# -v, without input files
output = Popen([PYTHON, compiler, '-v'], stdout=PIPE, stderr=PIPE).communicate()
self.assertContained('''clang version''', output[1].replace('\r', ''), output[1].replace('\r', ''))
# --help
output = Popen([PYTHON, compiler, '--help'], stdout=PIPE, stderr=PIPE).communicate()
self.assertContained('''%s [options] file...
Most normal gcc/g++ options will work, for example:
--help Display this information
--version Display compiler version information
Options that are modified or new in %s include:
-O0 No optimizations (default)''' % (shortcompiler, shortcompiler), output[0].replace('\r', ''), output[1].replace('\r', ''))
# emcc src.cpp ==> writes a.out.js
self.clear()
output = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world' + suffix)], stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('a.out.js'), '\n'.join(output)
self.assertContained('hello, world!', run_js('a.out.js'))
# properly report source code errors, and stop there
self.clear()
assert not os.path.exists('a.out.js')
process = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world_error' + suffix)], stdout=PIPE, stderr=PIPE)
output = process.communicate()
assert not os.path.exists('a.out.js'), 'compilation failed, so no output file is expected'
assert len(output[0]) == 0, output[0]
assert process.returncode != 0, 'Failed compilation must return a nonzero error code!'
self.assertNotContained('IOError', output[1]) # no python stack
self.assertNotContained('Traceback', output[1]) # no python stack
self.assertContained('error: invalid preprocessing directive', output[1])
self.assertContained(["error: use of undeclared identifier 'cheez", "error: unknown type name 'cheez'"], output[1])
self.assertContained('errors generated', output[1])
assert 'compiler frontend failed to generate LLVM bitcode, halting' in output[1].split('errors generated.')[1]
# emcc src.cpp -c and emcc src.cpp -o src.[o|bc] ==> should give a .bc file
# regression check: -o js should create "js", with bitcode content
for args in [['-c'], ['-o', 'src.o'], ['-o', 'src.bc'], ['-o', 'src.so'], ['-o', 'js']]:
print '-c stuff', args
target = args[1] if len(args) == 2 else 'hello_world.o'
self.clear()
Popen([PYTHON, compiler, path_from_root('tests', 'hello_world' + suffix)] + args, stdout=PIPE, stderr=PIPE).communicate()
syms = Building.llvm_nm(target)
assert len(syms.defs) == 1 and 'main' in syms.defs, 'Failed to generate valid bitcode'
if target == 'js': # make sure emcc can recognize the target as a bitcode file
shutil.move(target, target + '.bc')
target += '.bc'
output = Popen([PYTHON, compiler, target, '-o', target + '.js'], stdout = PIPE, stderr = PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists(target + '.js'), 'Expected %s to exist since args are %s : %s' % (target + '.js', str(args), '\n'.join(output))
self.assertContained('hello, world!', run_js(target + '.js'))
# handle singleton archives
self.clear()
Popen([PYTHON, compiler, path_from_root('tests', 'hello_world' + suffix), '-o', 'a.bc'], stdout=PIPE, stderr=PIPE).communicate()
Popen([LLVM_AR, 'r', 'a.a', 'a.bc'], stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists('a.a')
output = Popen([PYTHON, compiler, 'a.a']).communicate()
assert os.path.exists('a.out.js'), output
self.assertContained('hello, world!', run_js('a.out.js'))
# emcc src.ll ==> generates .js
self.clear()
output = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world.ll')], stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('a.out.js'), '\n'.join(output)
self.assertContained('hello, world!', run_js('a.out.js'))
# emcc [..] -o [path] ==> should work with absolute paths
try:
for path in [os.path.abspath(os.path.join('..', 'file1.js')), os.path.join('b_dir', 'file2.js')]:
print path
self.clear(in_curr=True)
os.chdir(self.get_dir())
if not os.path.exists('a_dir'): os.mkdir('a_dir')
os.chdir('a_dir')
if not os.path.exists('b_dir'): os.mkdir('b_dir')
output = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world.ll'), '-o', path], stdout=PIPE, stderr=PIPE).communicate()
print output
assert os.path.exists(path), path + ' does not exist; ' + '\n'.join(output)
last = os.getcwd()
os.chdir(os.path.dirname(path))
self.assertContained('hello, world!', run_js(os.path.basename(path)))
os.chdir(last)
finally:
os.chdir(self.get_dir())
self.clear()
for source, has_malloc in [('hello_world' + suffix, False), ('hello_malloc.cpp', True)]:
print source, has_malloc
self.clear()
output = Popen([PYTHON, compiler, path_from_root('tests', source)], stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists('a.out.js'), '\n'.join(output)
self.assertContained('hello, world!', run_js('a.out.js'))
generated = open('a.out.js').read()
assert ('function _malloc(bytes) {' in generated) == (not has_malloc), 'If malloc is needed, it should be there; if not, it should not be'
# Optimization: emcc src.cpp -o something.js [-Ox]. -O0 is the same as not specifying any optimization setting
for params, opt_level, bc_params, closure, has_malloc in [ # bc params are used after compiling to bitcode
(['-o', 'something.js'], 0, None, 0, 1),
(['-o', 'something.js', '-O0'], 0, None, 0, 0),
(['-o', 'something.js', '-O1'], 1, None, 0, 0),
(['-o', 'something.js', '-O1', '-g'], 1, None, 0, 0), # no closure since debug
(['-o', 'something.js', '-O2'], 2, None, 0, 1),
(['-o', 'something.js', '-O2', '-g'], 2, None, 0, 0),
(['-o', 'something.js', '-Os'], 2, None, 0, 1),
(['-o', 'something.js', '-O3', '-s', 'ASM_JS=0'], 3, None, 0, 1),
# and, test compiling to bitcode first
(['-o', 'something.bc'], 0, [], 0, 0),
(['-o', 'something.bc', '-O0'], 0, [], 0, 0),
(['-o', 'something.bc', '-O1'], 1, ['-O1'], 0, 0),
(['-o', 'something.bc', '-O2'], 2, ['-O2'], 0, 0),
(['-o', 'something.bc', '-O3'], 3, ['-O3', '-s', 'ASM_JS=0'], 0, 0),
(['-O1', '-o', 'something.bc'], 1, [], 0, 0),
]:
print params, opt_level, bc_params, closure, has_malloc
self.clear()
keep_debug = '-g' in params
args = [PYTHON, compiler, path_from_root('tests', 'hello_world_loop' + ('_malloc' if has_malloc else '') + '.cpp')] + params
print '..', args
output = Popen(args,
stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
if bc_params is not None:
assert os.path.exists('something.bc'), output[1]
bc_args = [PYTHON, compiler, 'something.bc', '-o', 'something.js'] + bc_params
print '....', bc_args
output = Popen(bc_args, stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists('something.js'), output[1]
self.assertContained('hello, world!', run_js('something.js'))
# Verify optimization level etc. in the generated code
# XXX these are quite sensitive, and will need updating when code generation changes
generated = open('something.js').read() # TODO: parse out the _main function itself, not support code, if the tests below need that some day
assert 'new Uint16Array' in generated and 'new Uint32Array' in generated, 'typed arrays 2 should be used by default'
assert 'SAFE_HEAP' not in generated, 'safe heap should not be used by default'
assert ': while(' not in generated, 'when relooping we also js-optimize, so there should be no labelled whiles'
if closure:
if opt_level == 0: assert '._main =' in generated, 'closure compiler should have been run'
elif opt_level >= 1: assert '._main=' in generated, 'closure compiler should have been run (and output should be minified)'
else:
# closure has not been run, we can do some additional checks. TODO: figure out how to do these even with closure
assert '._main = ' not in generated, 'closure compiler should not have been run'
if keep_debug:
assert ('(label)' in generated or '(label | 0)' in generated) == (opt_level <= 0), 'relooping should be in opt >= 1'
assert ('assert(STACKTOP < STACK_MAX' in generated) == (opt_level == 0), 'assertions should be in opt == 0'
assert '$i' in generated or '$storemerge' in generated or '$original' in generated, 'micro opts should always be on'
if opt_level >= 2 and '-g' in params:
assert re.search('HEAP8\[\$?\w+ ?\+ ?\(+\$?\w+ ?', generated) or re.search('HEAP8\[HEAP32\[', generated), 'eliminator should create compound expressions, and fewer one-time vars' # also in -O1, but easier to test in -O2
assert ('_puts(' in generated) == (opt_level >= 1), 'with opt >= 1, llvm opts are run and they should optimize printf to puts'
if opt_level == 0 or '-g' in params: assert 'function _main() {' in generated or 'function _main(){' in generated, 'Should be unminified'
elif opt_level >= 2: assert ('function _main(){' in generated or '"use asm";var a=' in generated), 'Should be whitespace-minified'
# emcc -s RELOOP=1 src.cpp ==> should pass -s to emscripten.py. --typed-arrays is a convenient alias for -s USE_TYPED_ARRAYS
for params, test, text in [
(['-O2'], lambda generated: 'function intArrayToString' in generated, 'shell has unminified utilities'),
(['-O2', '--closure', '1'], lambda generated: 'function intArrayToString' not in generated, 'closure minifies the shell'),
(['-O2'], lambda generated: 'var b=0' in generated and not 'function _main' in generated, 'registerize/minify is run by default in -O2'),
(['-O2', '--minify', '0'], lambda generated: 'var b = 0' in generated and not 'function _main' in generated, 'minify is cancelled, but not registerize'),
(['-O2', '--js-opts', '0'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'js opts are cancelled'),
(['-O2', '-g'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'registerize/minify is cancelled by -g'),
(['-O2', '-g0'], lambda generated: 'var b=0' in generated and not 'function _main' in generated, 'registerize/minify is run by default in -O2 -g0'),
(['-O2', '-g1'], lambda generated: 'var b = 0' in generated and not 'function _main' in generated, 'compress is cancelled by -g1'),
(['-O2', '-g2'], lambda generated: ('var b = 0' in generated or 'var i1 = 0' in generated) and 'function _main' in generated, 'minify is cancelled by -g2'),
(['-O2', '-g3'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'registerize is cancelled by -g3'),
(['-O2', '-profiling'], lambda generated: ('var b = 0' in generated or 'var i1 = 0' in generated) and 'function _main' in generated, 'similar to -g2'),
#(['-O2', '-g4'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'same as -g3 for now'),
(['-s', 'INLINING_LIMIT=0'], lambda generated: 'function _dump' in generated, 'no inlining without opts'),
(['-s', 'USE_TYPED_ARRAYS=0'], lambda generated: 'new Int32Array' not in generated, 'disable typed arrays'),
(['-s', 'USE_TYPED_ARRAYS=1'], lambda generated: 'IHEAPU = ' in generated, 'typed arrays 1 selected'),
([], lambda generated: 'Module["_dump"]' not in generated, 'dump is not exported by default'),
(['-s', 'EXPORTED_FUNCTIONS=["_main", "_dump"]'], lambda generated: 'Module["_dump"]' in generated, 'dump is now exported'),
(['--typed-arrays', '0'], lambda generated: 'new Int32Array' not in generated, 'disable typed arrays'),
(['--typed-arrays', '1'], lambda generated: 'IHEAPU = ' in generated, 'typed arrays 1 selected'),
(['--typed-arrays', '2'], lambda generated: 'new Uint16Array' in generated and 'new Uint32Array' in generated, 'typed arrays 2 selected'),
(['--llvm-opts', '1'], lambda generated: '_puts(' in generated, 'llvm opts requested'),
([], lambda generated: '// The Module object' in generated, 'without opts, comments in shell code'),
(['-O2'], lambda generated: '// The Module object' not in generated, 'with opts, no comments in shell code'),
(['-O2', '-g2'], lambda generated: '// The Module object' not in generated, 'with -g2, no comments in shell code'),
(['-O2', '-g3'], lambda generated: '// The Module object' in generated, 'with -g3, yes comments in shell code'),
]:
print params, text
self.clear()
if os.environ.get('EMCC_FAST_COMPILER') != '0' and text in ['disable typed arrays', 'typed arrays 1 selected']: continue
output = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world_loop.cpp'), '-o', 'a.out.js'] + params, stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('a.out.js'), '\n'.join(output)
self.assertContained('hello, world!', run_js('a.out.js'))
assert test(open('a.out.js').read()), text
# Compiling two source files into a final JS.
for args, target in [([], 'a.out.js'), (['-o', 'combined.js'], 'combined.js')]:
self.clear()
output = Popen([PYTHON, compiler, path_from_root('tests', 'twopart_main.cpp'), path_from_root('tests', 'twopart_side.cpp')] + args,
stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists(target), '\n'.join(output)
self.assertContained('side got: hello from main, over', run_js(target))
# Compiling two files with -c will generate separate .bc files
self.clear()
output = Popen([PYTHON, compiler, path_from_root('tests', 'twopart_main.cpp'), path_from_root('tests', 'twopart_side.cpp'), '-c'] + args,
stdout=PIPE, stderr=PIPE).communicate()
if '-o' in args:
# specifying -o and -c is an error
assert 'fatal error' in output[1], output[1]
continue
assert os.path.exists('twopart_main.o'), '\n'.join(output)
assert os.path.exists('twopart_side.o'), '\n'.join(output)
assert not os.path.exists(target), 'We should only have created bitcode here: ' + '\n'.join(output)
# Compiling one of them alone is expected to fail
output = Popen([PYTHON, compiler, 'twopart_main.o', '-O1', '-g'] + args, stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists(target), '\n'.join(output)
#print '\n'.join(output)
self.assertContained('missing function', run_js(target, stderr=STDOUT))
try_delete(target)
# Combining those bc files into js should work
output = Popen([PYTHON, compiler, 'twopart_main.o', 'twopart_side.o'] + args, stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists(target), '\n'.join(output)
self.assertContained('side got: hello from main, over', run_js(target))
# Combining bc files into another bc should also work
try_delete(target)
assert not os.path.exists(target)
output = Popen([PYTHON, compiler, 'twopart_main.o', 'twopart_side.o', '-o', 'combined.bc'] + args, stdout=PIPE, stderr=PIPE).communicate()
syms = Building.llvm_nm('combined.bc')
assert len(syms.defs) == 2 and 'main' in syms.defs, 'Failed to generate valid bitcode'
output = Popen([PYTHON, compiler, 'combined.bc', '-o', 'combined.bc.js'], stdout = PIPE, stderr = PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('combined.bc.js'), 'Expected %s to exist' % ('combined.bc.js')
self.assertContained('side got: hello from main, over', run_js('combined.bc.js'))
# --js-transform <transform>
self.clear()
trans = os.path.join(self.get_dir(), 't.py')
trans_file = open(trans, 'w')
trans_file.write('''
import sys
f = open(sys.argv[1], 'w')
f.write('transformed!')
f.close()
''')
trans_file.close()
output = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world' + suffix), '--js-transform', '%s t.py' % (PYTHON)], stdout=PIPE, stderr=PIPE).communicate()
assert open('a.out.js').read() == 'transformed!', 'Transformed output must be as expected'
# TODO: Add in files test a clear example of using disablePermissions, and link to it from the wiki
# TODO: test normal project linking, static and dynamic: get_library should not need to be told what to link!
# TODO: deprecate llvm optimizations, dlmalloc, etc. in emscripten.py.
def test_emcc_nonfastcomp(self):
nonfastcomp(self.test_emcc)
def test_cmake(self):
# Test all supported generators.
if WINDOWS:
generators = ['MinGW Makefiles', 'NMake Makefiles']
else:
generators = ['Unix Makefiles', 'Ninja', 'Eclipse CDT4 - Ninja']
def nmake_detect_error(configuration):
if Building.which(configuration['build'][0]):
return None
else:
return 'Skipping NMake test for CMake support, since nmake was not found in PATH. Run this test in Visual Studio command prompt to easily access nmake.'
def check_makefile(configuration, dirname):
assert os.path.exists(dirname + '/Makefile'), 'CMake call did not produce a Makefile!'
configurations = { 'MinGW Makefiles' : { 'prebuild': check_makefile,
'build' : ['mingw32-make'],
},
'NMake Makefiles' : { 'detect' : nmake_detect_error,
'prebuild': check_makefile,
'build' : ['nmake', '/NOLOGO'],
},
'Unix Makefiles' : { 'prebuild': check_makefile,
'build' : ['make'],
},
'Ninja' : { 'build' : ['ninja'],
},
'Eclipse CDT4 - Ninja': { 'build' : ['ninja'],
}
}
if os.name == 'nt':
emconfigure = path_from_root('emconfigure.bat')
else:
emconfigure = path_from_root('emconfigure')
for generator in generators:
conf = configurations[generator]
make = conf['build']
try:
detector = conf['detect']
except KeyError:
detector = None
if detector:
error = detector(conf)
elif len(make) == 1 and not Building.which(make[0]):
# Use simple test if applicable
error = 'Skipping %s test for CMake support, since it could not be detected.' % generator
else:
error = None
if error:
logging.warning(error)
continue
try:
prebuild = conf['prebuild']
except KeyError:
prebuild = None
try:
postbuild = conf['postbuild']
except KeyError:
postbuild = None
cmake_cases = ['target_js', 'target_html']
cmake_outputs = ['test_cmake.js', 'hello_world_gles.html']
for i in range(0, 2):
for configuration in ['Debug', 'Release']:
# CMake can be invoked in two ways, using 'emconfigure cmake', or by directly running 'cmake'.
# Test both methods.
for invoke_method in ['cmake', 'emconfigure']:
# Create a temp workspace folder
cmakelistsdir = path_from_root('tests', 'cmake', cmake_cases[i])
tempdirname = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=TEMP_DIR)
try:
os.chdir(tempdirname)
verbose_level = int(os.getenv('EM_BUILD_VERBOSE')) if os.getenv('EM_BUILD_VERBOSE') != None else 0
# Run Cmake
if invoke_method == 'cmake':
# Test invoking cmake directly.
cmd = ['cmake', '-DCMAKE_TOOLCHAIN_FILE='+path_from_root('cmake', 'Platform', 'Emscripten.cmake'),
'-DCMAKE_BUILD_TYPE=' + configuration, '-G', generator, cmakelistsdir]
else:
# Test invoking via 'emconfigure cmake'
cmd = [emconfigure, 'cmake', '-DCMAKE_BUILD_TYPE=' + configuration, '-G', generator, cmakelistsdir]
ret = Popen(cmd, stdout=None if verbose_level >= 2 else PIPE, stderr=None if verbose_level >= 1 else PIPE).communicate()
if len(ret) > 1 and ret[1] != None and len(ret[1].strip()) > 0:
logging.error(ret[1]) # If there were any errors, print them directly to console for diagnostics.
if len(ret) > 1 and ret[1] != None and 'error' in ret[1].lower():
logging.error('Failed command: ' + ' '.join(cmd))
logging.error('Result:\n' + ret[1])
raise Exception('cmake call failed!')
if prebuild:
prebuild(configuration, tempdirname)
# Build
cmd = make + (['VERBOSE=1'] if verbose_level >= 3 else [])
ret = Popen(cmd, stdout=None if verbose_level >= 2 else PIPE).communicate()
if len(ret) > 1 and ret[1] != None and len(ret[1].strip()) > 0:
logging.error(ret[1]) # If there were any errors, print them directly to console for diagnostics.
if len(ret) > 0 and ret[0] != None and 'error' in ret[0].lower() and not '0 error(s)' in ret[0].lower():
logging.error('Failed command: ' + ' '.join(cmd))
logging.error('Result:\n' + ret[0])
raise Exception('make failed!')
assert os.path.exists(tempdirname + '/' + cmake_outputs[i]), 'Building a cmake-generated Makefile failed to produce an output file %s!' % tempdirname + '/' + cmake_outputs[i]
if postbuild:
postbuild(configuration, tempdirname)
# Run through node, if CMake produced a .js file.
if cmake_outputs[i].endswith('.js'):
ret = Popen(listify(NODE_JS) + [tempdirname + '/' + cmake_outputs[i]], stdout=PIPE).communicate()[0]
self.assertTextDataIdentical(open(cmakelistsdir + '/out.txt', 'r').read().strip(), ret.strip())
finally:
os.chdir(path_from_root('tests')) # Move away from the directory we are about to remove.
shutil.rmtree(tempdirname)
def test_failure_error_code(self):
for compiler in [EMCC, EMXX]:
# Test that if one file is missing from the build, then emcc shouldn't succeed, and shouldn't try to produce an output file.
process = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world.c'), 'this_file_is_missing.c', '-o', 'this_output_file_should_never_exist.js'], stdout=PIPE, stderr=PIPE)
process.communicate()
assert process.returncode != 0, 'Trying to compile a nonexisting file should return with a nonzero error code!'
assert os.path.exists('this_output_file_should_never_exist.js') == False, 'Emcc should not produce an output file when build fails!'
def test_cxx03(self):
for compiler in [EMCC, EMXX]:
process = Popen([PYTHON, compiler, path_from_root('tests', 'hello_cxx03.cpp')], stdout=PIPE, stderr=PIPE)
process.communicate()
assert process.returncode == 0, 'By default, emscripten should build using -std=c++03!'
def test_cxx11(self):
for compiler in [EMCC, EMXX]:
process = Popen([PYTHON, compiler, '-std=c++11', path_from_root('tests', 'hello_cxx11.cpp')], stdout=PIPE, stderr=PIPE)
process.communicate()
assert process.returncode == 0, 'User should be able to specify custom -std= on the command line!'
def test_cap_suffixes(self):
shutil.copyfile(path_from_root('tests', 'hello_world.cpp'), 'test.CPP')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'test.CPP')]).communicate()
self.assertContained('hello, world!', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_catch_undef(self):
open(os.path.join(self.get_dir(), 'test.cpp'), 'w').write(r'''
#include <vector>
#include <stdio.h>
class Test {
public:
std::vector<int> vector;
};
Test globalInstance;
int main() {
printf("hello, world!\n");
return 0;
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'test.cpp'), '-fsanitize=undefined']).communicate()
self.assertContained('hello, world!', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_unaligned_memory(self):
def test():
open(os.path.join(self.get_dir(), 'test.cpp'), 'w').write(r'''
#include <stdio.h>
#include <stdarg.h>
typedef unsigned char Bit8u;
typedef unsigned short Bit16u;
typedef unsigned int Bit32u;
int main()
{
va_list argp;
va_arg(argp, char *); // check for compilation error, #1705
Bit8u data[4] = {0x01,0x23,0x45,0x67};
printf("data: %x\n", *(Bit32u*)data);
printf("data[0,1] 16bit: %x\n", *(Bit16u*)data);
printf("data[1,2] 16bit: %x\n", *(Bit16u*)(data+1));
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'test.cpp'), '-s', 'UNALIGNED_MEMORY=1']).communicate()
self.assertContained('data: 67452301\ndata[0,1] 16bit: 2301\ndata[1,2] 16bit: 4523', run_js(os.path.join(self.get_dir(), 'a.out.js')))
nonfastcomp(test)
def test_unaligned_memory_2(self):
def test():
open(os.path.join(self.get_dir(), 'test.cpp'), 'w').write(r'''
#include <string>
#include <stdio.h>
int main( int argc, char ** argv )
{
std::string testString( "Hello, World!" );
printf( "testString = %s\n", testString.c_str() );
return 0;
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'test.cpp'), '-s', 'UNALIGNED_MEMORY=1']).communicate()
self.assertContained('testString = Hello, World!', run_js(os.path.join(self.get_dir(), 'a.out.js')))
nonfastcomp(test)
def test_asm_minify(self):
def test(args):
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_loop_malloc.cpp')] + args).communicate()
self.assertContained('hello, world!', run_js(self.in_dir('a.out.js')))
return open(self.in_dir('a.out.js')).read()
src = test([])
assert 'function _malloc' in src
src = test(['-O2', '-s', 'ASM_JS=1'])
normal_size = len(src)
print 'normal', normal_size
assert 'function _malloc' not in src
src = test(['-O2', '-s', 'ASM_JS=1', '--minify', '0'])
unminified_size = len(src)
print 'unminified', unminified_size
assert unminified_size > normal_size
assert 'function _malloc' not in src
src = test(['-O2', '-s', 'ASM_JS=1', '-g'])
debug_size = len(src)
print 'debug', debug_size
assert debug_size > unminified_size
assert 'function _malloc' in src
def test_dangerous_func_cast(self):
src = r'''
#include <stdio.h>
typedef void (*voidfunc)();
int my_func() {
printf("my func\n");
return 10;
}
int main(int argc, char **argv) {
voidfunc fps[10];
for (int i = 0; i < 10; i++) fps[i] = (i == argc) ? (void (*)())my_func : NULL;
fps[2*(argc-1) + 1]();
return 0;
}
'''
open('src.c', 'w').write(src)
def test(args, expected, err_expected=None):
print args, expected, err_expected
out, err = Popen([PYTHON, EMCC, 'src.c'] + args, stderr=PIPE).communicate()
if err_expected: self.assertContained(err_expected, err)
self.assertContained(expected, run_js(self.in_dir('a.out.js'), stderr=PIPE, full_output=True))
return open(self.in_dir('a.out.js')).read()
if os.environ.get('EMCC_FAST_COMPILER') == '0':
test([], 'my func') # no asm, so casting func works
test(['-O2'], 'abort', ['Casting potentially incompatible function pointer i32 ()* to void (...)*, for my_func',
'Incompatible function pointer casts are very dangerous with ASM_JS=1, you should investigate and correct these']) # asm, so failure
test(['-O2', '-s', 'ASSERTIONS=1'],
'Invalid function pointer called. Perhaps a miscast function pointer (check compilation warnings) or bad vtable lookup (maybe due to derefing a bad pointer, like NULL)?',
['Casting potentially incompatible function pointer i32 ()* to void (...)*, for my_func',
'Incompatible function pointer casts are very dangerous with ASM_JS=1, you should investigate and correct these']) # asm, so failure
else:
# fastcomp. all asm, so it can't just work with wrong sigs. but, ASSERTIONS=2 gives much better info to debug
test(['-O1'], 'If this abort() is unexpected, build with -s ASSERTIONS=1 which can give more information.') # no useful info, but does mention ASSERTIONS
test(['-O1', '-s', 'ASSERTIONS=1'], '''Invalid function pointer called with signature 'v'. Perhaps this is an invalid value (e.g. caused by calling a virtual method on a NULL pointer)? Or calling a function with an incorrect type, which will fail? (it is worth building your source files with -Werror (warnings are errors), as warnings can indicate undefined behavior which can cause this)
Build with ASSERTIONS=2 for more info.
''') # some useful text
test(['-O1', '-s', 'ASSERTIONS=2'], '''Invalid function pointer '0' called with signature 'v'. Perhaps this is an invalid value (e.g. caused by calling a virtual method on a NULL pointer)? Or calling a function with an incorrect type, which will fail? (it is worth building your source files with -Werror (warnings are errors), as warnings can indicate undefined behavior which can cause this)
This pointer might make sense in another type signature: i: 0
''') # actually useful identity of the bad pointer, with comparisons to what it would be in other types/tables
def test_l_link(self):
# Linking with -lLIBNAME and -L/DIRNAME should work
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
extern void printey();
int main() {
printey();
return 0;
}
''')
try:
os.makedirs(os.path.join(self.get_dir(), 'libdir'))
except:
pass
open(os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), 'w').write('''
#include <stdio.h>
void printey() {
printf("hello from lib\\n");
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), '-c']).communicate()
shutil.move(os.path.join(self.get_dir(), 'libfile.o'), os.path.join(self.get_dir(), 'libdir', 'libfile.so'))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-L' + os.path.join(self.get_dir(), 'libdir'), '-lfile']).communicate()
self.assertContained('hello from lib', run_js(os.path.join(self.get_dir(), 'a.out.js')))
assert not os.path.exists('a.out') and not os.path.exists('a.exe'), 'Must not leave unneeded linker stubs'
def test_static_link(self):
def nonfc():
if os.environ.get('EMCC_FAST_COMPILER') != '0': return self.skip('todo in fastcomp')
def test(name, header, main, side, expected, args=[], suffix='cpp', first=True):
print name
#t = main ; main = side ; side = t
original_main = main
original_side = side
if header: open(os.path.join(self.get_dir(), 'header.h'), 'w').write(header)
if type(main) == str:
open(os.path.join(self.get_dir(), 'main.' + suffix), 'w').write(main)
main = ['main.' + suffix]
if type(side) == str:
open(os.path.join(self.get_dir(), 'side.' + suffix), 'w').write(side)
side = ['side.' + suffix]
Popen([PYTHON, EMCC] + side + ['-o', 'side.js', '-s', 'SIDE_MODULE=1', '-O2'] + args).communicate()
# TODO: test with and without DISABLE_GL_EMULATION, check that file sizes change
Popen([PYTHON, EMCC] + main + ['-o', 'main.js', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'DISABLE_GL_EMULATION=1'] + args).communicate()
Popen([PYTHON, EMLINK, 'main.js', 'side.js', 'together.js'], stdout=PIPE).communicate()
assert os.path.exists('together.js')
for engine in JS_ENGINES:
out = run_js('together.js', engine=engine, stderr=PIPE, full_output=True)
self.assertContained(expected, out)
if engine == SPIDERMONKEY_ENGINE: self.validate_asmjs(out)
if first:
shutil.copyfile('together.js', 'first.js')
test(name + ' (reverse)', header, original_side, original_main, expected, args, suffix, False) # test reverse order
# test a simple call from one module to another. only one has a string (and constant memory initialization for it)
test('basics', '', '''
#include <stdio.h>
extern int sidey();
int main() {
printf("other says %d.", sidey());
return 0;
}
''', '''
int sidey() { return 11; }
''', 'other says 11.')
# finalization of float variables should pass asm.js validation
test('floats', '', '''
#include <stdio.h>
extern float sidey();
int main() {
printf("other says %.2f.", sidey()+1);
return 0;
}
''', '''
float sidey() { return 11.5; }
''', 'other says 12.50')
# memory initialization in both
test('multiple memory inits', '', r'''
#include <stdio.h>
extern void sidey();
int main() {
printf("hello from main\n");
sidey();
return 0;
}
''', r'''
#include <stdio.h>
void sidey() { printf("hello from side\n"); }
''', 'hello from main\nhello from side\n')
# function pointers
test('fp1', 'typedef void (*voidfunc)();', r'''
#include <stdio.h>
#include "header.h"
voidfunc sidey(voidfunc f);
void a() { printf("hello from funcptr\n"); }
int main() {
sidey(a)();
return 0;
}
''', '''
#include "header.h"
voidfunc sidey(voidfunc f) { return f; }
''', 'hello from funcptr\n')
# function pointers with 'return' in the name
test('fp2', 'typedef void (*voidfunc)();', r'''
#include <stdio.h>
#include "header.h"
int sidey(voidfunc f);
void areturn0() { printf("hello 0\n"); }
void areturn1() { printf("hello 1\n"); }
void areturn2() { printf("hello 2\n"); }
int main(int argc, char **argv) {
voidfunc table[3] = { areturn0, areturn1, areturn2 };
table[sidey(NULL)]();
return 0;
}
''', '''
#include "header.h"
int sidey(voidfunc f) { if (f) f(); return 1; }
''', 'hello 1\n')
# Global initializer
test('global init', '', r'''
#include <stdio.h>
struct Class {
Class() { printf("a new Class\n"); }
};
static Class c;
int main() {
return 0;
}
''', r'''
void nothing() {}
''', 'a new Class\n')
# Multiple global initializers (LLVM generates overlapping names for them)
test('global inits', r'''
#include <stdio.h>
struct Class {
Class(const char *name) { printf("new %s\n", name); }
};
''', r'''
#include "header.h"
static Class c("main");
int main() {
return 0;
}
''', r'''
#include "header.h"
static Class c("side");
''', ['new main\nnew side\n', 'new side\nnew main\n'])
# Class code used across modules
test('codecall', r'''
#include <stdio.h>
struct Class {
Class(const char *name);
};
''', r'''
#include "header.h"
int main() {
Class c("main");
return 0;
}
''', r'''
#include "header.h"
Class::Class(const char *name) { printf("new %s\n", name); }
''', ['new main\n'])
# malloc usage in both modules
test('malloc', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
''', r'''
#include <stdio.h>
#include "header.h"
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
puts(ret);
return 0;
}
''', r'''
#include "header.h"
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''', ['hello through side\n'])
# js library call
open('lib.js', 'w').write(r'''
mergeInto(LibraryManager.library, {
test_lib_func: function(x) {
return x + 17.2;
}
});
''')
test('js-lib', 'extern "C" { extern double test_lib_func(int input); }', r'''
#include <stdio.h>
#include "header.h"
extern double sidey();
int main2() { return 11; }
int main() {
int input = sidey();
double temp = test_lib_func(input);
printf("other says %.2f\n", temp);
printf("more: %.5f, %d\n", temp, input);
return 0;
}
''', r'''
#include <stdio.h>
#include "header.h"
extern int main2();
double sidey() {
int temp = main2();
printf("main2 sed: %d\n", temp);
printf("main2 sed: %u, %c\n", temp, temp/2);
return test_lib_func(temp);
}
''', 'other says 45.2', ['--js-library', 'lib.js'])
# libc usage in one modules. must force libc inclusion in the main module if that isn't the one using mallinfo()
try:
os.environ['EMCC_FORCE_STDLIBS'] = 'libc'
test('malloc-1', r'''
#include <string.h>
int side();
''', r'''
#include <stdio.h>
#include "header.h"
int main() {
printf("|%d|\n", side());
return 0;
}
''', r'''
#include <stdlib.h>
#include <malloc.h>
#include "header.h"
int side() {
struct mallinfo m = mallinfo();
return m.arena > 1;
}
''', ['|1|\n'])
finally:
del os.environ['EMCC_FORCE_STDLIBS']
# iostream usage in one and std::string in both
test('iostream', r'''
#include <iostream>
#include <string>
std::string side();
''', r'''
#include "header.h"
int main() {
std::cout << "hello from main " << side() << std::endl;
return 0;
}
''', r'''
#include "header.h"
std::string side() { return "and hello from side"; }
''', ['hello from main and hello from side\n'])
# followup to iostream test: a second linking
print 'second linking of a linking output'
open('moar.cpp', 'w').write(r'''
#include <iostream>
struct Moar {
Moar() { std::cout << "moar!" << std::endl; }
};
Moar m;
''')
Popen([PYTHON, EMCC, 'moar.cpp', '-o', 'moar.js', '-s', 'SIDE_MODULE=1', '-O2']).communicate()
Popen([PYTHON, EMLINK, 'together.js', 'moar.js', 'triple.js'], stdout=PIPE).communicate()
assert os.path.exists('triple.js')
for engine in JS_ENGINES:
out = run_js('triple.js', engine=engine, stderr=PIPE, full_output=True)
self.assertContained('moar!\nhello from main and hello from side\n', out)
if engine == SPIDERMONKEY_ENGINE: self.validate_asmjs(out)
# zlib compression library. tests function pointers in initializers and many other things
try:
os.environ['EMCC_FORCE_STDLIBS'] = 'libcextra'
test('zlib', '', open(path_from_root('tests', 'zlib', 'example.c'), 'r').read(),
self.get_zlib_library(),
open(path_from_root('tests', 'zlib', 'ref.txt'), 'r').read(),
args=['-I' + path_from_root('tests', 'zlib')], suffix='c')
finally:
del os.environ['EMCC_FORCE_STDLIBS']
use_cmake = WINDOWS
bullet_library = get_bullet_library(self, use_cmake)
# bullet physics engine. tests all the things
test('bullet', '', open(path_from_root('tests', 'bullet', 'Demos', 'HelloWorld', 'HelloWorld.cpp'), 'r').read(),
bullet_library,
[open(path_from_root('tests', 'bullet', 'output.txt'), 'r').read(), # different roundings
open(path_from_root('tests', 'bullet', 'output2.txt'), 'r').read(),
open(path_from_root('tests', 'bullet', 'output3.txt'), 'r').read()],
args=['-I' + path_from_root('tests', 'bullet', 'src')])
nonfastcomp(nonfc)
def test_outline(self):
def test(name, src, libs, expected, expected_ranges, args=[], suffix='cpp'):
print name
def measure_funcs(filename):
i = 0
start = -1
curr = None
ret = {}
for line in open(filename):
i += 1
if line.startswith('function '):
start = i
curr = line
elif line.startswith('}') and curr:
size = i - start
ret[curr] = size
curr = None
return ret
for debug, outlining_limits in [
([], (1000,)),
(['-g1'], (1000,)),
(['-g2'], (1000,)),
(['-g'], (100, 250, 500, 1000, 2000, 5000, 0))
]:
for outlining_limit in outlining_limits:
print '\n', Building.COMPILER_TEST_OPTS, debug, outlining_limit, '\n'
# TODO: test without -g3, tell all sorts
Popen([PYTHON, EMCC, src] + libs + ['-o', 'test.js', '-O2'] + debug + ['-s', 'OUTLINING_LIMIT=%d' % outlining_limit] + args).communicate()
assert os.path.exists('test.js')
shutil.copyfile('test.js', '%d_test.js' % outlining_limit)
for engine in JS_ENGINES:
out = run_js('test.js', engine=engine, stderr=PIPE, full_output=True)
self.assertContained(expected, out)
if engine == SPIDERMONKEY_ENGINE: self.validate_asmjs(out)
if debug == ['-g']:
low = expected_ranges[outlining_limit][0]
seen = max(measure_funcs('test.js').values())
high = expected_ranges[outlining_limit][1]
print Building.COMPILER_TEST_OPTS, outlining_limit, ' ', low, '<=', seen, '<=', high
assert low <= seen <= high
for test_opts, expected_ranges in [
([], {
100: (190, 500),
250: (200, 600),
500: (200, 700),
1000: (230, 1000),
2000: (300, 2000),
5000: (500, 5000),
0: (1500, 5000)
}),
(['-O2'], {
100: (0, 1600),
250: (0, 1600),
500: (0, 1600),
1000: (0, 1600),
2000: (0, 2000),
5000: (0, 5000),
0: (0, 5000)
}),
]:
Building.COMPILER_TEST_OPTS = test_opts
test('zlib', path_from_root('tests', 'zlib', 'example.c'),
self.get_zlib_library(),
open(path_from_root('tests', 'zlib', 'ref.txt'), 'r').read(),
expected_ranges,
args=['-I' + path_from_root('tests', 'zlib')], suffix='c')
def test_symlink(self):
self.clear()
if os.name == 'nt':
return self.skip('Windows FS does not need to be tested for symlinks support, since it does not have them.')
open(os.path.join(self.get_dir(), 'foobar.xxx'), 'w').write('int main(){ return 0; }')
os.symlink(os.path.join(self.get_dir(), 'foobar.xxx'), os.path.join(self.get_dir(), 'foobar.c'))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'foobar.c'), '-o', os.path.join(self.get_dir(), 'foobar')]).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'foobar'))
try_delete(os.path.join(self.get_dir(), 'foobar'))
try_delete(os.path.join(self.get_dir(), 'foobar.xxx'))
try_delete(os.path.join(self.get_dir(), 'foobar.c'))
open(os.path.join(self.get_dir(), 'foobar.c'), 'w').write('int main(){ return 0; }')
os.symlink(os.path.join(self.get_dir(), 'foobar.c'), os.path.join(self.get_dir(), 'foobar.xxx'))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'foobar.xxx'), '-o', os.path.join(self.get_dir(), 'foobar')]).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'foobar'))
try_delete(os.path.join(self.get_dir(), 'foobar'))
try_delete(os.path.join(self.get_dir(), 'foobar.xxx'))
try_delete(os.path.join(self.get_dir(), 'foobar.c'))
def test_multiply_defined_libsymbols(self):
lib = "int mult() { return 1; }"
lib_name = os.path.join(self.get_dir(), 'libA.c')
open(lib_name, 'w').write(lib)
a2 = "void x() {}"
a2_name = os.path.join(self.get_dir(), 'a2.c')
open(a2_name, 'w').write(a2)
b2 = "void y() {}"
b2_name = os.path.join(self.get_dir(), 'b2.c')
open(b2_name, 'w').write(b2)
main = r'''
#include <stdio.h>
int mult();
int main() {
printf("result: %d\n", mult());
return 0;
}
'''
main_name = os.path.join(self.get_dir(), 'main.c')
open(main_name, 'w').write(main)
Building.emcc(lib_name, output_filename='libA.so')
Building.emcc(a2_name, ['-L.', '-lA'])
Building.emcc(b2_name, ['-L.', '-lA'])
Building.emcc(main_name, ['-L.', '-lA', a2_name+'.o', b2_name+'.o'], output_filename='a.out.js')
self.assertContained('result: 1', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_multiply_defined_libsymbols_2(self):
a = "int x() { return 55; }"
a_name = os.path.join(self.get_dir(), 'a.c')
open(a_name, 'w').write(a)
b = "int y() { return 2; }"
b_name = os.path.join(self.get_dir(), 'b.c')
open(b_name, 'w').write(b)
c = "int z() { return 5; }"
c_name = os.path.join(self.get_dir(), 'c.c')
open(c_name, 'w').write(c)
main = r'''
#include <stdio.h>
int x();
int y();
int z();
int main() {
printf("result: %d\n", x() + y() + z());
return 0;
}
'''
main_name = os.path.join(self.get_dir(), 'main.c')
open(main_name, 'w').write(main)
Building.emcc(a_name) # a.c.o
Building.emcc(b_name) # b.c.o
Building.emcc(c_name) # c.c.o
lib_name = os.path.join(self.get_dir(), 'libLIB.a')
Building.emar('cr', lib_name, [a_name + '.o', b_name + '.o']) # libLIB.a with a and b
# a is in the lib AND in an .o, so should be ignored in the lib. We do still need b from the lib though
Building.emcc(main_name, [a_name+'.o', c_name + '.o', '-L.', '-lLIB'], output_filename='a.out.js')
self.assertContained('result: 62', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_link_group_asserts(self):
lib_src_name = os.path.join(self.get_dir(), 'lib.c')
open(lib_src_name, 'w').write('int x() { return 42; }')
main_name = os.path.join(self.get_dir(), 'main.c')
open(main_name, 'w').write(r'''
#include <stdio.h>
int x();
int main() {
printf("result: %d\n", x());
return 0;
}
''')
Building.emcc(lib_src_name) # lib.c.o
lib_name = os.path.join(self.get_dir(), 'libLIB.a')
Building.emar('cr', lib_name, [lib_src_name + '.o']) # libLIB.a with lib.c.o
def test(lib_args, err_expected):
output = Popen([PYTHON, EMCC, main_name, '-o', 'a.out.js'] + lib_args, stdout=PIPE, stderr=PIPE).communicate()
if err_expected:
self.assertContained(err_expected, output[1])
else:
out_js = os.path.join(self.get_dir(), 'a.out.js')
assert os.path.exists(out_js), '\n'.join(output)
self.assertContained('result: 42', run_js(out_js))
test(['-Wl,--start-group', lib_name], '--start-group without matching --end-group')
test(['-Wl,--start-group', lib_name, '-Wl,--start-group'], 'Nested --start-group, missing --end-group?')
test(['-Wl,--end-group', lib_name, '-Wl,--start-group'], '--end-group without --start-group')
test(['-Wl,--start-group', lib_name, '-Wl,--end-group'], None)
def test_circular_libs(self):
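# Archives with mutual (circular) dependencies need --start-group/--end-group (or -(/-)) so earlier archives get rescanned.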
def tmp_source(name, code):
file_name = os.path.join(self.get_dir(), name)
open(file_name, 'w').write(code)
return file_name
a = tmp_source('a.c', 'int z(); int x() { return z(); }')
b = tmp_source('b.c', 'int x(); int y() { return x(); } int z() { return 42; }')
c = tmp_source('c.c', 'int q() { return 0; }')
main = tmp_source('main.c', r'''
#include <stdio.h>
int y();
int main() {
printf("result: %d\n", y());
return 0;
}
''')
Building.emcc(a) # a.c.o
Building.emcc(b) # b.c.o
Building.emcc(c) # c.c.o
lib_a = os.path.join(self.get_dir(), 'libA.a')
Building.emar('cr', lib_a, [a + '.o', c + '.o']) # libA.a with a.c.o,c.c.o
lib_b = os.path.join(self.get_dir(), 'libB.a')
Building.emar('cr', lib_b, [b + '.o', c + '.o']) # libB.a with b.c.o,c.c.o
args = ['-s', 'ERROR_ON_UNDEFINED_SYMBOLS=1', main, '-o', 'a.out.js']
libs_list = [lib_a, lib_b]
# lib_a does not satisfy any symbols from main, so it will not be included,
# and there will be an unresolved symbol.
output = Popen([PYTHON, EMCC] + args + libs_list, stdout=PIPE, stderr=PIPE).communicate()
self.assertContained('error: unresolved symbol: x', output[1])
# -Wl,--start-group and -Wl,--end-group around the libs will cause a rescan
# of lib_a after lib_b adds undefined symbol "x", so a.c.o will now be
# included (and the link will succeed).
libs = ['-Wl,--start-group'] + libs_list + ['-Wl,--end-group']
output = Popen([PYTHON, EMCC] + args + libs, stdout=PIPE, stderr=PIPE).communicate()
out_js = os.path.join(self.get_dir(), 'a.out.js')
assert os.path.exists(out_js), '\n'.join(output)
self.assertContained('result: 42', run_js(out_js))
# -( and -) should also work.
args = ['-s', 'ERROR_ON_UNDEFINED_SYMBOLS=1', main, '-o', 'a2.out.js']
libs = ['-Wl,-('] + libs_list + ['-Wl,-)']
output = Popen([PYTHON, EMCC] + args + libs, stdout=PIPE, stderr=PIPE).communicate()
out_js = os.path.join(self.get_dir(), 'a2.out.js')
assert os.path.exists(out_js), '\n'.join(output)
self.assertContained('result: 42', run_js(out_js))
def test_redundant_link(self):
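# Passing the same library twice on the link line should be harmless.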
lib = "int mult() { return 1; }"
lib_name = os.path.join(self.get_dir(), 'libA.c')
open(lib_name, 'w').write(lib)
main = r'''
#include <stdio.h>
int mult();
int main() {
printf("result: %d\n", mult());
return 0;
}
'''
main_name = os.path.join(self.get_dir(), 'main.c')
open(main_name, 'w').write(main)
Building.emcc(lib_name, output_filename='libA.so')
Building.emcc(main_name, ['libA.so']*2, output_filename='a.out.js')
self.assertContained('result: 1', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_export_all(self):
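# With EXPORT_ALL=1, library functions should be callable from --post-js code without explicit exports.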
lib = r'''
#include <stdio.h>
void libf1() { printf("libf1\n"); }
void libf2() { printf("libf2\n"); }
'''
lib_name = os.path.join(self.get_dir(), 'lib.c')
open(lib_name, 'w').write(lib)
open('main.js', 'w').write('''
_libf1();
_libf2();
''')
Building.emcc(lib_name, ['-s', 'EXPORT_ALL=1', '--post-js', 'main.js'], output_filename='a.out.js')
self.assertContained('libf1\nlibf2\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_stdin(self):
Building.emcc(path_from_root('tests', 'module', 'test_stdin.c'), output_filename='a.out.js')
open('in.txt', 'w').write('abcdef\nghijkl')
for engine in JS_ENGINES:
print >> sys.stderr, engine
if engine == NODE_JS: continue # FIXME
if engine == V8_ENGINE: continue # no stdin support in v8 shell
self.assertContained('abcdef\nghijkl\neof', run_js(os.path.join(self.get_dir(), 'a.out.js'), engine=engine, stdin=open('in.txt')))
def test_ungetc_fscanf(self):
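# Characters pushed back with ungetc() should be returned, in reverse order, by a following fscanf().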
open('main.cpp', 'w').write(r'''
#include <stdio.h>
int main(int argc, char const *argv[])
{
char str[4] = {0};
FILE* f = fopen("my_test.input", "r");
if (f == NULL) {
printf("cannot open file\n");
return -1;
}
ungetc('x', f);
ungetc('y', f);
ungetc('z', f);
fscanf(f, "%3s", str);
printf("%s\n", str);
return 0;
}
''')
open('my_test.input', 'w').write('abc')
Building.emcc('main.cpp', ['--embed-file', 'my_test.input'], output_filename='a.out.js')
self.assertContained('zyx', Popen(listify(JS_ENGINES[0]) + ['a.out.js'], stdout=PIPE, stderr=PIPE).communicate()[0])
def test_abspaths(self):
# Includes with absolute paths are generally dangerous; things like -I/usr/.. will pick up local system headers rather than our portable ones.
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'main.c')
for args, expected in [(['-I/usr/something'], True),
(['-L/usr/something'], True),
(['-I/usr/something', '-Wno-warn-absolute-paths'], False),
(['-L/usr/something', '-Wno-warn-absolute-paths'], False),
(['-Isubdir/something'], False),
(['-Lsubdir/something'], False),
([], False)]:
err = Popen([PYTHON, EMCC, 'main.c'] + args, stderr=PIPE).communicate()[1]
assert ('encountered. If this is to a local system header/library, it may cause problems (local system files make sense for compiling natively on your system, but not necessarily to JavaScript)' in err) == expected, err
def test_local_link(self):
# Linking a local library directly, like /usr/lib/libsomething.so, cannot work since it
# doesn't contain bitcode. However, when we see that, we should look for a bitcode file for that
# library in the -L paths and system/lib instead
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
extern void printey();
int main() {
printey();
return 0;
}
''')
try:
os.makedirs(os.path.join(self.get_dir(), 'subdir'));
except:
pass
open(os.path.join(self.get_dir(), 'subdir', 'libfile.so'), 'w').write('this is not llvm bitcode!')
open(os.path.join(self.get_dir(), 'libfile.cpp'), 'w').write('''
#include <stdio.h>
void printey() {
printf("hello from lib\\n");
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'libfile.cpp'), '-o', 'libfile.so']).communicate()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), os.path.join(self.get_dir(), 'subdir', 'libfile.so'), '-L.'], stderr=PIPE).communicate()
self.assertContained('hello from lib', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_runtimelink_multi(self):
return self.skip('BUILD_AS_SHARED_LIB=2 is deprecated')
if Settings.ASM_JS: return self.skip('asm does not support runtime linking yet')
if SPIDERMONKEY_ENGINE not in JS_ENGINES: return self.skip('cannot run without spidermonkey due to node limitations')
open('testa.h', 'w').write(r'''
#ifndef _TESTA_H_
#define _TESTA_H_
class TestA {
public:
TestA();
};
#endif
''')
open('testb.h', 'w').write(r'''
#ifndef _TESTB_H_
#define _TESTB_H_
class TestB {
public:
TestB();
};
#endif
''')
open('testa.cpp', 'w').write(r'''
#include <stdio.h>
#include <testa.h>
TestA::TestA() {
printf("TestA\n");
}
''')
open('testb.cpp', 'w').write(r'''
#include <stdio.h>
#include <testb.h>
#include <testa.h>
/*
*/
TestB::TestB() {
printf("TestB\n");
TestA* testa = new TestA();
}
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <testa.h>
#include <testb.h>
/*
*/
int main(int argc, char** argv) {
printf("Main\n");
TestA* testa = new TestA();
TestB* testb = new TestB();
}
''')
Popen([PYTHON, EMCC, 'testa.cpp', '-o', 'liba.js', '-s', 'BUILD_AS_SHARED_LIB=2', '-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-I.']).communicate()
Popen([PYTHON, EMCC, 'testb.cpp', '-o', 'libb.js', '-s', 'BUILD_AS_SHARED_LIB=2', '-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-I.']).communicate()
Popen([PYTHON, EMCC, 'main.cpp', '-o', 'main.js', '-s', 'RUNTIME_LINKED_LIBS=["liba.js", "libb.js"]', '-s', 'NAMED_GLOBALS=1', '-I.', '-s', 'LINKABLE=1']).communicate()
Popen([PYTHON, EMCC, 'main.cpp', 'testa.cpp', 'testb.cpp', '-o', 'full.js', '-I.']).communicate()
self.assertContained('TestA\nTestB\nTestA\n', run_js('main.js', engine=SPIDERMONKEY_ENGINE))
def test_js_libraries(self):
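# Multiple --js-library files should all be merged into the JS library and be callable from C.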
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
extern "C" {
extern void printey();
extern int calcey(int x, int y);
}
int main() {
printey();
printf("*%d*\\n", calcey(10, 22));
return 0;
}
''')
open(os.path.join(self.get_dir(), 'mylib1.js'), 'w').write('''
mergeInto(LibraryManager.library, {
printey: function() {
Module.print('hello from lib!');
}
});
''')
open(os.path.join(self.get_dir(), 'mylib2.js'), 'w').write('''
mergeInto(LibraryManager.library, {
calcey: function(x, y) {
return x + y;
}
});
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--js-library', os.path.join(self.get_dir(), 'mylib1.js'),
'--js-library', os.path.join(self.get_dir(), 'mylib2.js')]).communicate()
self.assertContained('hello from lib!\n*32*\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_identical_basenames(self):
# Issue 287: files in different dirs but with the same basename get confused as the same,
# causing multiply defined symbol errors
try:
os.makedirs(os.path.join(self.get_dir(), 'foo'));
except:
pass
try:
os.makedirs(os.path.join(self.get_dir(), 'bar'));
except:
pass
open(os.path.join(self.get_dir(), 'foo', 'main.cpp'), 'w').write('''
extern void printey();
int main() {
printey();
return 0;
}
''')
open(os.path.join(self.get_dir(), 'bar', 'main.cpp'), 'w').write('''
#include<stdio.h>
void printey() { printf("hello there\\n"); }
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'foo', 'main.cpp'), os.path.join(self.get_dir(), 'bar', 'main.cpp')]).communicate()
self.assertContained('hello there', run_js(os.path.join(self.get_dir(), 'a.out.js')))
# ditto with first creating .o files
try_delete(os.path.join(self.get_dir(), 'a.out.js'))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'foo', 'main.cpp'), '-o', os.path.join(self.get_dir(), 'foo', 'main.o')]).communicate()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'bar', 'main.cpp'), '-o', os.path.join(self.get_dir(), 'bar', 'main.o')]).communicate()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'foo', 'main.o'), os.path.join(self.get_dir(), 'bar', 'main.o')]).communicate()
self.assertContained('hello there', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_main_a(self):
# if main() is in a .a, we need to pull in that .a
main_name = os.path.join(self.get_dir(), 'main.c')
open(main_name, 'w').write(r'''
#include <stdio.h>
extern int f();
int main() {
printf("result: %d.\n", f());
return 0;
}
''')
other_name = os.path.join(self.get_dir(), 'other.c')
open(other_name, 'w').write(r'''
#include <stdio.h>
int f() { return 12346; }
''')
Popen([PYTHON, EMCC, main_name, '-c', '-o', main_name+'.bc']).communicate()
Popen([PYTHON, EMCC, other_name, '-c', '-o', other_name+'.bc']).communicate()
Popen([PYTHON, EMAR, 'cr', main_name+'.a', main_name+'.bc']).communicate()
Popen([PYTHON, EMCC, other_name+'.bc', main_name+'.a']).communicate()
self.assertContained('result: 12346.', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_dup_o_in_a(self):
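# Two archives may each contain an object with the same name (common.o); both objects should still be linked in.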
open('common.c', 'w').write(r'''
#include <stdio.h>
void a(void) {
printf("a\n");
}
''')
Popen([PYTHON, EMCC, 'common.c', '-c', '-o', 'common.o']).communicate()
Popen([PYTHON, EMAR, 'rc', 'liba.a', 'common.o']).communicate()
open('common.c', 'w').write(r'''
#include <stdio.h>
void b(void) {
printf("b\n");
}
''')
Popen([PYTHON, EMCC, 'common.c', '-c', '-o', 'common.o']).communicate()
Popen([PYTHON, EMAR, 'rc', 'libb.a', 'common.o']).communicate()
open('main.c', 'w').write(r'''
void a(void);
void b(void);
int main() {
a();
b();
}
''')
Popen([PYTHON, EMCC, 'main.c', '-L.', '-la', '-lb']).communicate()
self.assertContained('a\nb\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_export_in_a(self):
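# A function defined only inside an archive should be pulled into the output when listed in EXPORTED_FUNCTIONS.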
export_name = 'this_is_an_entry_point'
open('export.c', 'w').write(r'''
#include <stdio.h>
void %s(void) {
printf("Hello, world!\n");
}
''' % export_name)
Popen([PYTHON, EMCC, 'export.c', '-c', '-o', 'export.o']).communicate()
Popen([PYTHON, EMAR, 'rc', 'libexport.a', 'export.o']).communicate()
open('main.c', 'w').write(r'''
int main() {
return 0;
}
''')
definition = 'function _%s(' % export_name
# Sanity check: the symbol should not be linked in if not requested.
Popen([PYTHON, EMCC, 'main.c', '-L.', '-lexport']).communicate()
self.assertNotContained(definition, open(os.path.join(self.get_dir(), 'a.out.js')).read())
# Sanity check: exporting without a definition does not cause it to appear.
# Note: exporting main prevents emcc from warning that it generated no code.
Popen([PYTHON, EMCC, 'main.c', '-s', '''EXPORTED_FUNCTIONS=['_main', '_%s']''' % export_name]).communicate()
self.assertNotContained(definition, open(os.path.join(self.get_dir(), 'a.out.js')).read())
# Actual test: defining symbol in library and exporting it causes it to appear in the output.
Popen([PYTHON, EMCC, 'main.c', '-L.', '-lexport', '-s', '''EXPORTED_FUNCTIONS=['_%s']''' % export_name]).communicate()
self.assertContained(definition, open(os.path.join(self.get_dir(), 'a.out.js')).read())
def test_embed_file(self):
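# --embed-file should make the file readable with stdio at runtime.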
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''hello from a file with lots of data and stuff in it thank you very much''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
int main() {
FILE *f = fopen("somefile.txt", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
return 0;
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--embed-file', 'somefile.txt']).communicate()
self.assertContained('|hello from a file wi|', run_js(os.path.join(self.get_dir(), 'a.out.js')))
# embed the same file twice; should not error
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--embed-file', 'somefile.txt', '--embed-file', 'somefile.txt']).communicate()
self.assertContained('|hello from a file wi|', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_embed_file_dup(self):
try_delete(os.path.join(self.get_dir(), 'tst'))
os.mkdir(os.path.join(self.get_dir(), 'tst'))
os.mkdir(os.path.join(self.get_dir(), 'tst', 'test1'))
os.mkdir(os.path.join(self.get_dir(), 'tst', 'test2'))
open(os.path.join(self.get_dir(), 'tst', 'aa.txt'), 'w').write('''frist''')
open(os.path.join(self.get_dir(), 'tst', 'test1', 'aa.txt'), 'w').write('''sacond''')
open(os.path.join(self.get_dir(), 'tst', 'test2', 'aa.txt'), 'w').write('''thard''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
#include <string.h>
void print_file(const char *name) {
FILE *f = fopen(name, "r");
char buf[100];
memset(buf, 0, 100);
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
}
int main() {
print_file("tst/aa.txt");
print_file("tst/test1/aa.txt");
print_file("tst/test2/aa.txt");
return 0;
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--embed-file', 'tst']).communicate()
self.assertContained('|frist|\n|sacond|\n|thard|\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_exclude_file(self):
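# --exclude-file patterns should skip matching files and directories inside an embedded tree.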
try_delete(os.path.join(self.get_dir(), 'tst'))
os.mkdir(os.path.join(self.get_dir(), 'tst'))
os.mkdir(os.path.join(self.get_dir(), 'tst', 'abc.exe'))
os.mkdir(os.path.join(self.get_dir(), 'tst', 'abc.txt'))
open(os.path.join(self.get_dir(), 'tst', 'hello.exe'), 'w').write('''hello''')
open(os.path.join(self.get_dir(), 'tst', 'hello.txt'), 'w').write('''world''')
open(os.path.join(self.get_dir(), 'tst', 'abc.exe', 'foo'), 'w').write('''emscripten''')
open(os.path.join(self.get_dir(), 'tst', 'abc.txt', 'bar'), 'w').write('''!!!''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
int main() {
if(fopen("tst/hello.exe", "rb")) printf("Failed\n");
if(!fopen("tst/hello.txt", "rb")) printf("Failed\n");
if(fopen("tst/abc.exe/foo", "rb")) printf("Failed\n");
if(!fopen("tst/abc.txt/bar", "rb")) printf("Failed\n");
return 0;
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--embed-file', 'tst', '--exclude-file', '*.exe']).communicate()
output = run_js(os.path.join(self.get_dir(), 'a.out.js'))
assert output == ''
def test_multidynamic_link(self):
# Linking the same dynamic library in twice would normally error, since we actually link it statically, causing duplicate symbols
def test(link_cmd, lib_suffix=''):
print link_cmd, lib_suffix
self.clear()
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
extern void printey();
extern void printother();
int main() {
printf("*");
printey();
printf("\n");
printother();
printf("\n");
printf("*");
return 0;
}
''')
try:
os.makedirs(os.path.join(self.get_dir(), 'libdir'));
except:
pass
open(os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), 'w').write('''
#include <stdio.h>
void printey() {
printf("hello from lib");
}
''')
open(os.path.join(self.get_dir(), 'libdir', 'libother.cpp'), 'w').write('''
#include <stdio.h>
extern void printey();
void printother() {
printf("|");
printey();
printf("|");
}
''')
compiler = [PYTHON, EMCC]
# Build libfile normally into an .so
Popen(compiler + [os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), '-o', os.path.join(self.get_dir(), 'libdir', 'libfile.so' + lib_suffix)]).communicate()
# Build libother and dynamically link it to libfile
Popen(compiler + [os.path.join(self.get_dir(), 'libdir', 'libother.cpp')] + link_cmd + ['-o', os.path.join(self.get_dir(), 'libdir', 'libother.so')]).communicate()
# Build the main file, linking in both the libs
Popen(compiler + [os.path.join(self.get_dir(), 'main.cpp')] + link_cmd + ['-lother', '-c']).communicate()
print '...'
# The normal build system is over. We need to do an additional step to link in the dynamic libraries, since we ignored them before
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.o')] + link_cmd + ['-lother']).communicate()
self.assertContained('*hello from lib\n|hello from lib|\n*', run_js(os.path.join(self.get_dir(), 'a.out.js')))
test(['-L' + os.path.join(self.get_dir(), 'libdir'), '-lfile']) # -l, auto detection from library path
test(['-L' + os.path.join(self.get_dir(), 'libdir'), os.path.join(self.get_dir(), 'libdir', 'libfile.so.3.1.4.1.5.9')], '.3.1.4.1.5.9') # handle libX.so.1.2.3 as well
def test_js_link(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
open(os.path.join(self.get_dir(), 'before.js'), 'w').write('''
var MESSAGE = 'hello from js';
if (typeof Module != 'undefined') throw 'This code should run before anything else!';
''')
open(os.path.join(self.get_dir(), 'after.js'), 'w').write('''
Module.print(MESSAGE);
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'before.js', '--post-js', 'after.js']).communicate()
self.assertContained('hello from main\nhello from js\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_sdl_endianness(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
#include <SDL/SDL.h>
int main() {
printf("%d, %d, %d\n", SDL_BYTEORDER, SDL_LIL_ENDIAN, SDL_BIG_ENDIAN);
return 0;
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp')]).communicate()
self.assertContained('1234, 1234, 4321\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_link_memcpy(self):
# memcpy can show up *after* optimizations, i.e. after our opportunity to link in libc, so it must be special-cased
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
int main(int argc, char **argv) {
int num = argc + 10;
char buf[num], buf2[num];
for (int i = 0; i < num; i++) {
buf[i] = i*i+i/3;
}
for (int i = 1; i < num; i++) {
buf[i] += buf[i-1];
}
for (int i = 0; i < num; i++) {
buf2[i] = buf[i];
}
for (int i = 1; i < num; i++) {
buf2[i] += buf2[i-1];
}
for (int i = 0; i < num; i++) {
printf("%d:%d\n", i, buf2[i]);
}
return 0;
}
''')
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'main.cpp')]).communicate()
output = run_js(os.path.join(self.get_dir(), 'a.out.js'), full_output=True, stderr=PIPE)
self.assertContained('''0:0
1:1
2:6
3:21
4:53
5:111
6:-49
7:98
8:55
9:96
10:-16
''', output)
self.assertNotContained('warning: library.js memcpy should not be running, it is only for testing!', output)
def test_warn_undefined(self):
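# WARN_ON_UNDEFINED_SYMBOLS / ERROR_ON_UNDEFINED_SYMBOLS should control whether undefined symbols warn, abort the link, or are ignored.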
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
#include <SDL.h>
#include "SDL/SDL_opengl.h"
extern "C" {
void something();
void elsey();
}
int main() {
printf("%p", SDL_GL_GetProcAddress("glGenTextures")); // pull in gl proc stuff, avoid warnings on emulation funcs
something();
elsey();
return 0;
}
''')
def clear(): try_delete('a.out.js')
for args in [[], ['-O2']]:
for action in ['WARN', 'ERROR', None]:
for value in ([0, 1] if action else [0]):
clear()
print 'warn', args, action, value
extra = ['-s', action + '_ON_UNDEFINED_SYMBOLS=%d' % value] if action else []
output = Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp')] + extra + args, stderr=PIPE).communicate()
if action == None or (action == 'WARN' and value):
self.assertContained('unresolved symbol: something', output[1])
self.assertContained('unresolved symbol: elsey', output[1])
assert os.path.exists('a.out.js')
self.assertNotContained('unresolved symbol: emscripten_', output[1])
elif action == 'ERROR' and value:
self.assertContained('unresolved symbol: something', output[1])
self.assertContained('unresolved symbol: elsey', output[1])
self.assertNotContained('warning', output[1])
assert not os.path.exists('a.out.js')
elif action == 'WARN' and not value:
self.assertNotContained('unresolved symbol', output[1])
assert os.path.exists('a.out.js')
def test_toobig(self):
# a very large [N x i8] array; we should not OOM in the compiler
self.clear()
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
#define BYTES 100*1024*1024
int main(int argc, char **argv) {
if (argc == 100) {
static char buf[BYTES];
static char buf2[BYTES];
for (int i = 0; i < BYTES; i++) {
buf[i] = i*i;
buf2[i] = i/3;
}
for (int i = 0; i < BYTES; i++) {
buf[i] = buf2[i/2];
buf2[i] = buf[i/3];
}
printf("%d\n", buf[10] + buf2[20]);
}
return 0;
}
''')
output = Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp')], stderr=PIPE).communicate()[1]
print output
assert os.path.exists('a.out.js')
def test_prepost(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {
preRun: function() { Module.print('pre-run') },
postRun: function() { Module.print('post-run') }
};
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js']).communicate()
self.assertContained('pre-run\nhello from main\npost-run\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
# never run, so no preRun or postRun
src = open(os.path.join(self.get_dir(), 'a.out.js')).read().replace('// {{PRE_RUN_ADDITIONS}}', 'addRunDependency()')
open(os.path.join(self.get_dir(), 'a.out.js'), 'w').write(src)
self.assertNotContained('pre-run\nhello from main\npost-run\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
# noInitialRun prevents run
for no_initial_run, run_dep in [(0, 0), (1, 0), (0, 1)]:
print no_initial_run, run_dep
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp')]).communicate()
src = 'var Module = { noInitialRun: %d };\n' % no_initial_run + open(os.path.join(self.get_dir(), 'a.out.js')).read()
if run_dep:
src = src.replace('// {{PRE_RUN_ADDITIONS}}', '// {{PRE_RUN_ADDITIONS}}\naddRunDependency("test");') \
.replace('// {{POST_RUN_ADDITIONS}}', '// {{POST_RUN_ADDITIONS}}\nremoveRunDependency("test");')
open(os.path.join(self.get_dir(), 'a.out.js'), 'w').write(src)
assert ('hello from main' in run_js(os.path.join(self.get_dir(), 'a.out.js'))) != no_initial_run, 'only run if no noInitialRun'
if no_initial_run:
# Calling main later should still work, filesystem etc. must be set up.
print 'call main later'
src = open(os.path.join(self.get_dir(), 'a.out.js')).read() + '\nModule.callMain();\n';
open(os.path.join(self.get_dir(), 'a.out.js'), 'w').write(src)
assert 'hello from main' in run_js(os.path.join(self.get_dir(), 'a.out.js')), 'main should print when called manually'
# Use postInit
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {
preRun: function() { Module.print('pre-run') },
postRun: function() { Module.print('post-run') },
preInit: function() { Module.print('pre-init') }
};
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js']).communicate()
self.assertContained('pre-init\npre-run\nhello from main\npost-run\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_prepost2(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {
preRun: function() { Module.print('pre-run') },
};
''')
open(os.path.join(self.get_dir(), 'pre2.js'), 'w').write('''
Module.postRun = function() { Module.print('post-run') };
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '--pre-js', 'pre2.js']).communicate()
self.assertContained('pre-run\nhello from main\npost-run\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_prepre(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {
preRun: [function() { Module.print('pre-run') }],
};
''')
open(os.path.join(self.get_dir(), 'pre2.js'), 'w').write('''
Module.preRun.push(function() { Module.print('prepre') });
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '--pre-js', 'pre2.js']).communicate()
self.assertContained('prepre\npre-run\nhello from main\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_save_bc(self):
for save in [0, 1]:
self.clear()
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_loop_malloc.cpp')] + ([] if not save else ['--save-bc', self.in_dir('my_bitcode.bc')])).communicate()
assert 'hello, world!' in run_js(self.in_dir('a.out.js'))
assert os.path.exists(self.in_dir('my_bitcode.bc')) == save
if save:
try_delete('a.out.js')
Building.llvm_dis(self.in_dir('my_bitcode.bc'), self.in_dir('my_ll.ll'))
try:
os.environ['EMCC_LEAVE_INPUTS_RAW'] = '1'
Popen([PYTHON, EMCC, 'my_ll.ll', '-o', 'two.js']).communicate()
assert 'hello, world!' in run_js(self.in_dir('two.js'))
finally:
del os.environ['EMCC_LEAVE_INPUTS_RAW']
def test_fix_closure(self):
input = path_from_root('tests', 'test-fix-closure.js')
expected = path_from_root('tests', 'test-fix-closure.out.js')
Popen([PYTHON, path_from_root('tools', 'fix_closure.py'), input, 'out.js']).communicate(input)
output = open('out.js').read()
assert '0,zzz_Q_39fa,0' in output
assert 'function(a,c)' not in output # should be uninlined, so it gets a name
assert run_js(input) == run_js('out.js')
def test_js_optimizer(self):
for input, expected, passes in [
(path_from_root('tools', 'test-js-optimizer.js'), open(path_from_root('tools', 'test-js-optimizer-output.js')).read(),
['hoistMultiples', 'removeAssignsToUndefined', 'simplifyExpressions']),
(path_from_root('tools', 'test-js-optimizer-si.js'), open(path_from_root('tools', 'test-js-optimizer-si-output.js')).read(),
['simplifyIfs']),
(path_from_root('tools', 'test-js-optimizer-regs.js'), open(path_from_root('tools', 'test-js-optimizer-regs-output.js')).read(),
['registerize']),
(path_from_root('tools', 'eliminator', 'eliminator-test.js'), open(path_from_root('tools', 'eliminator', 'eliminator-test-output.js')).read(),
['eliminate']),
(path_from_root('tools', 'eliminator', 'safe-eliminator-test.js'), open(path_from_root('tools', 'eliminator', 'safe-eliminator-test-output.js')).read(),
['eliminateMemSafe']),
(path_from_root('tools', 'eliminator', 'asm-eliminator-test.js'), open(path_from_root('tools', 'eliminator', 'asm-eliminator-test-output.js')).read(),
['asm', 'eliminate']),
(path_from_root('tools', 'test-js-optimizer-asm-regs.js'), open(path_from_root('tools', 'test-js-optimizer-asm-regs-output.js')).read(),
['asm', 'registerize']),
(path_from_root('tools', 'test-js-optimizer-asm-regs-harder.js'), open(path_from_root('tools', 'test-js-optimizer-asm-regs-harder-output.js')).read(),
['asm', 'registerizeHarder']),
(path_from_root('tools', 'test-js-optimizer-asm-regs-min.js'), open(path_from_root('tools', 'test-js-optimizer-asm-regs-min-output.js')).read(),
['asm', 'registerize', 'minifyLocals']),
(path_from_root('tools', 'test-js-optimizer-asm-pre.js'), open(path_from_root('tools', 'test-js-optimizer-asm-pre-output.js')).read(),
['asm', 'simplifyExpressions']),
(path_from_root('tools', 'test-js-optimizer-asm-pre-f32.js'), open(path_from_root('tools', 'test-js-optimizer-asm-pre-output-f32.js')).read(),
['asm', 'asmPreciseF32', 'simplifyExpressions', 'optimizeFrounds']),
(path_from_root('tools', 'test-js-optimizer-asm-last.js'), open(path_from_root('tools', 'test-js-optimizer-asm-last-output.js')).read(),
['asm', 'last']),
(path_from_root('tools', 'test-js-optimizer-asm-relocate.js'), open(path_from_root('tools', 'test-js-optimizer-asm-relocate-output.js')).read(),
['asm', 'relocate']),
(path_from_root('tools', 'test-js-optimizer-asm-outline1.js'), open(path_from_root('tools', 'test-js-optimizer-asm-outline1-output.js')).read(),
['asm', 'outline']),
(path_from_root('tools', 'test-js-optimizer-asm-outline2.js'), open(path_from_root('tools', 'test-js-optimizer-asm-outline2-output.js')).read(),
['asm', 'outline']),
(path_from_root('tools', 'test-js-optimizer-asm-outline3.js'), open(path_from_root('tools', 'test-js-optimizer-asm-outline3-output.js')).read(),
['asm', 'outline']),
(path_from_root('tools', 'test-js-optimizer-asm-minlast.js'), open(path_from_root('tools', 'test-js-optimizer-asm-minlast-output.js')).read(),
['asm', 'minifyWhitespace', 'last']),
(path_from_root('tools', 'test-js-optimizer-shiftsAggressive.js'), open(path_from_root('tools', 'test-js-optimizer-shiftsAggressive-output.js')).read(),
['asm', 'aggressiveVariableElimination']),
]:
print input
output = Popen(listify(NODE_JS) + [path_from_root('tools', 'js-optimizer.js'), input] + passes, stdin=PIPE, stdout=PIPE).communicate()[0]
self.assertIdentical(expected, output.replace('\r\n', '\n').replace('\n\n', '\n'))
def test_m_mm(self):
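# -M / -MM should emit a makefile-style dependency rule instead of compiling.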
open(os.path.join(self.get_dir(), 'foo.c'), 'w').write('''#include <emscripten.h>''')
for opt in ['M', 'MM']:
output, err = Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'foo.c'), '-' + opt], stdout=PIPE, stderr=PIPE).communicate()
assert 'foo.o: ' in output, '-%s failed to produce the right output: %s' % (opt, output)
assert 'error' not in err, 'Unexpected stderr: ' + err
def test_chunking(self):
def nonfc():
if os.environ.get('EMCC_FAST_COMPILER') != '0': return self.skip('not relevant for fastcomp, only checks js compiler chunking')
if os.environ.get('EMCC_DEBUG'): return self.skip('cannot run in debug mode')
if os.environ.get('EMCC_CORES'): return self.skip('cannot run if cores are altered')
if multiprocessing.cpu_count() < 2: return self.skip('need multiple cores')
try:
os.environ['EMCC_DEBUG'] = '1'
os.environ['EMCC_CORES'] = '2' # standardize over machines
for asm, linkable, chunks in [
(0, 0, 2), (0, 1, 2),
(1, 0, 2), (1, 1, 2)
]:
print asm, linkable, chunks
output, err = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_libcxx.cpp'), '-O1', '-s', 'LINKABLE=%d' % linkable, '-s', 'ASM_JS=%d' % asm] + (['-O2'] if asm else []), stdout=PIPE, stderr=PIPE).communicate()
ok = False
for c in range(chunks, chunks+2):
ok = ok or ('phase 2 working on %d chunks' % c in err)
assert ok, err
finally:
del os.environ['EMCC_DEBUG']
del os.environ['EMCC_CORES']
nonfastcomp(nonfc)
def test_debuginfo(self):
if os.environ.get('EMCC_DEBUG'): return self.skip('cannot run in debug mode')
try:
os.environ['EMCC_DEBUG'] = '1'
# llvm debug info is kept only when we can see it, which means without the js optimizer, i.e. -O0. js debug info is lost to registerize in -O2, so -g disables registerize
for args, expect_llvm, expect_js in [
(['-O0'], True, True),
(['-O0', '-g'], True, True),
(['-O1'], False, True),
(['-O1', '-g'], False, True),
(['-O2'], False, False),
(['-O2', '-g'], False, True),
]:
print args, expect_llvm, expect_js
output, err = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world.cpp')] + args, stdout=PIPE, stderr=PIPE).communicate()
assert expect_llvm == ('strip-debug' not in err)
assert expect_js == ('registerize' not in err)
finally:
del os.environ['EMCC_DEBUG']
def test_scons(self): # also incidentally tests c++11 integration in llvm 3.1
try_delete(os.path.join(self.get_dir(), 'test'))
shutil.copytree(path_from_root('tests', 'scons'), os.path.join(self.get_dir(), 'test'))
shutil.copytree(path_from_root('tools', 'scons', 'site_scons'), os.path.join(self.get_dir(), 'test', 'site_scons'))
os.chdir(os.path.join(self.get_dir(), 'test'))
Popen(['scons']).communicate()
output = run_js('scons_integration.js')
assert 'If you see this - the world is all right!' in output
def test_embind(self):
for args, fail in [
([], True), # without --bind, we fail
(['--bind'], False),
(['--bind', '-O1'], False),
(['--bind', '-O2'], False),
]:
print args, fail
self.clear()
try_delete(self.in_dir('a.out.js'))
Popen([PYTHON, EMCC, path_from_root('tests', 'embind', 'embind_test.cpp'), '--post-js', path_from_root('tests', 'embind', 'underscore-1.4.2.js'), '--post-js', path_from_root('tests', 'embind', 'imvu_test_adapter.js'), '--post-js', path_from_root('tests', 'embind', 'embind.test.js')] + args, stderr=PIPE if fail else None).communicate()
assert os.path.exists(self.in_dir('a.out.js')) == (not fail)
if not fail:
output = run_js(self.in_dir('a.out.js'), stdout=PIPE, stderr=PIPE, full_output=True, assert_returncode=0)
assert "FAIL" not in output, output
def test_llvm_nativizer(self):
try:
Popen(['as', '--version'], stdout=PIPE, stderr=PIPE).communicate()
except:
return self.skip('no gnu as, cannot run nativizer')
# avoid impure_ptr problems etc.
shutil.copyfile(path_from_root('tests', 'files.cpp'), os.path.join(self.get_dir(), 'files.cpp'))
open(os.path.join(self.get_dir(), 'somefile.binary'), 'w').write('''waka waka############################''')
open(os.path.join(self.get_dir(), 'test.file'), 'w').write('''ay file..............,,,,,,,,,,,,,,''')
open(os.path.join(self.get_dir(), 'stdin'), 'w').write('''inter-active''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'files.cpp'), '-c']).communicate()
Popen([PYTHON, path_from_root('tools', 'nativize_llvm.py'), os.path.join(self.get_dir(), 'files.o')], stdout=PIPE, stderr=PIPE).communicate()
output = Popen([os.path.join(self.get_dir(), 'files.o.run')], stdin=open(os.path.join(self.get_dir(), 'stdin')), stdout=PIPE, stderr=PIPE).communicate()
self.assertContained('''size: 37
data: 119,97,107,97,32,119,97,107,97,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35
loop: 119 97 107 97 32 119 97 107 97 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35
input:inter-active
texto
$
5 : 10,30,20,11,88
other=ay file...
seeked= file.
''', output[0])
self.assertContained('texte\n', output[1])
def test_emconfig(self):
output = Popen([PYTHON, EMCONFIG, 'LLVM_ROOT'], stdout=PIPE, stderr=PIPE).communicate()[0].strip()
try:
assert output == LLVM_ROOT
except:
print >> sys.stderr, 'Assertion failed: python %s LLVM_ROOT returned "%s" instead of expected "%s"!' % (EMCONFIG, output, LLVM_ROOT)
raise
invalid = 'Usage: em-config VAR_NAME'
# Don't accept variables that do not exist
output = Popen([PYTHON, EMCONFIG, 'VAR_WHICH_DOES_NOT_EXIST'], stdout=PIPE, stderr=PIPE).communicate()[0].strip()
assert output == invalid
# Don't accept no arguments
output = Popen([PYTHON, EMCONFIG], stdout=PIPE, stderr=PIPE).communicate()[0].strip()
assert output == invalid
# Don't accept more than one variable
output = Popen([PYTHON, EMCONFIG, 'LLVM_ROOT', 'EMCC'], stdout=PIPE, stderr=PIPE).communicate()[0].strip()
assert output == invalid
# Don't accept arbitrary python code
output = Popen([PYTHON, EMCONFIG, 'sys.argv[1]'], stdout=PIPE, stderr=PIPE).communicate()[0].strip()
assert output == invalid
def test_link_s(self):
# -s OPT=VALUE can conflict with -s as a linker option. We warn and ignore
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
extern "C" {
void something();
}
int main() {
something();
return 0;
}
''')
open(os.path.join(self.get_dir(), 'supp.cpp'), 'w').write(r'''
#include <stdio.h>
extern "C" {
void something() {
printf("yello\n");
}
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'main.o']).communicate()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'supp.cpp'), '-o', 'supp.o']).communicate()
output = Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.o'), '-s', os.path.join(self.get_dir(), 'supp.o'), '-s', 'SAFE_HEAP=1'], stderr=PIPE).communicate()
self.assertContained('treating -s as linker option', output[1])
output = run_js('a.out.js')
assert 'yello' in output, 'code works'
code = open('a.out.js').read()
assert 'SAFE_HEAP' in code, 'valid -s option had an effect'
def test_conftest_s_flag_passing(self):
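# Under EMMAKEN_JUST_CONFIGURE, '-s ASSERTIONS=1' must not be misread as the bare -s linker flag.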
open(os.path.join(self.get_dir(), 'conftest.c'), 'w').write(r'''
int main() {
return 0;
}
''')
os.environ["EMMAKEN_JUST_CONFIGURE"] = "1"
cmd = [PYTHON, EMCC, '-s', 'ASSERTIONS=1', os.path.join(self.get_dir(), 'conftest.c'), '-o', 'conftest']
output = Popen(cmd, stderr=PIPE).communicate()
del os.environ["EMMAKEN_JUST_CONFIGURE"]
self.assertNotContained('emcc: warning: treating -s as linker option', output[1])
assert os.path.exists('conftest')
def test_file_packager(self):
try:
os.mkdir('subdir')
except:
pass
open('data1.txt', 'w').write('data1')
os.chdir('subdir')
open('data2.txt', 'w').write('data2')
# relative path to below the current dir is invalid
out, err = Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', '../data1.txt'], stdout=PIPE, stderr=PIPE).communicate()
assert len(out) == 0
assert 'below the current directory' in err
# relative path that ends up under us is cool
out, err = Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', '../subdir/data2.txt'], stdout=PIPE, stderr=PIPE).communicate()
assert len(out) > 0
assert 'below the current directory' not in err
# direct path leads to the same code being generated - relative path does not make us do anything different
out2, err2 = Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data2.txt'], stdout=PIPE, stderr=PIPE).communicate()
assert len(out2) > 0
assert 'below the current directory' not in err2
def clean(txt):
return filter(lambda line: 'PACKAGE_UUID' not in line, txt.split('\n'))
out = clean(out)
out2 = clean(out2)
assert out == out2
# sanity check that we do generate different code for different inputs
out3, err3 = Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data2.txt', '[email protected]'], stdout=PIPE, stderr=PIPE).communicate()
out3 = clean(out3)
assert out != out3
def test_crunch(self):
# crunch should not be run if a .crn exists that is more recent than the .dds
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
time.sleep(0.1)
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--crunch=32', '--preload', 'ship.dds'], stdout=open('pre.js', 'w')).communicate()
assert os.stat('test.data').st_size < 0.25*os.stat('ship.dds').st_size, 'Compressed should be much smaller than dds'
crunch_time = os.stat('ship.crn').st_mtime
dds_time = os.stat('ship.dds').st_mtime
assert crunch_time > dds_time, 'Crunch is more recent'
# run again, should not recrunch!
time.sleep(0.1)
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--crunch=32', '--preload', 'ship.dds'], stdout=open('pre.js', 'w')).communicate()
assert crunch_time == os.stat('ship.crn').st_mtime, 'Crunch is unchanged'
# update dds, so should recrunch
time.sleep(0.1)
os.utime('ship.dds', None)
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--crunch=32', '--preload', 'ship.dds'], stdout=open('pre.js', 'w')).communicate()
assert crunch_time < os.stat('ship.crn').st_mtime, 'Crunch was changed'
def test_headless(self):
if SPIDERMONKEY_ENGINE not in JS_ENGINES: return self.skip('cannot run without spidermonkey due to node limitations (Uint8ClampedArray etc.)')
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'example.png'))
Popen([PYTHON, EMCC, path_from_root('tests', 'sdl_headless.c'), '-s', 'HEADLESS=1']).communicate()
output = run_js('a.out.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE)
assert '''Init: 0
Font: 0x1
Sum: 0
you should see two lines of text in different colors and a blue rectangle
SDL_Quit called (and ignored)
done.
''' in output, output
def test_preprocess(self):
self.clear()
out, err = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world.c'), '-E'], stdout=PIPE).communicate()
assert not os.path.exists('a.out.js')
# Test explicitly that the output contains a line typically written by the preprocessor.
# Clang outputs on Windows lines like "#line 1", on Unix '# 1 '.
# TODO: This is one more of those platform-specific discrepancies, investigate more if this ever becomes an issue,
# ideally we would have emcc output identical data on all platforms.
assert '''#line 1 ''' in out or '''# 1 ''' in out
assert '''hello_world.c"''' in out
assert '''printf("hello, world!''' in out
def test_demangle(self):
open('src.cpp', 'w').write('''
#include <stdio.h>
#include <emscripten.h>
void two(char c) {
EM_ASM(Module.print(stackTrace()));
}
void one(int x) {
two(x % 17);
}
int main() {
EM_ASM(Module.print(demangle('__Znwj'))); // check for no aborts
EM_ASM(Module.print(demangle('_main')));
EM_ASM(Module.print(demangle('__Z2f2v')));
EM_ASM(Module.print(demangle('__Z12abcdabcdabcdi')));
EM_ASM(Module.print(demangle('__ZL12abcdabcdabcdi')));
EM_ASM(Module.print(demangle('__Z4testcsifdPvPiPc')));
EM_ASM(Module.print(demangle('__ZN4test5moarrEcslfdPvPiPc')));
EM_ASM(Module.print(demangle('__ZN4Waka1f12a234123412345pointEv')));
EM_ASM(Module.print(demangle('__Z3FooIiEvv')));
EM_ASM(Module.print(demangle('__Z3FooIidEvi')));
EM_ASM(Module.print(demangle('__ZN3Foo3BarILi5EEEvv')));
EM_ASM(Module.print(demangle('__ZNK10__cxxabiv120__si_class_type_info16search_below_dstEPNS_19__dynamic_cast_infoEPKvib')));
EM_ASM(Module.print(demangle('__Z9parsewordRPKciRi')));
EM_ASM(Module.print(demangle('__Z5multiwahtjmxyz')));
EM_ASM(Module.print(demangle('__Z1aA32_iPA5_c')));
EM_ASM(Module.print(demangle('__ZN21FWakaGLXFleeflsMarfooC2EjjjPKvbjj')));
EM_ASM(Module.print(demangle('__ZN5wakaw2Cm10RasterBaseINS_6watwat9PolocatorEE8merbine1INS4_2OREEEvPKjj'))); // we get this wrong, but at least emit a '?'
one(17);
return 0;
}
''')
Popen([PYTHON, EMCC, 'src.cpp', '-s', 'LINKABLE=1']).communicate()
output = run_js('a.out.js')
self.assertContained('''operator new()
main()
f2()
abcdabcdabcd(int)
abcdabcdabcd(int)
test(char, short, int, float, double, void*, int*, char*)
test::moarr(char, short, long, float, double, void*, int*, char*)
Waka::f::a23412341234::point()
void Foo<int>()
void Foo<int, double>(int)
void Foo::Bar<5>()
__cxxabiv1::__si_class_type_info::search_below_dst(__cxxabiv1::__dynamic_cast_info*, void*, int, bool)
parseword(char*&, int, int&)
multi(wchar_t, signed char, unsigned char, unsigned short, unsigned int, unsigned long, long long, unsigned long long, ...)
a(int [32], char [5]*)
FWakaGLXFleeflsMarfoo::FWakaGLXFleeflsMarfoo(unsigned int, unsigned int, unsigned int, void*, bool, unsigned int, unsigned int)
void wakaw::Cm::RasterBase<wakaw::watwat::Polocator?>(unsigned int*, unsigned int)
''', output)
# test for multiple functions in one stack trace
assert 'one(int)' in output
assert 'two(char)' in output
def test_module_exports_with_closure(self):
# This test checks that module.exports is retained when JavaScript is minified by compiling with --closure 1.
# This is important because, if module.exports is not present, the Module object will not be visible to node.js.
# Run with ./runner.py other.test_module_exports_with_closure
# First make sure test.js isn't present.
try_delete(path_from_root('tests', 'Module-exports', 'test.js'))
assert not os.path.exists(path_from_root('tests', 'Module-exports', 'test.js'))
# compile with -O2 --closure 0
Popen([PYTHON, EMCC, path_from_root('tests', 'Module-exports', 'test.c'), '-o', path_from_root('tests', 'Module-exports', 'test.js'), '-O2', '--closure', '0', '--pre-js', path_from_root('tests', 'Module-exports', 'setup.js'), '-s', 'EXPORTED_FUNCTIONS=["_bufferTest"]'], stdout=PIPE, stderr=PIPE).communicate()
# Check that compilation was successful
assert os.path.exists(path_from_root('tests', 'Module-exports', 'test.js'))
test_js_closure_0 = open(path_from_root('tests', 'Module-exports', 'test.js')).read()
# Check that test.js compiled with --closure 0 contains "module['exports'] = Module;"
assert ("module['exports'] = Module;" in test_js_closure_0) or ('module["exports"]=Module' in test_js_closure_0)
# Check that main.js (which requires test.js) completes successfully when run in node.js
# in order to check that the exports are indeed functioning correctly.
if NODE_JS in JS_ENGINES:
self.assertContained('bufferTest finished', run_js(path_from_root('tests', 'Module-exports', 'main.js'), engine=NODE_JS))
# Delete test.js again and check it's gone.
try_delete(path_from_root('tests', 'Module-exports', 'test.js'))
assert not os.path.exists(path_from_root('tests', 'Module-exports', 'test.js'))
# compile with -O2 --closure 1
Popen([PYTHON, EMCC, path_from_root('tests', 'Module-exports', 'test.c'), '-o', path_from_root('tests', 'Module-exports', 'test.js'), '-O2', '--closure', '1', '--pre-js', path_from_root('tests', 'Module-exports', 'setup.js'), '-s', 'EXPORTED_FUNCTIONS=["_bufferTest"]'], stdout=PIPE, stderr=PIPE).communicate()
# Check that compilation was successful
assert os.path.exists(path_from_root('tests', 'Module-exports', 'test.js'))
test_js_closure_1 = open(path_from_root('tests', 'Module-exports', 'test.js')).read()
# Check that test.js compiled with --closure 1 contains "module.exports", we want to verify that
# "module['exports']" got minified to "module.exports" when compiling with --closure 1
assert "module.exports" in test_js_closure_1
# Check that main.js (which requires test.js) completes successfully when run in node.js
# in order to check that the exports are indeed functioning correctly.
if NODE_JS in JS_ENGINES:
self.assertContained('bufferTest finished', run_js(path_from_root('tests', 'Module-exports', 'main.js'), engine=NODE_JS))
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'Module-exports', 'test.js'))
try_delete(path_from_root('tests', 'Module-exports', 'test.js.map'))
def test_fs_stream_proto(self):
open('src.cpp', 'wb').write(r'''
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <errno.h>
#include <string.h>
int main()
{
int file_size = 0;
int h = open("src.cpp", O_RDONLY, 0666);
if (0 != h)
{
FILE* file = fdopen(h, "rb");
if (0 != file)
{
fseek(file, 0, SEEK_END);
file_size = ftell(file);
fseek(file, 0, SEEK_SET);
}
else
{
printf("fdopen() failed: %s\n", strerror(errno));
return 10;
}
close(h);
printf("File size: %d\n", file_size);
}
else
{
printf("open() failed: %s\n", strerror(errno));
return 10;
}
return 0;
}
''')
Popen([PYTHON, EMCC, 'src.cpp', '--embed-file', 'src.cpp']).communicate()
for engine in JS_ENGINES:
out = run_js('a.out.js', engine=engine, stderr=PIPE, full_output=True)
self.assertContained('File size: 722', out)
def test_simd(self):
if get_clang_version() == '3.2':
simd_args = ['-O3', '-vectorize', '-vectorize-loops']
elif get_clang_version() == '3.3':
simd_args = ['-O3', '-vectorize-loops', '-vectorize-slp-aggressive', '-bb-vectorize-aligned-only'] # XXX this generates <2 x float> , '-vectorize-slp']
elif get_clang_version() == '3.4':
simd_args = ['-O3'] # vectorization on by default, SIMD=1 makes us not disable it
else:
raise Exception('unknown llvm version')
simd_args += ['-bb-vectorize-vector-bits=128', '-force-vector-width=4']
self.clear()
Popen([PYTHON, EMCC, path_from_root('tests', 'linpack.c'), '-O2', '-s', 'SIMD=1', '-DSP', '--llvm-opts', str(simd_args)]).communicate()
self.assertContained('Unrolled Single Precision', run_js('a.out.js'))
def test_dependency_file(self):
# Issue 1732: -MMD (and friends) create dependency files that need to be
# copied from the temporary directory.
open(os.path.join(self.get_dir(), 'test.cpp'), 'w').write(r'''
#include "test.hpp"
void my_function()
{
}
''')
open(os.path.join(self.get_dir(), 'test.hpp'), 'w').write(r'''
void my_function();
''')
Popen([PYTHON, EMCC, '-MMD', '-c', os.path.join(self.get_dir(), 'test.cpp'), '-o',
os.path.join(self.get_dir(), 'test.o')]).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'test.d')), 'No dependency file generated'
deps = open(os.path.join(self.get_dir(), 'test.d')).read()
# Look for ': ' instead of just ':' to not confuse C:\path\ notation with make "target: deps" rule. Not perfect, but good enough for this test.
head, tail = deps.split(': ', 2)
assert 'test.o' in head, 'Invalid dependency target'
assert 'test.cpp' in tail and 'test.hpp' in tail, 'Invalid dependencies generated'
def test_dependency_file_2(self):
self.clear()
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'a.c')
Popen([PYTHON, EMCC, 'a.c', '-MMD', '-MF', 'test.d', '-c']).communicate()
self.assertContained(open('test.d').read(), 'a.o: a.c\n')
self.clear()
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'a.c')
Popen([PYTHON, EMCC, 'a.c', '-MMD', '-MF', 'test.d', '-c', '-o', 'test.o']).communicate()
self.assertContained(open('test.d').read(), 'test.o: a.c\n')
self.clear()
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'a.c')
os.mkdir('obj')
Popen([PYTHON, EMCC, 'a.c', '-MMD', '-MF', 'test.d', '-c', '-o', 'obj/test.o']).communicate()
self.assertContained(open('test.d').read(), 'obj/test.o: a.c\n')
def test_quoted_js_lib_key(self):
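# JS library keys that need quoting (punctuation, whitespace) should be accepted by the library parser.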
open('lib.js', 'w').write(r'''
mergeInto(LibraryManager.library, {
__internal_data:{
'<' : 0,
'white space' : 1
},
printf__deps: ['__internal_data', 'fprintf']
});
''')
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world.cpp'), '--js-library', 'lib.js']).communicate()
self.assertContained('hello, world!', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_float_h(self):
process = Popen([PYTHON, EMCC, path_from_root('tests', 'float+.c')], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
assert process.returncode == 0, 'float.h should agree with our system: ' + out + '\n\n\n' + err
def test_default_obj_ext(self):
outdir = os.path.join(self.get_dir(), 'out_dir') + '/'
self.clear()
os.mkdir(outdir)
process = Popen([PYTHON, EMCC, '-c', path_from_root('tests', 'hello_world.c'), '-o', outdir], stderr=PIPE)
out, err = process.communicate()
assert not err, err
assert os.path.isfile(outdir + 'hello_world.o')
self.clear()
os.mkdir(outdir)
process = Popen([PYTHON, EMCC, '-c', path_from_root('tests', 'hello_world.c'), '-o', outdir, '--default-obj-ext', 'obj'], stderr=PIPE)
out, err = process.communicate()
assert not err, err
assert os.path.isfile(outdir + 'hello_world.obj')
def test_doublestart_bug(self):
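# Adding and immediately removing a run dependency in preRun must not cause main() to run twice.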
open('code.cpp', 'w').write(r'''
#include <stdio.h>
#include <emscripten.h>
void main_loop(void) {
static int cnt = 0;
if (++cnt >= 10) emscripten_cancel_main_loop();
}
int main(void) {
printf("This should only appear once.\n");
emscripten_set_main_loop(main_loop, 10, 0);
return 0;
}
''')
open('pre.js', 'w').write(r'''
if (typeof Module === 'undefined') Module = eval('(function() { try { return Module || {} } catch(e) { return {} } })()');
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
Module['addRunDependency']('test_run_dependency');
Module['removeRunDependency']('test_run_dependency');
});
''')
Popen([PYTHON, EMCC, 'code.cpp', '--pre-js', 'pre.js']).communicate()
output = run_js(os.path.join(self.get_dir(), 'a.out.js'), engine=NODE_JS)
assert output.count('This should only appear once.') == 1, '\n'+output
def test_module_print(self):
open('code.cpp', 'w').write(r'''
#include <stdio.h>
int main(void) {
printf("123456789\n");
return 0;
}
''')
open('pre.js', 'w').write(r'''
var Module = { print: function(x) { throw '<{(' + x + ')}>' } };
''')
Popen([PYTHON, EMCC, 'code.cpp', '--pre-js', 'pre.js']).communicate()
output = run_js(os.path.join(self.get_dir(), 'a.out.js'), stderr=PIPE, full_output=True, engine=NODE_JS)
assert r'<{(123456789)}>' in output, output
def test_precompiled_headers(self):
self.clear()
open('header.h', 'w').write('#define X 5\n')
Popen([PYTHON, EMCC, '-xc++-header', 'header.h', '-c']).communicate()
assert os.path.exists('header.h.gch')
open('src.cpp', 'w').write(r'''
#include <stdio.h>
int main() {
printf("|%d|\n", X);
return 0;
}
''')
Popen([PYTHON, EMCC, 'src.cpp', '-include', 'header.h']).communicate()
output = run_js(self.in_dir('a.out.js'), stderr=PIPE, full_output=True, engine=NODE_JS)
assert '|5|' in output, output
# also verify that the gch is actually used
err = Popen([PYTHON, EMCC, 'src.cpp', '-include', 'header.h', '-Xclang', '-print-stats'], stderr=PIPE).communicate()
assert '*** PCH/Modules Loaded:\nModule: header.h.gch' in err[1], err[1]
# and sanity check it is not mentioned when not
try_delete('header.h.gch')
err = Popen([PYTHON, EMCC, 'src.cpp', '-include', 'header.h', '-Xclang', '-print-stats'], stderr=PIPE).communicate()
assert '*** PCH/Modules Loaded:\nModule: header.h.gch' not in err[1], err[1]
# with specified target via -o
try_delete('header.h.gch')
Popen([PYTHON, EMCC, '-xc++-header', 'header.h', '-o', 'my.gch']).communicate()
assert os.path.exists('my.gch')
def test_warn_unaligned(self):
if os.environ.get('EMCC_FAST_COMPILER') == '0': return self.skip('need fastcomp')
open('src.cpp', 'w').write(r'''
#include <stdio.h>
static const double grid[4][2] = {{-3 / 3., -1 / 3.},
{+1 / 3., -3 / 3.},
{-1 / 3., +3 / 3.},
{+3 / 3., +1 / 3.}};
int main() {
for (int i = 0; i < 4; i++)
printf("%d:%.2f,%.2f ", i, grid[i][0], grid[i][1]);
printf("\n");
return 0;
}
''')
output = Popen([PYTHON, EMCC, 'src.cpp', '-O1', '-s', 'WARN_UNALIGNED=1'], stderr=PIPE).communicate()
assert 'emcc: warning: unaligned store' in output[1]
output = Popen([PYTHON, EMCC, 'src.cpp', '-s', 'WARN_UNALIGNED=1', '-g'], stderr=PIPE).communicate()
assert 'emcc: warning: unaligned store' in output[1]
assert '@line 9 "src.cpp"' in output[1]
def test_no_exit_runtime(self):
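# With NO_EXIT_RUNTIME=1, global destructors should neither run nor even be emitted.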
open('code.cpp', 'w').write(r'''
#include <stdio.h>
template<int x>
struct Waste {
Waste() {
printf("coming around %d\n", x);
}
~Waste() {
printf("going away %d\n", x);
}
};
Waste<1> w1;
Waste<2> w2;
Waste<3> w3;
Waste<4> w4;
Waste<5> w5;
int main(int argc, char **argv) {
return 0;
}
''')
for no_exit in [0, 1]:
for opts in [[], ['-O1'], ['-O2', '-g2'], ['-O2', '-g2', '--llvm-lto', '1']]:
print no_exit, opts
Popen([PYTHON, EMCC] + opts + ['code.cpp', '-s', 'NO_EXIT_RUNTIME=' + str(no_exit)]).communicate()
output = run_js(os.path.join(self.get_dir(), 'a.out.js'), stderr=PIPE, full_output=True, engine=NODE_JS)
src = open('a.out.js').read()
exit = 1-no_exit
assert 'coming around' in output
assert ('going away' in output) == exit, 'destructors should not run if no exit'
assert ('_ZN5WasteILi2EED1Ev' in src) == exit, 'destructors should not appear if no exit'
assert ('atexit(' in src) == exit, 'atexit should not appear or be called'
def test_os_oz(self):
if os.environ.get('EMCC_DEBUG'): return self.skip('cannot run in debug mode')
try:
os.environ['EMCC_DEBUG'] = '1'
for args, expect in [
(['-O1'], 'LLVM opts: -O1'),
(['-O2'], 'LLVM opts: -O3'),
(['-Os'], 'LLVM opts: -Os'),
(['-Oz'], 'LLVM opts: -Oz'),
(['-O3'], 'LLVM opts: -O3'),
]:
print args, expect
output, err = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world.cpp')] + args, stdout=PIPE, stderr=PIPE).communicate()
self.assertContained(expect, err)
self.assertContained('hello, world!', run_js('a.out.js'))
finally:
del os.environ['EMCC_DEBUG']
def test_global_inits(self):
open('inc.h', 'w').write(r'''
#include <stdio.h>
template<int x>
struct Waste {
int state;
Waste() : state(10) {}
void test(int a) {
printf("%d\n", a + state);
}
~Waste() {
printf("going away %d\n", x);
}
};
Waste<3> *getMore();
''')
open('main.cpp', 'w').write(r'''
#include "inc.h"
Waste<1> mw1;
Waste<2> mw2;
int main(int argc, char **argv) {
printf("argc: %d\n", argc);
mw1.state += argc;
mw2.state += argc;
mw1.test(5);
mw2.test(6);
getMore()->test(0);
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include "inc.h"
Waste<3> sw3;
Waste<3> *getMore() {
return &sw3;
}
''')
for opts, has_global in [
(['-O2', '-g'], True),
(['-O2', '-g', '-s', 'NO_EXIT_RUNTIME=1'], False), # no-exit-runtime removes the atexits, and then globaldce can work its magic to remove the global initializer entirely
(['-Os', '-g'], True),
(['-Os', '-g', '-s', 'NO_EXIT_RUNTIME=1'], False),
(['-O2', '-g', '--llvm-lto', '1'], True),
(['-O2', '-g', '-s', 'NO_EXIT_RUNTIME=1', '--llvm-lto', '1'], False),
]:
print opts, has_global
Popen([PYTHON, EMCC, 'main.cpp', '-c'] + opts).communicate()
Popen([PYTHON, EMCC, 'side.cpp', '-c'] + opts).communicate()
Popen([PYTHON, EMCC, 'main.o', 'side.o'] + opts).communicate()
output = run_js(os.path.join(self.get_dir(), 'a.out.js'), stderr=PIPE, full_output=True, engine=NODE_JS)
src = open('a.out.js').read()
self.assertContained('argc: 1\n16\n17\n10\n', run_js('a.out.js'))
assert ('_GLOBAL_' in src) == has_global
def test_implicit_func(self):
open('src.c', 'w').write(r'''
#include <stdio.h>
int main()
{
printf("hello %d\n", strnlen("waka", 2)); // Implicit declaration, no header, for strnlen
int (*my_strnlen)(char*, ...) = strnlen;
printf("hello %d\n", my_strnlen("shaka", 2));
return 0;
}
''')
IMPLICIT_WARNING = '''warning: implicit declaration of function 'strnlen' is invalid in C99'''
IMPLICIT_ERROR = '''error: implicit declaration of function 'strnlen' is invalid in C99'''
for opts, expected, compile_expected in [
([], None, [IMPLICIT_ERROR]),
(['-Wno-error=implicit-function-declaration'], ['hello '], [IMPLICIT_WARNING]), # turn error into warning
(['-Wno-implicit-function-declaration'], ['hello '], []), # turn error into nothing at all (runtime output is incorrect)
]:
print opts, expected
try_delete('a.out.js')
stdout, stderr = Popen([PYTHON, EMCC, 'src.c'] + opts, stderr=PIPE).communicate()
for ce in compile_expected + ['''warning: incompatible pointer types''']:
self.assertContained(ce, stderr)
if expected is None:
assert not os.path.exists('a.out.js')
else:
output = run_js(os.path.join(self.get_dir(), 'a.out.js'), stderr=PIPE, full_output=True)
for e in expected:
self.assertContained(e, output)
def test_incorrect_static_call(self):
for opts in [0, 1]:
for asserts in [0, 1]:
extra = []
if opts != 1-asserts: extra = ['-s', 'ASSERTIONS=' + str(asserts)]
cmd = [PYTHON, EMCC, path_from_root('tests', 'cases', 'sillyfuncast2_noasm.ll'), '-O' + str(opts)] + extra
print cmd
stdout, stderr = Popen(cmd, stderr=PIPE).communicate()
assert ('''unexpected number of arguments 3 in call to 'doit', should be 2''' in stderr) == asserts, stderr
assert ('''unexpected return type i32 in call to 'doit', should be void''' in stderr) == asserts, stderr
assert ('''unexpected argument type float at index 1 in call to 'doit', should be i32''' in stderr) == asserts, stderr
def test_llvm_lit(self):
llvm_src = LLVM_ROOT
while not os.path.exists(os.path.join(llvm_src, 'emscripten-version.txt')): llvm_src = os.path.dirname(llvm_src)
cmd = [os.path.join(LLVM_ROOT, 'llvm-lit'), '-v', os.path.join(llvm_src, 'test', 'CodeGen', 'JS')]
print cmd
p = Popen(cmd)
p.communicate()
assert p.returncode == 0, 'LLVM tests must pass with exit code 0'
def test_odin_validation(self):
if not SPIDERMONKEY_ENGINE or SPIDERMONKEY_ENGINE not in JS_ENGINES: return self.skip('this test tests asm.js validation in SpiderMonkey')
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world.c'), '-O1'], stdout=PIPE, stderr=PIPE).communicate()
output = run_js('a.out.js', stderr=PIPE, full_output=True, engine=SPIDERMONKEY_ENGINE)
assert 'asm.js' in output, 'spidermonkey should mention asm.js compilation: ' + output
def test_bad_triple(self):
Popen([CLANG, path_from_root('tests', 'hello_world.c'), '-c', '-emit-llvm', '-o', 'a.bc'] + get_clang_native_args(), stdout=PIPE, stderr=PIPE).communicate()
out, err = Popen([PYTHON, EMCC, 'a.bc'], stdout=PIPE, stderr=PIPE).communicate()
assert 'warning' in err, err
assert 'incorrect target triple' in err, err
def test_valid_abspath(self):
# Test whether abspath warning appears
abs_include_path = path_from_root('tests')
process = Popen([PYTHON, EMCC, '-I%s' % abs_include_path, path_from_root('tests', 'hello_world.c')], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
warning = '-I or -L of an absolute path "-I%s" encountered. If this is to a local system header/library, it may cause problems (local system files make sense for compiling natively on your system, but not necessarily to JavaScript). Pass \'-Wno-warn-absolute-paths\' to emcc to hide this warning.' % abs_include_path
assert(warning in err)
# Hide warning for this include path
process = Popen([PYTHON, EMCC, '--valid-abspath', abs_include_path,'-I%s' % abs_include_path, path_from_root('tests', 'hello_world.c')], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
assert(warning not in err)
def test_simplify_ifs(self):
def test(src, nums):
open('src.c', 'w').write(src)
for opts, ifs in [
[['-g2'], nums[0]],
[['-profiling'], nums[1]],
[['-profiling', '-g2'], nums[2]]
]:
print opts, ifs
try_delete('a.out.js')
Popen([PYTHON, EMCC, 'src.c', '-O2'] + opts, stdout=PIPE).communicate()
src = open('a.out.js').read()
main = src[src.find('function _main'):src.find('\n}', src.find('function _main'))]
actual_ifs = main.count('if (')
assert ifs == actual_ifs, main + ' : ' + str([ifs, actual_ifs])
#print main
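    # Each triple of expected if-counts passed to test() below corresponds to builds
    # with -g2, -profiling, and '-profiling -g2' respectively (see the loop above).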
test(r'''
#include <stdio.h>
#include <string.h>
int main(int argc, char **argv) {
if (argc > 5 && strlen(argv[0]) > 1 && strlen(argv[1]) > 2) printf("halp");
return 0;
}
''', [3, 1, 1])
test(r'''
#include <stdio.h>
#include <string.h>
int main(int argc, char **argv) {
while (argc % 3 == 0) {
if (argc > 5 && strlen(argv[0]) > 1 && strlen(argv[1]) > 2) {
printf("halp");
argc++;
} else {
while (argc > 0) {
printf("%d\n", argc--);
}
}
}
return 0;
}
''', [8, 5, 5])
test(r'''
#include <stdio.h>
#include <string.h>
int main(int argc, char **argv) {
while (argc % 17 == 0) argc *= 2;
if (argc > 5 && strlen(argv[0]) > 10 && strlen(argv[1]) > 20) {
printf("halp");
argc++;
} else {
printf("%d\n", argc--);
}
while (argc % 17 == 0) argc *= 2;
return argc;
}
''', [6, 3, 3])
test(r'''
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[]) {
if (getenv("A") && getenv("B")) {
printf("hello world\n");
} else {
printf("goodnight moon\n");
}
printf("and that's that\n");
return 0;
}
''', [3, 1, 1])
test(r'''
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[]) {
if (getenv("A") || getenv("B")) {
printf("hello world\n");
}
printf("and that's that\n");
return 0;
}
''', [3, 1, 1])
def test_symbol_map(self):
for m in [0, 1]:
self.clear()
cmd = [PYTHON, EMCC, path_from_root('tests', 'hello_world.c'), '-O2']
if m: cmd += ['--emit-symbol-map']
print cmd
stdout, stderr = Popen(cmd, stderr=PIPE).communicate()
assert ('''wrote symbol map file''' in stderr) == m, stderr
assert (os.path.exists('a.out.js.symbols') == m), stderr
if m:
symbols = open('a.out.js.symbols').read()
assert ':_main' in symbols
def test_bc_to_bc(self):
# emcc should 'process' bitcode to bitcode. build systems can request this if
# e.g. they assume our 'executable' extension is bc, and compile an .o to a .bc
# (the user would then need to build bc to js of course, but we need to actually
# emit the bc)
cmd = Popen([PYTHON, EMCC, '-c', path_from_root('tests', 'hello_world.c')]).communicate()
assert os.path.exists('hello_world.o')
cmd = Popen([PYTHON, EMCC, 'hello_world.o', '-o', 'hello_world.bc']).communicate()
assert os.path.exists('hello_world.o')
assert os.path.exists('hello_world.bc')
def test_bad_function_pointer_cast(self):
open('src.cpp', 'w').write(r'''
#include <stdio.h>
typedef int (*callback) (int, ...);
int impl(int foo) {
printf("Hello, world.\n");
return 0;
}
int main() {
volatile callback f = (callback) impl;
f(0); /* This fails with or without additional arguments. */
return 0;
}
''')
for opts in [0, 1, 2]:
for safe in [0, 1]:
cmd = [PYTHON, EMCC, 'src.cpp', '-O' + str(opts), '-s', 'SAFE_HEAP=' + str(safe)]
print cmd
Popen(cmd).communicate()
output = run_js('a.out.js', stderr=PIPE, full_output=True)
if safe:
assert 'Function table mask error' in output, output
else:
if opts == 0:
assert 'Invalid function pointer called' in output, output
else:
assert 'abort()' in output, output
def test_aliased_func_pointers(self):
open('src.cpp', 'w').write(r'''
#include <stdio.h>
int impl1(int foo) { return foo; }
float impla(float foo) { return foo; }
int impl2(int foo) { return foo+1; }
float implb(float foo) { return foo+1; }
int impl3(int foo) { return foo+2; }
float implc(float foo) { return foo+2; }
int main(int argc, char **argv) {
volatile void *f = (void*)impl1;
if (argc == 50) f = (void*)impla;
if (argc == 51) f = (void*)impl2;
if (argc == 52) f = (void*)implb;
if (argc == 53) f = (void*)impl3;
if (argc == 54) f = (void*)implc;
return (int)f;
}
''')
print 'aliasing'
sizes_ii = {}
sizes_dd = {}
for alias in [None, 0, 1]:
cmd = [PYTHON, EMCC, 'src.cpp', '-O1']
if alias is not None:
cmd += ['-s', 'ALIASING_FUNCTION_POINTERS=' + str(alias)]
else:
alias = -1
print cmd
Popen(cmd).communicate()
src = open('a.out.js').read().split('\n')
for line in src:
if line.strip().startswith('var FUNCTION_TABLE_ii = '):
sizes_ii[alias] = line.count(',')
if line.strip().startswith('var FUNCTION_TABLE_dd = '):
sizes_dd[alias] = line.count(',')
for sizes in [sizes_ii, sizes_dd]:
assert sizes[-1] == 3 # default - let them alias
assert sizes[0] == 7 # no aliasing, all unique, fat tables
assert sizes[1] == 3 # aliased once more
def test_bad_export(self):
for m in ['', ' ']:
self.clear()
cmd = [PYTHON, EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'EXPORTED_FUNCTIONS=["' + m + '_main"]']
print cmd
stdout, stderr = Popen(cmd, stderr=PIPE).communicate()
if m:
assert 'function requested to be exported, but not implemented: " _main"' in stderr, stderr
else:
self.assertContained('hello, world!', run_js('a.out.js'))
|
[] |
[] |
[
"EMCC_DEBUG",
"EMCC_FORCE_STDLIBS",
"EMCC_CORES",
"EMCC_FAST_COMPILER",
"EMMAKEN_JUST_CONFIGURE",
"EMCC_LEAVE_INPUTS_RAW",
"EM_BUILD_VERBOSE"
] |
[]
|
["EMCC_DEBUG", "EMCC_FORCE_STDLIBS", "EMCC_CORES", "EMCC_FAST_COMPILER", "EMMAKEN_JUST_CONFIGURE", "EMCC_LEAVE_INPUTS_RAW", "EM_BUILD_VERBOSE"]
|
python
| 7 | 0 | |
build/gyp/pylib/gyp/generator/ninja.py
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp
import gyp.common
import gyp.msvs_emulation
import gyp.system_test
import gyp.xcode_emulation
import os.path
import re
import subprocess
import sys
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! (which begin with a $ so gyp knows it
# should be treated as a path, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# TODO: figure out how to not build extra host objects in the non-cross-compile
# case when this is enabled, and enable unconditionally.
generator_supports_multiple_toolsets = (
os.environ.get('AR_target') or os.environ.get('CC_target') or
os.environ.get('CXX_target'))
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteCmdExeArgument(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def InvertRelativePath(path):
"""Given a relative path like foo/bar, return the inverse relative path:
the path from the relative path back to the origin dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
  should always normalize to '.' (i.e. back to the origin dir)."""
if not path:
return path
# Only need to handle relative paths into subdirectories for now.
assert '..' not in path, path
depth = len(path.split(os.path.sep))
return os.path.sep.join(['..'] * depth)
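# For example, on POSIX InvertRelativePath('out/Debug') returns '../..'.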
class Target:
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def PreActionInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
#   an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
class NinjaWriter:
def __init__(self, target_outputs, base_dir, build_dir, output_file, flavor,
abs_build_dir=None):
"""
    base_dir: path from source root to directory containing this gyp file;
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
abs_build_dir: absolute path to the build directory
"""
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.flavor = flavor
self.abs_build_dir = abs_build_dir
self.obj_ext = '.obj' if flavor == 'win' else '.o'
# Relative path from build output dir to base dir.
self.build_to_base = os.path.join(InvertRelativePath(build_dir), base_dir)
# Relative path from base dir to build dir.
self.base_to_build = os.path.join(InvertRelativePath(base_dir), build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
if self.flavor == 'win':
# Don't use os.path.normpath here. Callers pass in './foo' and expect
# the result to be runnable, but normpath removes the prefix.
return path.replace('/', '\\')
return path
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
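    # For example, with build_dir 'out/Debug' and a gyp file in 'foo/', build_to_base
    # is '../../foo', so the gyp-relative source 'baz/quux.cc' maps to
    # '../../foo/baz/quux.cc' relative to the build directory.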
if env:
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
if path.startswith('$!'):
return self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets):
"""Given a list of targets, return a path for a single file
    representing the result of building all the targets, or None if the list is empty.
Uses a stamp file if necessary."""
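    # For example, several action outputs collapsed under the name 'actions_depends'
    # become one stamp edge at obj/<base_dir>/<target>.actions_depends.stamp.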
assert targets == filter(None, targets), targets
if len(targets) == 0:
return None
if len(targets) > 1:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets)
self.ninja.newline()
return targets[0]
def WriteSpec(self, spec, config_name):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
else:
self.xcode_settings = None
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput())
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = spec.get('sources', []) + extra_sources
if sources:
link_deps = self.WriteSources(
config_name, config, sources, compile_depends_stamp,
gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang)))
# Some actions/rules output 'sources' that are already object files.
link_deps += [self.GypPathToNinja(f)
for f in sources if f.endswith(self.obj_ext)]
# Write out a link step, if needed.
output = None
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
self.target.actions_stamp or actions_depends)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends)
if not output:
return None
if self.name != output and self.toolset == 'target':
# Write a short name to build this target. This benefits both the
# "build chrome" case as well as the gyp tests, which expect to be
# able to run actions and build libraries by their short name.
self.ninja.build(self.name, 'phony', output)
assert self.target.FinalOutput(), output
return self.target
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', []) + \
extra_mac_bundle_resources
self.WriteMacBundleResources(mac_bundle_resources, mac_bundle_depends)
self.WriteMacInfoPlist(mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetXcodeEnv()
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = re.sub(r'[ {}$]', '_', action['action_name'])
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
rule_name = self.WriteNewNinjaRule(name, action['action'], description,
env=env)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
extra_mac_bundle_resources):
all_outputs = []
for rule in rules:
# First write out a rule for the rule action.
name = rule['rule_name']
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
rule_name = self.WriteNewNinjaRule(name, args, description)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if ('${%s}' % var) in argument:
needed_variables.add(var)
# For each source file, write an edge that generates all the outputs.
for source in rule.get('rule_sources', []):
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of outputs, expanding $vars if possible.
outputs = []
for output in rule['outputs']:
outputs.append(self.ExpandRuleVariables(output, root, dirname,
source, ext, basename))
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', root))
elif var == 'dirname':
extra_bindings.append(('dirname', dirname))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', source_expanded))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', basename))
else:
assert var == None, repr(var)
inputs = map(self.GypPathToNinja, rule.get('inputs', []))
outputs = map(self.GypPathToNinja, outputs)
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild):
outputs = []
env = self.GetXcodeEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
for output, res in gyp.xcode_emulation.GetMacBundleResources(
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.xcode_settings, map(self.GypPathToNinja, resources)):
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource')])
bundle_depends.append(output)
def WriteMacInfoPlist(self, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join(
[QuoteShellArgument(ninja_syntax.escape('-D' + d), self.flavor)
for d in defines])
info_plist = self.ninja.build(intermediate_plist, 'infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
self.ninja.build(out, 'mac_tool', info_plist,
variables=[('mactool_cmd', 'copy-info-plist'),
('env', env)])
bundle_depends.append(out)
def WriteSources(self, config_name, config, sources, predepends,
precompiled_header):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'target':
self.ninja.variable('ar', '$ar_target')
self.ninja.variable('cc', '$cc_target')
self.ninja.variable('cxx', '$cxx_target')
self.ninja.variable('ld', '$ld_target')
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
self.WriteVariableList('defines',
[QuoteShellArgument(ninja_syntax.escape('-D' + d), self.flavor)
for d in config.get('defines', [])])
self.WriteVariableList('includes',
['-I' + self.GypPathToNinja(i)
for i in config.get('include_dirs', [])])
pch_commands = precompiled_header.GetGchBuildCommands()
if self.flavor == 'mac':
self.WriteVariableList('cflags_pch_c',
[precompiled_header.GetInclude('c')])
self.WriteVariableList('cflags_pch_cc',
[precompiled_header.GetInclude('cc')])
self.WriteVariableList('cflags_pch_objc',
[precompiled_header.GetInclude('m')])
self.WriteVariableList('cflags_pch_objcc',
[precompiled_header.GetInclude('mm')])
self.WriteVariableList('cflags', map(self.ExpandSpecial, cflags))
self.WriteVariableList('cflags_c', map(self.ExpandSpecial, cflags_c))
self.WriteVariableList('cflags_cc', map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList('cflags_objc', map(self.ExpandSpecial,
cflags_objc))
self.WriteVariableList('cflags_objcc', map(self.ExpandSpecial,
cflags_objcc))
self.ninja.newline()
outputs = []
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
elif ext in ('c', 's', 'S'):
command = 'cc'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
else:
# TODO: should we assert here on unexpected extensions?
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + self.obj_ext)
implicit = precompiled_header.GetObjDependencies([input], [output])
self.ninja.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends)
outputs.append(output)
self.WritePchTargets(pch_commands)
self.ninja.newline()
return outputs
def WritePchTargets(self, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
cmd = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }.get(lang)
self.ninja.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
implicit_deps = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
extra_link_deps.add(target.binary)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
link_deps.extend(list(extra_link_deps))
extra_bindings = []
if self.is_mac_bundle:
output = self.ComputeMacBundleBinaryOutput()
else:
output = self.ComputeOutput(spec)
extra_bindings.append(('postbuilds',
self.GetPostbuildCommand(spec, output, output)))
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja)
else:
ldflags = config.get('ldflags', [])
self.WriteVariableList('ldflags',
gyp.common.uniquer(map(self.ExpandSpecial,
ldflags)))
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteVariableList('libs', libraries)
self.target.binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
if self.flavor == 'win':
import_lib = output + '.lib'
extra_bindings.append(('dll', output))
extra_bindings.append(('implib', import_lib))
self.target.binary = import_lib
output = [output, import_lib]
self.ninja.build(output, command, link_deps,
implicit=list(implicit_deps),
variables=extra_bindings)
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
if spec['type'] == 'none':
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps,
variables=[('postbuilds', self.GetPostbuildCommand(
spec, self.target.binary, self.target.binary))])
else:
self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
postbuild = self.GetPostbuildCommand(spec, output, self.target.binary,
is_command_start=not package_framework)
variables = []
if postbuild:
variables.append(('postbuilds', postbuild))
if package_framework:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = self.GypPathToNinja(
strip_save_file)
return self.GetXcodeEnv(additional_settings=postbuild_settings)
def GetPostbuildCommand(self, spec, output, output_binary,
is_command_start=False):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
target_postbuilds = self.xcode_settings.GetTargetPostbuilds(
self.config_name,
output,
QuoteShellArgument(output_binary, self.flavor),
quiet=True)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(
spec, self.GypPathToNinja, quiet=True)
postbuilds = target_postbuilds + postbuilds
if not postbuilds:
return ''
env = self.ComputeExportEnvString(self.GetXcodePostbuildEnv())
commands = env + ' F=0; ' + \
' '.join([ninja_syntax.escape(command) + ' || F=$$?;'
for command in postbuilds])
command_string = env + commands + ' ((exit $$F) || rm -rf %s) ' % output + \
'&& exit $$F)'
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
    'export FOO=foo; export BAR="${FOO} bar";'
that exports |env| to the shell."""
export_str = []
for k in gyp.xcode_emulation.TopologicallySortedEnvVarKeys(env):
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(env[k]))))
return ' '.join(export_str)
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = self.ExpandSpecial(generator_default_variables['PRODUCT_DIR'])
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self):
"""Return the 'output' (full output path) to the binary in a bundle."""
assert self.is_mac_bundle
path = self.ExpandSpecial(generator_default_variables['PRODUCT_DIR'])
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
      raise Exception('Unhandled output type: ' + type)
def ComputeOutput(self, spec, type=None):
"""Compute the path for the final output of the spec."""
assert not self.is_mac_bundle or type
if not type:
type = spec['type']
if self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if type in type_in_output_root:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, var, values):
if values is None:
values = []
self.ninja.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, env={}):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule."""
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = rule_name.replace(' ', '_')
args = args[:]
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
if self.flavor == 'win':
cd = 'cmd /s /c "cd %s && ' % self.build_to_base
else:
cd = 'cd %s; ' % self.build_to_base
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
env = self.ComputeExportEnvString(env)
if self.flavor == 'win':
# TODO(scottmg): Respect msvs_cygwin setting here.
# If there's no command, fake one to match the dangling |&&| above.
command = gyp.msvs_emulation.EncodeCmdExeList(args) or 'cmd /c'
else:
command = gyp.common.EncodePOSIXShellList(args)
if env:
# If an environment is passed in, variables in the command should be
# read from it, instead of from ninja's internal variables.
command = ninja_syntax.escape(command)
command = cd + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, restat=True)
self.ninja.newline()
return rule_name
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
cc_target = os.environ.get('CC.target', os.environ.get('CC', 'cc'))
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
global generator_additional_non_configuration_keys
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
global generator_additional_path_sections
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.exe'
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.lib'
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.dll'
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR', '')
def OpenOutput(path):
"""Open |path| for writing, creating directories if necessary."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return open(path, 'w')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
config_name)
master_ninja = ninja_syntax.Writer(
OpenOutput(os.path.join(options.toplevel_dir, build_dir, 'build.ninja')),
width=120)
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, os.path.join(options.toplevel_dir, build_dir))
# Grab make settings for CC/CXX.
if flavor == 'win':
cc = cxx = 'cl'
else:
cc, cxx = 'gcc', 'g++'
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = InvertRelativePath(build_dir)
for key, value in make_global_settings:
if key == 'CC': cc = os.path.join(build_to_root, value)
if key == 'CXX': cxx = os.path.join(build_to_root, value)
flock = 'flock'
if flavor == 'mac':
flock = './gyp-mac-tool flock'
master_ninja.variable('ar', os.environ.get('AR', 'ar'))
master_ninja.variable('cc', os.environ.get('CC', cc))
master_ninja.variable('cxx', os.environ.get('CXX', cxx))
if flavor == 'win':
master_ninja.variable('ld', 'link')
else:
master_ninja.variable('ld', flock + ' linker.lock $cxx')
master_ninja.variable('ar_target', os.environ.get('AR_target', '$ar'))
master_ninja.variable('cc_target', os.environ.get('CC_target', '$cc'))
master_ninja.variable('cxx_target', os.environ.get('CXX_target', '$cxx'))
if flavor == 'win':
master_ninja.variable('ld_target', 'link')
else:
master_ninja.variable('ld_target', flock + ' linker.lock $cxx_target')
if flavor == 'mac':
master_ninja.variable('mac_tool', os.path.join('.', 'gyp-mac-tool'))
master_ninja.newline()
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d')
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d')
else:
# TODO(scottmg): Requires deplist branch of ninja for now (for
# /showIncludes handling).
master_ninja.rule(
'cc',
description='CC $out',
command=('cmd /c $cc /nologo /showIncludes '
'$defines $includes $cflags $cflags_c '
'$cflags_pch_c /c $in /Fo$out '
'| ninja-deplist-helper -f cl -o $out.dl'),
deplist='$out.dl')
master_ninja.rule(
'cxx',
description='CXX $out',
command=('cmd /c $cxx /nologo /showIncludes '
'$defines $includes $cflags $cflags_cc '
'$cflags_pch_cc /c $in /Fo$out '
'| ninja-deplist-helper -f cl -o $out.dl'),
deplist='$out.dl')
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcsT $out $in')
master_ninja.rule(
'solink',
description='SOLINK $out',
command=('$ld -shared $ldflags -o $out -Wl,-soname=$soname '
'-Wl,--whole-archive $in -Wl,--no-whole-archive $libs'))
master_ninja.rule(
'solink_module',
description='SOLINK(module) $out',
command=('$ld -shared $ldflags -o $out -Wl,-soname=$soname '
'-Wl,--start-group $in -Wl,--end-group $libs'))
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out -Wl,-rpath=\$$ORIGIN/lib '
'-Wl,--start-group $in -Wl,--end-group $libs'))
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command='lib /nologo /OUT:$out $in')
master_ninja.rule(
'solink',
description='LINK(DLL) $dll',
command=('$ld /nologo /IMPLIB:$implib /DLL $ldflags /OUT:$dll $in $libs'))
master_ninja.rule(
'solink_module',
description='LINK(DLL) $dll',
command=('$ld /nologo /IMPLIB:$implib /DLL $ldflags /OUT:$dll $in $libs'))
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld /nologo $ldflags /OUT:$out $in $libs'))
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d')
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d')
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool -static -o $out $in'
'$postbuilds')
# TODO(thakis): The solink_module rule is likely wrong. Xcode seems to pass
# -bundle -single_module here (for osmesa.so).
master_ninja.rule(
'solink',
description='SOLINK $out, POSTBUILDS',
command=('$ld -shared $ldflags -o $out '
'$in $libs$postbuilds'))
master_ninja.rule(
'solink_module',
description='SOLINK(module) $out, POSTBUILDS',
command=('$ld -shared $ldflags -o $out '
'$in $libs$postbuilds'))
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $libs$postbuilds'))
master_ninja.rule(
'infoplist',
description='INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env $mac_tool $mactool_cmd $in $out')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='$mac_tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='cmd /c copy /y nul $out>nul')
# TODO(scottmg): Copy fallback?
master_ninja.rule(
'copy',
description='COPY $in $out',
command='cmd /c mklink /h $out $in >nul || mklink /h /j $out $in >nul')
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='ln -f $in $out 2>/dev/null || (rm -rf $out && cp -af $in $out)')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets.")
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir)
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
abs_build_dir=os.path.abspath(os.path.join(options.toplevel_dir, build_dir))
writer = NinjaWriter(target_outputs, base_path, build_dir,
OpenOutput(os.path.join(options.toplevel_dir,
build_dir,
output_file)),
flavor, abs_build_dir=abs_build_dir)
master_ninja.subninja(output_file)
target = writer.WriteSpec(spec, config_name)
if target:
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
if all_outputs:
master_ninja.build('all', 'phony', list(all_outputs))
def GenerateOutput(target_list, target_dicts, data, params):
if params['options'].generator_output:
raise NotImplementedError, "--generator_output not implemented for ninja"
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
|
[] |
[] |
[
"CXX_target",
"AR_target",
"AR",
"CC",
"CC_target",
"CXX",
"CC.target"
] |
[]
|
["CXX_target", "AR_target", "AR", "CC", "CC_target", "CXX", "CC.target"]
|
python
| 7 | 0 | |
script/machine/badger.py
|
################################################################################
# #
# MACHINE-SPECIFIC FUNCTIONS #
# #
# OPTIONS: #
# COMPILER : PATH TO COMPILER EXECUTABLE #
# GSL_DIR : PATH TO GSL INSTALLATION #
# MPI_DIR : PATH TO MPI INSTALLATION #
# HDF5_DIR : PATH TO HDF5 INSTALLATION #
# EXECUTABLE : BINARY WRAPPER USED TO LAUNCH BHLIGHT #
# #
# MPI_DIR AND HDF5_DIR ARE NOT REQUIRED IF COMPILER HANDLES HEADERS AND #
# LIBRARIES FOR THESE DEPENDENCIES #
# #
################################################################################
import util
import sys
import os
import re
# module purge
# module load gcc
# module load openmpi
# module load hdf5-parallel
# module load python
flags_base = '-Wall -Werror -fdiagnostics-color -fopenmp'
fcflags = '-lmpi_usempif08 -lmpi_usempi_ignore_tkr -lmpi_mpifh'
fflags_base = '-fdiagnostics-color -fopenmp -cpp'
GSL_NAME='local-gnu-openmpi'
def matches_host():
host = os.uname()[1]
frontend = 'ba-fe'
backend = re.compile(r'ba\d+')
return frontend in host or bool(backend.match(host))
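# For example, matches_host() is true on 'ba-fe1' (the front end) and on compute
# nodes like 'ba042'.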
def get_options():
host = {}
host['NAME'] = os.uname()[1]
host['COMPILER'] = 'h5pcc'
host['COMPILER_FLAGS'] = flags_base + ' ' + fcflags + ' ' + '-O2 -march=native'
host['DEBUG_FLAGS'] = flags_base + ' ' + fcflags + ' ' + '-g -O0'
# Change this to your locally installed GSL
host['GSL_DIR'] = os.path.join(os.environ['HOME'],GSL_NAME)
host['FORTRAN_COMP'] = 'h5pfc'
host['FCFLAGS'] = fflags_base + ' ' + '-O2'
host['FDEBUG_FLAGS'] = fflags_base + ' ' + '-g -O0'
host['FORTLINK'] = '-lgfortran -lhdf5_fortran'
host['FORTLIB'] = ''
host['EXECUTABLE'] = 'mpirun -np 1'
host['MPI_EXECUTABLE'] = 'mpirun'
return host
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
postprocessing/collectPostOutputGSweep.py
|
#!/usr/bin/env python
"""
collectPostOutputGSweep.py
collect output from different runs & reps into single data structures
to be used with the newer sweeps across gE, gI
"""
import sys
import os
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
import progressbar
import pandas as pd
#### config here
projName = "g_sweep_fix_all"
outFn = os.path.join(os.environ['HOME'], 'work', 'prebotc',
'data', projName, 'post/collected.mat')
outDfFn = os.path.join(os.environ['HOME'], 'work', 'prebotc',
'data', projName, 'post/collected_table.csv')
## setup variables
srcDir = os.path.join(os.environ['HOME'], 'work', 'prebotc', 'src')
iputFn = os.path.join(srcDir, 'pipeline', projName + "_collect_control")
postFn = os.path.join(srcDir, 'pipeline', projName + "_post")
errFn = os.path.join(srcDir, 'pipeline', projName + "_post_err")
## load
f = open(iputFn, 'r')
lines = f.readlines()
splitLines = np.array([ line.split() for line in lines ])
nstep=len(splitLines)
fp = open(postFn, 'r')
postLines = fp.readlines()
## casting splitLines (above) as a numpy array allows column slicing
f.close()
fErr = open(errFn, 'w')
## get the parameters for which we will collect
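## control-file columns, as inferred from the indexing below:
##   col 2 = postprocessing output file, col 3 = k, col 4 = pI,
##   col 5 = rep, col 6 = gE, col 7 = gI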
ks = np.array(np.unique(splitLines[:,3]), dtype=np.float)
ks = np.sort(ks)
print "ks: " + str(ks)
pIs = np.array(np.unique(splitLines[:,4]), dtype=np.float)
pIs = np.sort(pIs)
print "pIs: " + str(pIs)
gEs = np.array(np.unique(splitLines[:,6]), dtype=np.float)
gEs = np.sort(gEs)
print "gEs: " + str(gEs)
gIs = np.array(np.unique(splitLines[:,7]), dtype=np.float)
gIs = np.sort(gIs)
print "gIs: " + str(gIs)
reps = np.array(np.unique(splitLines[:,5]), dtype=np.float)
reps = np.sort(reps)
numk = len(ks)
numpI = len(pIs)
numgE = len(gEs)
numgI = len(gIs)
numRep = len(reps)
print "num k: " + str(numk)
print "num pI: " + str(numpI)
print "num gE: " + str(numgE)
print "num gI: " + str(numgI)
print "num rep: " + str(numRep)
## setup the collected arrays
chiArray = np.zeros((numk, numpI, numgE, numgI, numRep), dtype=np.float)
fMax = np.zeros((numk, numpI, numgE, numgI, numRep), dtype=np.float)
lag = np.zeros((numk, numpI, numgE, numgI, numRep), dtype=np.float)
op_angle_mean = np.zeros((numk, numpI, numgE, numgI, numRep), dtype=np.float)
op_angle_std = np.zeros((numk, numpI, numgE, numgI, numRep), dtype=np.float)
num_expir = np.zeros((numk, numpI, numgE, numgI, numRep), dtype=np.float)
avg_firing_rate = np.zeros((numk, numpI, numgE, numgI, numRep), dtype=np.float)
amplitude_irregularity = np.zeros((numk, numpI, numgE, numgI, numRep),
dtype=np.float)
ibi_irregularity = np.zeros((numk, numpI, numgE, numgI, numRep),
dtype=np.float)
df=pd.DataFrame(columns=['post_file','k','pI','rep','gE','gI','chi',
'peak_freq','peak_lag', 'op_angle_mean','op_angle_std',
'num_expir','avg_firing_rate','amplitude_irregularity',
'ibi_irregularity'],
index=range(nstep) )
print("Looping over all postprocessing output....")
bar_updates = 100
widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()]
bar = progressbar.ProgressBar(maxval=nstep, widgets=widgets)
bar.start()
bar_i = 0
for i in range(nstep):
run = splitLines[i,:]
postFile = run[2]
k = float(run[3])
pI = float(run[4])
rep = float(run[5])
gE = float(run[6])
gI = float(run[7])
idx = (np.where(ks == k), np.where(pIs == pI), np.where(gEs == gE),
np.where(gIs == gI), np.where(rep == reps))
try:
M = scipy.io.loadmat(postFile)
chiArray[idx] = float(M['chi'])
fMax[idx] = float(M['peak_freq'])
lag[idx] = float(M['peak_lag'])
op_angle_mean[idx] = float(M['op_angle_mean'])
op_angle_std[idx] = float(M['op_angle_std'])
num_expir[idx] = float(M['num_expir'])
avg_firing_rate[idx] = float(M['avg_firing_rate'])
amplitude_irregularity[idx] = float(M['amplitude_irregularity'])
ibi_irregularity[idx] = float(M['ibi_irregularity'])
# fill dataframe
df.loc[i]=pd.Series( { 'post_file': postFile, 'k': k,
'pI': pI, 'rep': rep, 'gE': gE, 'gI': gI,
'chi': float(chiArray[idx]),
'peak_freq': float(fMax[idx]),
'peak_lag': float(lag[idx]),
'op_angle_mean': float(op_angle_mean[idx]),
'op_angle_std': float(op_angle_std[idx]),
'num_expir': float(num_expir[idx]),
'avg_firing_rate': float(avg_firing_rate[idx]),
'amplitude_irregularity':
float(amplitude_irregularity[idx]),
'ibi_irregularity': float(ibi_irregularity[idx])
}
)
except (IOError, KeyError):
# simOutFn = run[1]
# cmd = "./doPost.py " + simOutFn + " " + postFile + "\n"
print postFile + " is missing"
print "Writing command:"
cmd = postLines[i]
print cmd
fErr.write(cmd)
except:
print "Unexpected error in " + postFile
print "Writing command:"
cmd = postLines[i]
print cmd
fErr.write(cmd)
if ( i % np.floor(nstep/bar_updates) ) == 0:
bar.update(i)
bar_i+=1
bar.finish()
# means over reps
chiArray_mean=np.mean(chiArray,axis=4)
fMax_mean=np.mean(fMax, axis=4)
lag_mean=np.mean(lag, axis=4)
op_angle_mean_mean=np.mean(op_angle_mean, axis=4)
op_angle_std_mean=np.mean(op_angle_std, axis=4)
num_expir_mean=np.mean(num_expir,axis=4)
avg_firing_rate_mean=np.mean(avg_firing_rate,axis=4)
amplitude_irregularity_mean=np.mean(amplitude_irregularity,axis=4)
ibi_irregularity_mean=np.mean(ibi_irregularity,axis=4)
# standard deviations over reps
chiArray_std=np.std(chiArray,axis=4)
fMax_std=np.std(fMax, axis=4)
lag_std=np.std(lag, axis=4)
op_angle_mean_std=np.std(op_angle_mean, axis=4)
op_angle_std_std=np.std(op_angle_std, axis=4)
num_expir_std=np.std(num_expir,axis=4)
avg_firing_rate_std=np.std(avg_firing_rate,axis=4)
amplitude_irregularity_std=np.std(amplitude_irregularity,axis=4)
ibi_irregularity_std=np.std(ibi_irregularity,axis=4)
X = np.transpose(np.tile(ks,(numpI,1)))
Y = np.tile(pIs,(numk,1))
Xg= np.transpose(np.tile(gEs,(numgI,1)))
Yg= np.tile(gIs,(numgE,1))
fErr.close()
scipy.io.savemat(outFn,
mdict={'X':X,
'Y':Y,
'Xg': Xg,
'Yg': Yg,
'chiArray':chiArray_mean,
'fMax': fMax_mean,
'lag': lag_mean,
'op_angle_mean': op_angle_mean_mean,
'op_angle_std': op_angle_std_mean,
'num_expir': num_expir_mean,
'avg_firing_rate': avg_firing_rate_mean,
'amplitude_irregularity':amplitude_irregularity_mean,
'ibi_irregularity':ibi_irregularity_mean,
'chiArray_std':chiArray_std,
'fMax_std': fMax_std,
'lag_std': lag_std,
'op_angle_mean_std': op_angle_mean_std,
'op_angle_std_std': op_angle_std_std,
'num_expir_std': num_expir_std,
'avg_firing_rate_std': avg_firing_rate_std,
'amplitude_irregularity_std':amplitude_irregularity_std,
'ibi_irregularity_std':ibi_irregularity_std,
'ks': ks,
'pIs': pIs,
'gEs': gEs,
'gIs': gIs,
'reps': reps})
df.to_csv(outDfFn)
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
components/remote-environment-broker/cmd/poc-events/events.go
|
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"time"
"github.com/kyma-project/kyma/components/remote-environment-broker/pkg/apis/remoteenvironment/v1alpha1"
"github.com/kyma-project/kyma/components/remote-environment-broker/pkg/client/clientset/versioned"
"github.com/kyma-project/kyma/components/remote-environment-broker/pkg/client/clientset/versioned/scheme"
"github.com/pkg/errors"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/reference"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/record"
typedV1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
func main() {
var kubeconfig *string
if home := os.Getenv("HOME"); home != "" {
kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
} else {
kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
}
flag.Parse()
config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
if err != nil {
panic(err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err)
}
reClient, err := versioned.NewForConfig(config)
if err != nil {
panic(err)
}
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(func(format string, args ...interface{}) {
fmt.Printf(format, args...)
})
broadcaster.StartRecordingToSink(&typedV1.EventSinkImpl{Interface: clientset.CoreV1().Events(metav1.NamespaceDefault)})
eventRecorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "Remote-Environment-Broker"})
re, err := reClient.RemoteenvironmentV1alpha1().RemoteEnvironments().Get("ec-prod", metav1.GetOptions{})
if err != nil {
panic(errors.Wrap(err, "on getting remote environment"))
}
re.Status.Conditions = append(re.Status.Conditions, v1alpha1.ReCondition{
Status: v1alpha1.ConditionTrue,
Type: v1alpha1.Stage1Done,
Message: "Message contains additional information",
Reason: "OneWordCamelCase",
})
_, err = reClient.RemoteenvironmentV1alpha1().RemoteEnvironments().UpdateStatus(re)
if err != nil {
panic(errors.Wrap(err, "while updating status"))
}
ref, err := reference.GetReference(scheme.Scheme, re)
if err != nil {
panic(errors.Wrap(err, "on getting reference for Remote Environment"))
}
eventRecorder.Event(ref, v1.EventTypeWarning, "SomeReason", "Some additional message")
eventRecorder.Event(ref, v1.EventTypeWarning, "SomeReason", "Some additional message")
eventRecorder.Event(ref, v1.EventTypeWarning, "SomeReason", "Some additional message")
time.Sleep(time.Second)
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
pubsub/nox.py
|
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (
os.path.join('..', 'api_core'),
os.path.join('..', 'core'),
)
@nox.session
def default(session):
"""Default unit test session.
This is intended to be run **without** an interpreter set, so
that the current ``python`` (on the ``PATH``) or the version of
    Python corresponding to the ``nox`` binary on the ``PATH`` can
run the tests.
"""
# Install all test dependencies, then install this package in-place.
session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS)
session.install('-e', '.')
# Run py.test against the unit tests.
session.run(
'py.test',
'--quiet',
'--cov-append',
'--cov-report=',
'--cov=google.cloud.pubsub',
'--cov=google.cloud.pubsub_v1',
'--cov-config=.coveragerc',
os.path.join('tests', 'unit'),
*session.posargs
)
@nox.session
@nox.parametrize('py', ['2.7', '3.5', '3.6', '3.7'])
def unit(session, py):
"""Run the unit test suite."""
# Run unit tests against all supported versions of Python.
session.interpreter = 'python{}'.format(py)
# Set the virtualenv dirname.
session.virtualenv_dirname = 'unit-' + py
default(session)
@nox.session
@nox.parametrize('py', ['2.7', '3.6'])
def system(session, py):
"""Run the system test suite."""
# Sanity check: Only run system tests if the environment variable is set.
if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
session.skip('Credentials must be set via environment variable.')
# Run the system tests against latest Python 2 and Python 3 only.
session.interpreter = 'python{}'.format(py)
# Set the virtualenv dirname.
session.virtualenv_dirname = 'sys-' + py
# Use pre-release gRPC for system tests.
session.install('--pre', 'grpcio')
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install('mock', 'pytest', *LOCAL_DEPS)
session.install('../test_utils/')
session.install('.')
# Run py.test against the system tests.
session.run(
'py.test',
'--quiet',
'tests/system.py',
*session.posargs
)
@nox.session
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.interpreter = 'python3.6'
session.install('flake8', *LOCAL_DEPS)
session.install('.')
session.run('flake8', 'google', 'tests')
@nox.session
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.interpreter = 'python3.6'
# Set the virtualenv dirname.
session.virtualenv_dirname = 'setup'
session.install('docutils', 'Pygments')
session.run(
'python', 'setup.py', 'check', '--restructuredtext', '--strict')
@nox.session
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.interpreter = 'python3.6'
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing', '--fail-under=100')
session.run('coverage', 'erase')
|
[] |
[] |
[
"GOOGLE_APPLICATION_CREDENTIALS"
] |
[]
|
["GOOGLE_APPLICATION_CREDENTIALS"]
|
python
| 1 | 0 | |
rootfs/api/settings/testing.py
|
import random
import string
import os
from api.settings.production import * # noqa
from api.settings.production import DATABASES
# A boolean that turns on/off debug mode.
# https://docs.djangoproject.com/en/2.2/ref/settings/#debug
DEBUG = True
# If set to True, Django's normal exception handling of view functions
# will be suppressed, and exceptions will propagate upwards
# https://docs.djangoproject.com/en/2.2/ref/settings/#debug-propagate-exceptions
DEBUG_PROPAGATE_EXCEPTIONS = True
# router information
ROUTER_HOST = 'drycc-router.example.com'
ROUTER_PORT = 80
# randomize test database name so we can run multiple unit tests simultaneously
DATABASES['default']['NAME'] = "unittest-{}".format(''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(8)))
DATABASES['default']['USER'] = 'postgres'
# use DB name to isolate the data for each test run
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': DATABASES['default']['NAME'],
'KEY_PREFIX': DATABASES['default']['NAME'],
}
}
DRYCC_DEFAULT_CONFIG_TAGS = os.environ.get('DRYCC_DEFAULT_CONFIG_TAGS', '')
DRYCC_APP_STORAGE_CLASS = os.environ.get('DRYCC_APP_STORAGE_CLASS', '')
class DisableMigrations(object):
def __contains__(self, item):
return True
def __getitem__(self, item):
return None
MIGRATION_MODULES = DisableMigrations()
|
[] |
[] |
[
"DRYCC_APP_STORAGE_CLASS",
"DRYCC_DEFAULT_CONFIG_TAGS"
] |
[]
|
["DRYCC_APP_STORAGE_CLASS", "DRYCC_DEFAULT_CONFIG_TAGS"]
|
python
| 2 | 0 | |
constants.go
|
package turtle
import "image/color"
// Standard directions.
const (
East = 0.0
North = 90.0
West = 180.0
South = 270.0
)
// Standard colors.
var (
Black = color.RGBA{0, 0, 0, 255}
SoftBlack = color.RGBA{10, 10, 10, 255}
White = color.RGBA{255, 255, 255, 255}
Red = color.RGBA{255, 0, 0, 255}
Green = color.RGBA{0, 255, 0, 255}
Blue = color.RGBA{0, 0, 255, 255}
Cyan = color.RGBA{0, 255, 255, 255}
Magenta = color.RGBA{255, 0, 255, 255}
Yellow = color.RGBA{255, 255, 0, 255}
// I just love this one
DarkOrange = color.RGBA{150, 75, 0, 255}
)
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
pkg/helmexec/exec.go
|
package helmexec
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
type decryptedSecret struct {
mutex sync.RWMutex
bytes []byte
}
type execer struct {
helmBinary string
runner Runner
logger *zap.SugaredLogger
kubeContext string
extra []string
decryptedSecretMutex sync.Mutex
decryptedSecrets map[string]*decryptedSecret
}
func NewLogger(writer io.Writer, logLevel string) *zap.SugaredLogger {
var cfg zapcore.EncoderConfig
cfg.MessageKey = "message"
out := zapcore.AddSync(writer)
var level zapcore.Level
err := level.Set(logLevel)
if err != nil {
panic(err)
}
core := zapcore.NewCore(
zapcore.NewConsoleEncoder(cfg),
out,
level,
)
return zap.New(core).Sugar()
}
// New creates a new execer for running helm commands.
func New(helmBinary string, logger *zap.SugaredLogger, kubeContext string, runner Runner) *execer {
return &execer{
helmBinary: helmBinary,
logger: logger,
kubeContext: kubeContext,
runner: runner,
decryptedSecrets: make(map[string]*decryptedSecret),
}
}
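// Illustrative sketch only (not part of the original source): a hypothetical
// caller could wire the logger and execer together roughly as below, assuming
// a Runner implementation named shellRunner and a placeholder repository URL.
//
//	logger := NewLogger(os.Stderr, "info")
//	helm := New("helm", logger, "my-kube-context", shellRunner)
//	if err := helm.AddRepo("stable", "https://charts.example.com", "", "", "", "", ""); err != nil {
//		logger.Error(err)
//	}
//	_ = helm.UpdateRepo()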
func (helm *execer) SetExtraArgs(args ...string) {
helm.extra = args
}
func (helm *execer) SetHelmBinary(bin string) {
helm.helmBinary = bin
}
func (helm *execer) AddRepo(name, repository, cafile, certfile, keyfile, username, password string) error {
var args []string
args = append(args, "repo", "add", name, repository)
if certfile != "" && keyfile != "" {
args = append(args, "--cert-file", certfile, "--key-file", keyfile)
}
if cafile != "" {
args = append(args, "--ca-file", cafile)
}
if username != "" && password != "" {
args = append(args, "--username", username, "--password", password)
}
helm.logger.Infof("Adding repo %v %v", name, repository)
out, err := helm.exec(args, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) UpdateRepo() error {
helm.logger.Info("Updating repo")
out, err := helm.exec([]string{"repo", "update"}, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) BuildDeps(name, chart string) error {
helm.logger.Infof("Building dependency release=%v, chart=%v", name, chart)
out, err := helm.exec([]string{"dependency", "build", chart}, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) UpdateDeps(chart string) error {
helm.logger.Infof("Updating dependency %v", chart)
out, err := helm.exec([]string{"dependency", "update", chart}, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) SyncRelease(context HelmContext, name, chart string, flags ...string) error {
helm.logger.Infof("Upgrading release=%v, chart=%v", name, chart)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "upgrade", "--install", "--reset-values", name, chart), flags...), env)
helm.write(out)
return err
}
func (helm *execer) ReleaseStatus(context HelmContext, name string, flags ...string) error {
helm.logger.Infof("Getting status %v", name)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "status", name), flags...), env)
helm.write(out)
return err
}
func (helm *execer) List(context HelmContext, filter string, flags ...string) (string, error) {
helm.logger.Infof("Listing releases matching %v", filter)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
var args []string
if helm.isHelm3() {
args = []string{"list", "--filter", filter}
} else {
args = []string{"list", filter}
}
out, err := helm.exec(append(append(preArgs, args...), flags...), env)
helm.write(out)
return string(out), err
}
func (helm *execer) DecryptSecret(context HelmContext, name string, flags ...string) (string, error) {
absPath, err := filepath.Abs(name)
if err != nil {
return "", err
}
helm.logger.Debugf("Preparing to decrypt secret %v", absPath)
helm.decryptedSecretMutex.Lock()
secret, ok := helm.decryptedSecrets[absPath]
// Cache miss
if !ok {
secret = &decryptedSecret{}
helm.decryptedSecrets[absPath] = secret
secret.mutex.Lock()
defer secret.mutex.Unlock()
helm.decryptedSecretMutex.Unlock()
helm.logger.Infof("Decrypting secret %v", absPath)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "secrets", "dec", absPath), flags...), env)
helm.info(out)
if err != nil {
return "", err
}
// HELM_SECRETS_DEC_SUFFIX is used by the helm-secrets plugin to define the output file
decSuffix := os.Getenv("HELM_SECRETS_DEC_SUFFIX")
if len(decSuffix) == 0 {
decSuffix = ".yaml.dec"
}
decFilename := strings.Replace(absPath, ".yaml", decSuffix, 1)
secretBytes, err := ioutil.ReadFile(decFilename)
if err != nil {
return "", err
}
secret.bytes = secretBytes
if err := os.Remove(decFilename); err != nil {
return "", err
}
} else {
// Cache hit
helm.logger.Debugf("Found secret in cache %v", absPath)
secret.mutex.RLock()
helm.decryptedSecretMutex.Unlock()
defer secret.mutex.RUnlock()
}
tmpFile, err := ioutil.TempFile("", "secret")
if err != nil {
return "", err
}
_, err = tmpFile.Write(secret.bytes)
if err != nil {
return "", err
}
return tmpFile.Name(), err
}
func (helm *execer) TemplateRelease(name string, chart string, flags ...string) error {
helm.logger.Infof("Templating release=%v, chart=%v", name, chart)
var args []string
if helm.isHelm3() {
args = []string{"template", name, chart}
} else {
args = []string{"template", chart, "--name", name}
}
out, err := helm.exec(append(args, flags...), map[string]string{})
helm.write(out)
return err
}
func (helm *execer) DiffRelease(context HelmContext, name, chart string, flags ...string) error {
helm.logger.Infof("Comparing release=%v, chart=%v", name, chart)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "diff", "upgrade", "--reset-values", "--allow-unreleased", name, chart), flags...), env)
	// Do our best to write to STDOUT only when a diff exists.
	// Unfortunately, this works only when you run helmfile with `--detailed-exitcode`.
detailedExitcodeEnabled := false
for _, f := range flags {
if strings.Contains(f, "detailed-exitcode") {
detailedExitcodeEnabled = true
break
}
}
if detailedExitcodeEnabled {
switch e := err.(type) {
case ExitError:
if e.ExitStatus() == 2 {
helm.write(out)
return err
}
}
} else {
helm.write(out)
}
return err
}
func (helm *execer) Lint(name, chart string, flags ...string) error {
helm.logger.Infof("Linting release=%v, chart=%v", name, chart)
out, err := helm.exec(append([]string{"lint", chart}, flags...), map[string]string{})
helm.write(out)
return err
}
func (helm *execer) Fetch(chart string, flags ...string) error {
helm.logger.Infof("Fetching %v", chart)
out, err := helm.exec(append([]string{"fetch", chart}, flags...), map[string]string{})
helm.info(out)
return err
}
func (helm *execer) DeleteRelease(context HelmContext, name string, flags ...string) error {
helm.logger.Infof("Deleting %v", name)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "delete", name), flags...), env)
helm.write(out)
return err
}
func (helm *execer) TestRelease(context HelmContext, name string, flags ...string) error {
helm.logger.Infof("Testing %v", name)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
var args []string
if helm.isHelm3() {
args = []string{"test", "run", name}
} else {
args = []string{"test", name}
}
out, err := helm.exec(append(append(preArgs, args...), flags...), env)
helm.write(out)
return err
}
func (helm *execer) exec(args []string, env map[string]string) ([]byte, error) {
cmdargs := args
if len(helm.extra) > 0 {
cmdargs = append(cmdargs, helm.extra...)
}
if helm.kubeContext != "" {
cmdargs = append(cmdargs, "--kube-context", helm.kubeContext)
}
cmd := fmt.Sprintf("exec: %s %s", helm.helmBinary, strings.Join(cmdargs, " "))
helm.logger.Debug(cmd)
bytes, err := helm.runner.Execute(helm.helmBinary, cmdargs, env)
helm.logger.Debugf("%s: %s", cmd, bytes)
return bytes, err
}
func (helm *execer) info(out []byte) {
if len(out) > 0 {
helm.logger.Infof("%s", out)
}
}
func (helm *execer) write(out []byte) {
if len(out) > 0 {
fmt.Printf("%s\n", out)
}
}
func (helm *execer) isHelm3() bool {
return os.Getenv("HELMFILE_HELM3") != ""
}
|
[
"\"HELM_SECRETS_DEC_SUFFIX\"",
"\"HELMFILE_HELM3\""
] |
[] |
[
"HELMFILE_HELM3",
"HELM_SECRETS_DEC_SUFFIX"
] |
[]
|
["HELMFILE_HELM3", "HELM_SECRETS_DEC_SUFFIX"]
|
go
| 2 | 0 | |
libcontainer/container_linux.go
|
package libcontainer
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/checkpoint-restore/go-criu/v5"
criurpc "github.com/checkpoint-restore/go-criu/v5/rpc"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink/nl"
"golang.org/x/sys/unix"
"google.golang.org/protobuf/proto"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/intelrdt"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/runc/libcontainer/utils"
)
const stdioFdCount = 3
type linuxContainer struct {
id string
root string
config *configs.Config
cgroupManager cgroups.Manager
intelRdtManager intelrdt.Manager
initPath string
initArgs []string
initProcess parentProcess
initProcessStartTime uint64
newuidmapPath string
newgidmapPath string
m sync.Mutex
criuVersion int
state containerState
created time.Time
fifo *os.File
}
// State represents a running container's state
type State struct {
BaseState
// Platform specific fields below here
// Specified if the container was started under the rootless mode.
// Set to true if BaseState.Config.RootlessEUID && BaseState.Config.RootlessCgroups
Rootless bool `json:"rootless"`
// Paths to all the container's cgroups, as returned by (*cgroups.Manager).GetPaths
//
// For cgroup v1, a key is cgroup subsystem name, and the value is the path
// to the cgroup for this subsystem.
//
// For cgroup v2 unified hierarchy, a key is "", and the value is the unified path.
CgroupPaths map[string]string `json:"cgroup_paths"`
// NamespacePaths are filepaths to the container's namespaces. Key is the namespace type
// with the value as the path.
NamespacePaths map[configs.NamespaceType]string `json:"namespace_paths"`
// Container's standard descriptors (std{in,out,err}), needed for checkpoint and restore
ExternalDescriptors []string `json:"external_descriptors,omitempty"`
// Intel RDT "resource control" filesystem path
IntelRdtPath string `json:"intel_rdt_path"`
}
// Container is a libcontainer container object.
//
// Each container is thread-safe within the same process. Since a container can
// be destroyed by a separate process, any function may return that the container
// was not found.
type Container interface {
BaseContainer
// Methods below here are platform specific
// Checkpoint checkpoints the running container's state to disk using the criu(8) utility.
Checkpoint(criuOpts *CriuOpts) error
// Restore restores the checkpointed container to a running state using the criu(8) utility.
Restore(process *Process, criuOpts *CriuOpts) error
// If the Container state is RUNNING or CREATED, sets the Container state to PAUSED and pauses
	// the execution of any user processes. Asynchronously, once the container has finished being
	// paused, the state is changed to PAUSED.
// If the Container state is PAUSED, do nothing.
Pause() error
// If the Container state is PAUSED, resumes the execution of any user processes in the
// Container before setting the Container state to RUNNING.
// If the Container state is RUNNING, do nothing.
Resume() error
// NotifyOOM returns a read-only channel signaling when the container receives an OOM notification.
NotifyOOM() (<-chan struct{}, error)
// NotifyMemoryPressure returns a read-only channel signaling when the container reaches a given pressure level
NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error)
}
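// Illustrative sketch only (not part of the original source): a hypothetical
// caller holding a Container could drive a checkpoint roughly as below. The
// images directory is a placeholder and Resume's error is ignored for brevity.
//
//	func pauseAndCheckpoint(c Container) error {
//		if err := c.Pause(); err != nil {
//			return err
//		}
//		defer c.Resume()
//		return c.Checkpoint(&CriuOpts{
//			ImagesDirectory: "/tmp/ckpt",
//			LeaveRunning:    true,
//		})
//	}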
// ID returns the container's unique ID
func (c *linuxContainer) ID() string {
return c.id
}
// Config returns the container's configuration
func (c *linuxContainer) Config() configs.Config {
return *c.config
}
func (c *linuxContainer) Status() (Status, error) {
c.m.Lock()
defer c.m.Unlock()
return c.currentStatus()
}
func (c *linuxContainer) State() (*State, error) {
c.m.Lock()
defer c.m.Unlock()
return c.currentState()
}
func (c *linuxContainer) OCIState() (*specs.State, error) {
c.m.Lock()
defer c.m.Unlock()
return c.currentOCIState()
}
func (c *linuxContainer) Processes() ([]int, error) {
var pids []int
status, err := c.currentStatus()
if err != nil {
return pids, err
}
	// for systemd cgroup, the unit's cgroup path will be auto-removed once all of the container's processes have exited
if status == Stopped && !c.cgroupManager.Exists() {
return pids, nil
}
pids, err = c.cgroupManager.GetAllPids()
if err != nil {
return nil, fmt.Errorf("unable to get all container pids: %w", err)
}
return pids, nil
}
func (c *linuxContainer) Stats() (*Stats, error) {
var (
err error
stats = &Stats{}
)
if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil {
return stats, fmt.Errorf("unable to get container cgroup stats: %w", err)
}
if c.intelRdtManager != nil {
if stats.IntelRdtStats, err = c.intelRdtManager.GetStats(); err != nil {
return stats, fmt.Errorf("unable to get container Intel RDT stats: %w", err)
}
}
for _, iface := range c.config.Networks {
switch iface.Type {
case "veth":
istats, err := getNetworkInterfaceStats(iface.HostInterfaceName)
if err != nil {
return stats, fmt.Errorf("unable to get network stats for interface %q: %w", iface.HostInterfaceName, err)
}
stats.Interfaces = append(stats.Interfaces, istats)
}
}
return stats, nil
}
func (c *linuxContainer) Set(config configs.Config) error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
if status == Stopped {
return ErrNotRunning
}
if err := c.cgroupManager.Set(config.Cgroups.Resources); err != nil {
// Set configs back
if err2 := c.cgroupManager.Set(c.config.Cgroups.Resources); err2 != nil {
logrus.Warnf("Setting back cgroup configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2)
}
return err
}
if c.intelRdtManager != nil {
if err := c.intelRdtManager.Set(&config); err != nil {
// Set configs back
if err2 := c.cgroupManager.Set(c.config.Cgroups.Resources); err2 != nil {
logrus.Warnf("Setting back cgroup configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2)
}
if err2 := c.intelRdtManager.Set(c.config); err2 != nil {
logrus.Warnf("Setting back intelrdt configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2)
}
return err
}
}
// After config setting succeed, update config and states
c.config = &config
_, err = c.updateState(nil)
return err
}
func (c *linuxContainer) Start(process *Process) error {
c.m.Lock()
defer c.m.Unlock()
if c.config.Cgroups.Resources.SkipDevices {
return errors.New("can't start container with SkipDevices set")
}
if process.Init {
if err := c.createExecFifo(); err != nil {
return err
}
}
if err := c.start(process); err != nil {
if process.Init {
c.deleteExecFifo()
}
return err
}
return nil
}
func (c *linuxContainer) Run(process *Process) error {
if err := c.Start(process); err != nil {
return err
}
if process.Init {
return c.exec()
}
return nil
}
func (c *linuxContainer) Exec() error {
c.m.Lock()
defer c.m.Unlock()
return c.exec()
}
func (c *linuxContainer) exec() error {
path := filepath.Join(c.root, execFifoFilename)
pid := c.initProcess.pid()
blockingFifoOpenCh := awaitFifoOpen(path)
for {
select {
case result := <-blockingFifoOpenCh:
return handleFifoResult(result)
case <-time.After(time.Millisecond * 100):
stat, err := system.Stat(pid)
if err != nil || stat.State == system.Zombie {
// could be because process started, ran, and completed between our 100ms timeout and our system.Stat() check.
// see if the fifo exists and has data (with a non-blocking open, which will succeed if the writing process is complete).
if err := handleFifoResult(fifoOpen(path, false)); err != nil {
return errors.New("container process is already dead")
}
return nil
}
}
}
}
func readFromExecFifo(execFifo io.Reader) error {
data, err := io.ReadAll(execFifo)
if err != nil {
return err
}
if len(data) <= 0 {
return errors.New("cannot start an already running container")
}
return nil
}
func awaitFifoOpen(path string) <-chan openResult {
fifoOpened := make(chan openResult)
go func() {
result := fifoOpen(path, true)
fifoOpened <- result
}()
return fifoOpened
}
func fifoOpen(path string, block bool) openResult {
flags := os.O_RDONLY
if !block {
flags |= unix.O_NONBLOCK
}
f, err := os.OpenFile(path, flags, 0)
if err != nil {
return openResult{err: fmt.Errorf("exec fifo: %w", err)}
}
return openResult{file: f}
}
func handleFifoResult(result openResult) error {
if result.err != nil {
return result.err
}
f := result.file
defer f.Close()
if err := readFromExecFifo(f); err != nil {
return err
}
return os.Remove(f.Name())
}
type openResult struct {
file *os.File
err error
}
func (c *linuxContainer) start(process *Process) (retErr error) {
parent, err := c.newParentProcess(process)
if err != nil {
return fmt.Errorf("unable to create new parent process: %w", err)
}
logsDone := parent.forwardChildLogs()
if logsDone != nil {
defer func() {
// Wait for log forwarder to finish. This depends on
// runc init closing the _LIBCONTAINER_LOGPIPE log fd.
err := <-logsDone
if err != nil && retErr == nil {
retErr = fmt.Errorf("unable to forward init logs: %w", err)
}
}()
}
if err := parent.start(); err != nil {
return fmt.Errorf("unable to start container process: %w", err)
}
if process.Init {
c.fifo.Close()
if c.config.Hooks != nil {
s, err := c.currentOCIState()
if err != nil {
return err
}
if err := c.config.Hooks[configs.Poststart].RunHooks(s); err != nil {
if err := ignoreTerminateErrors(parent.terminate()); err != nil {
logrus.Warn(fmt.Errorf("error running poststart hook: %w", err))
}
return err
}
}
}
return nil
}
func (c *linuxContainer) Signal(s os.Signal, all bool) error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
if all {
		// for systemd cgroup, the unit's cgroup path will be auto-removed once all of the container's processes have exited
if status == Stopped && !c.cgroupManager.Exists() {
return nil
}
return signalAllProcesses(c.cgroupManager, s)
}
// to avoid a PID reuse attack
if status == Running || status == Created || status == Paused {
if err := c.initProcess.signal(s); err != nil {
return fmt.Errorf("unable to signal init: %w", err)
}
if status == Paused {
// For cgroup v1, killing a process in a frozen cgroup
// does nothing until it's thawed. Only thaw the cgroup
// for SIGKILL.
if s, ok := s.(unix.Signal); ok && s == unix.SIGKILL {
_ = c.cgroupManager.Freeze(configs.Thawed)
}
}
return nil
}
return ErrNotRunning
}
func (c *linuxContainer) createExecFifo() error {
rootuid, err := c.Config().HostRootUID()
if err != nil {
return err
}
rootgid, err := c.Config().HostRootGID()
if err != nil {
return err
}
fifoName := filepath.Join(c.root, execFifoFilename)
if _, err := os.Stat(fifoName); err == nil {
return fmt.Errorf("exec fifo %s already exists", fifoName)
}
oldMask := unix.Umask(0o000)
if err := unix.Mkfifo(fifoName, 0o622); err != nil {
unix.Umask(oldMask)
return err
}
unix.Umask(oldMask)
return os.Chown(fifoName, rootuid, rootgid)
}
func (c *linuxContainer) deleteExecFifo() {
fifoName := filepath.Join(c.root, execFifoFilename)
os.Remove(fifoName)
}
// includeExecFifo opens the container's execfifo as a pathfd, so that the
// container cannot access the statedir (and the FIFO itself remains
// un-opened). It then adds the FifoFd to the given exec.Cmd as an inherited
// fd, with _LIBCONTAINER_FIFOFD set to its fd number.
func (c *linuxContainer) includeExecFifo(cmd *exec.Cmd) error {
fifoName := filepath.Join(c.root, execFifoFilename)
fifo, err := os.OpenFile(fifoName, unix.O_PATH|unix.O_CLOEXEC, 0)
if err != nil {
return err
}
c.fifo = fifo
cmd.ExtraFiles = append(cmd.ExtraFiles, fifo)
cmd.Env = append(cmd.Env,
"_LIBCONTAINER_FIFOFD="+strconv.Itoa(stdioFdCount+len(cmd.ExtraFiles)-1))
return nil
}
func (c *linuxContainer) newParentProcess(p *Process) (parentProcess, error) {
parentInitPipe, childInitPipe, err := utils.NewSockPair("init")
if err != nil {
return nil, fmt.Errorf("unable to create init pipe: %w", err)
}
messageSockPair := filePair{parentInitPipe, childInitPipe}
parentLogPipe, childLogPipe, err := os.Pipe()
if err != nil {
return nil, fmt.Errorf("unable to create log pipe: %w", err)
}
logFilePair := filePair{parentLogPipe, childLogPipe}
cmd := c.commandTemplate(p, childInitPipe, childLogPipe)
if !p.Init {
return c.newSetnsProcess(p, cmd, messageSockPair, logFilePair)
}
// We only set up fifoFd if we're not doing a `runc exec`. The historic
// reason for this is that previously we would pass a dirfd that allowed
// for container rootfs escape (and not doing it in `runc exec` avoided
// that problem), but we no longer do that. However, there's no need to do
// this for `runc exec` so we just keep it this way to be safe.
if err := c.includeExecFifo(cmd); err != nil {
return nil, fmt.Errorf("unable to setup exec fifo: %w", err)
}
return c.newInitProcess(p, cmd, messageSockPair, logFilePair)
}
func (c *linuxContainer) commandTemplate(p *Process, childInitPipe *os.File, childLogPipe *os.File) *exec.Cmd {
cmd := exec.Command(c.initPath, c.initArgs[1:]...)
cmd.Args[0] = c.initArgs[0]
cmd.Stdin = p.Stdin
cmd.Stdout = p.Stdout
cmd.Stderr = p.Stderr
cmd.Dir = c.config.Rootfs
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = &unix.SysProcAttr{}
}
cmd.Env = append(cmd.Env, "GOMAXPROCS="+os.Getenv("GOMAXPROCS"))
cmd.ExtraFiles = append(cmd.ExtraFiles, p.ExtraFiles...)
if p.ConsoleSocket != nil {
cmd.ExtraFiles = append(cmd.ExtraFiles, p.ConsoleSocket)
cmd.Env = append(cmd.Env,
"_LIBCONTAINER_CONSOLE="+strconv.Itoa(stdioFdCount+len(cmd.ExtraFiles)-1),
)
}
cmd.ExtraFiles = append(cmd.ExtraFiles, childInitPipe)
cmd.Env = append(cmd.Env,
"_LIBCONTAINER_INITPIPE="+strconv.Itoa(stdioFdCount+len(cmd.ExtraFiles)-1),
"_LIBCONTAINER_STATEDIR="+c.root,
)
cmd.ExtraFiles = append(cmd.ExtraFiles, childLogPipe)
cmd.Env = append(cmd.Env,
"_LIBCONTAINER_LOGPIPE="+strconv.Itoa(stdioFdCount+len(cmd.ExtraFiles)-1),
"_LIBCONTAINER_LOGLEVEL="+p.LogLevel,
)
// NOTE: when running a container with no PID namespace and the parent process spawning the container is
// PID1 the pdeathsig is being delivered to the container's init process by the kernel for some reason
// even with the parent still running.
if c.config.ParentDeathSignal > 0 {
cmd.SysProcAttr.Pdeathsig = unix.Signal(c.config.ParentDeathSignal)
}
return cmd
}
// shouldSendMountSources says whether the child process must setup bind mounts with
// the source pre-opened (O_PATH) in the host user namespace.
// See https://github.com/opencontainers/runc/issues/2484
func (c *linuxContainer) shouldSendMountSources() bool {
// Passing the mount sources via SCM_RIGHTS is only necessary when
// both userns and mntns are active.
if !c.config.Namespaces.Contains(configs.NEWUSER) ||
!c.config.Namespaces.Contains(configs.NEWNS) {
return false
}
// nsexec.c send_mountsources() requires setns(mntns) capabilities
// CAP_SYS_CHROOT and CAP_SYS_ADMIN.
if c.config.RootlessEUID {
return false
}
// We need to send sources if there are bind-mounts.
for _, m := range c.config.Mounts {
if m.IsBind() {
return true
}
}
return false
}
func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, messageSockPair, logFilePair filePair) (*initProcess, error) {
cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initStandard))
nsMaps := make(map[configs.NamespaceType]string)
for _, ns := range c.config.Namespaces {
if ns.Path != "" {
nsMaps[ns.Type] = ns.Path
}
}
_, sharePidns := nsMaps[configs.NEWPID]
data, err := c.bootstrapData(c.config.Namespaces.CloneFlags(), nsMaps, initStandard)
if err != nil {
return nil, err
}
if c.shouldSendMountSources() {
// Elements on this slice will be paired with mounts (see StartInitialization() and
// prepareRootfs()). This slice MUST have the same size as c.config.Mounts.
mountFds := make([]int, len(c.config.Mounts))
for i, m := range c.config.Mounts {
if !m.IsBind() {
// Non bind-mounts do not use an fd.
mountFds[i] = -1
continue
}
// The fd passed here will not be used: nsexec.c will overwrite it with dup3(). We just need
// to allocate a fd so that we know the number to pass in the environment variable. The fd
// must not be closed before cmd.Start(), so we reuse messageSockPair.child because the
// lifecycle of that fd is already taken care of.
cmd.ExtraFiles = append(cmd.ExtraFiles, messageSockPair.child)
mountFds[i] = stdioFdCount + len(cmd.ExtraFiles) - 1
}
mountFdsJson, err := json.Marshal(mountFds)
if err != nil {
return nil, fmt.Errorf("Error creating _LIBCONTAINER_MOUNT_FDS: %w", err)
}
cmd.Env = append(cmd.Env,
"_LIBCONTAINER_MOUNT_FDS="+string(mountFdsJson),
)
}
init := &initProcess{
cmd: cmd,
messageSockPair: messageSockPair,
logFilePair: logFilePair,
manager: c.cgroupManager,
intelRdtManager: c.intelRdtManager,
config: c.newInitConfig(p),
container: c,
process: p,
bootstrapData: data,
sharePidns: sharePidns,
}
c.initProcess = init
return init, nil
}
func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, messageSockPair, logFilePair filePair) (*setnsProcess, error) {
cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initSetns))
state, err := c.currentState()
if err != nil {
return nil, fmt.Errorf("unable to get container state: %w", err)
}
// for setns process, we don't have to set cloneflags as the process namespaces
// will only be set via setns syscall
data, err := c.bootstrapData(0, state.NamespacePaths, initSetns)
if err != nil {
return nil, err
}
proc := &setnsProcess{
cmd: cmd,
cgroupPaths: state.CgroupPaths,
rootlessCgroups: c.config.RootlessCgroups,
intelRdtPath: state.IntelRdtPath,
messageSockPair: messageSockPair,
logFilePair: logFilePair,
manager: c.cgroupManager,
config: c.newInitConfig(p),
process: p,
bootstrapData: data,
initProcessPid: state.InitProcessPid,
}
if len(p.SubCgroupPaths) > 0 {
if add, ok := p.SubCgroupPaths[""]; ok {
// cgroup v1: using the same path for all controllers.
// cgroup v2: the only possible way.
for k := range proc.cgroupPaths {
proc.cgroupPaths[k] = path.Join(proc.cgroupPaths[k], add)
}
// cgroup v2: do not try to join init process's cgroup
// as a fallback (see (*setnsProcess).start).
proc.initProcessPid = 0
} else {
// Per-controller paths.
for ctrl, add := range p.SubCgroupPaths {
if val, ok := proc.cgroupPaths[ctrl]; ok {
proc.cgroupPaths[ctrl] = path.Join(val, add)
} else {
return nil, fmt.Errorf("unknown controller %s in SubCgroupPaths", ctrl)
}
}
}
}
return proc, nil
}
func (c *linuxContainer) newInitConfig(process *Process) *initConfig {
cfg := &initConfig{
Config: c.config,
Args: process.Args,
Env: process.Env,
User: process.User,
AdditionalGroups: process.AdditionalGroups,
Cwd: process.Cwd,
Capabilities: process.Capabilities,
PassedFilesCount: len(process.ExtraFiles),
ContainerID: c.ID(),
NoNewPrivileges: c.config.NoNewPrivileges,
RootlessEUID: c.config.RootlessEUID,
RootlessCgroups: c.config.RootlessCgroups,
AppArmorProfile: c.config.AppArmorProfile,
ProcessLabel: c.config.ProcessLabel,
Rlimits: c.config.Rlimits,
CreateConsole: process.ConsoleSocket != nil,
ConsoleWidth: process.ConsoleWidth,
ConsoleHeight: process.ConsoleHeight,
}
if process.NoNewPrivileges != nil {
cfg.NoNewPrivileges = *process.NoNewPrivileges
}
if process.AppArmorProfile != "" {
cfg.AppArmorProfile = process.AppArmorProfile
}
if process.Label != "" {
cfg.ProcessLabel = process.Label
}
if len(process.Rlimits) > 0 {
cfg.Rlimits = process.Rlimits
}
if cgroups.IsCgroup2UnifiedMode() {
cfg.Cgroup2Path = c.cgroupManager.Path("")
}
return cfg
}
func (c *linuxContainer) Destroy() error {
c.m.Lock()
defer c.m.Unlock()
return c.state.destroy()
}
func (c *linuxContainer) Pause() error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
switch status {
case Running, Created:
if err := c.cgroupManager.Freeze(configs.Frozen); err != nil {
return err
}
return c.state.transition(&pausedState{
c: c,
})
}
return ErrNotRunning
}
func (c *linuxContainer) Resume() error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
if status != Paused {
return ErrNotPaused
}
if err := c.cgroupManager.Freeze(configs.Thawed); err != nil {
return err
}
return c.state.transition(&runningState{
c: c,
})
}
func (c *linuxContainer) NotifyOOM() (<-chan struct{}, error) {
// XXX(cyphar): This requires cgroups.
if c.config.RootlessCgroups {
logrus.Warn("getting OOM notifications may fail if you don't have the full access to cgroups")
}
path := c.cgroupManager.Path("memory")
if cgroups.IsCgroup2UnifiedMode() {
return notifyOnOOMV2(path)
}
return notifyOnOOM(path)
}
func (c *linuxContainer) NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) {
// XXX(cyphar): This requires cgroups.
if c.config.RootlessCgroups {
logrus.Warn("getting memory pressure notifications may fail if you don't have the full access to cgroups")
}
return notifyMemoryPressure(c.cgroupManager.Path("memory"), level)
}
var criuFeatures *criurpc.CriuFeatures
func (c *linuxContainer) checkCriuFeatures(criuOpts *CriuOpts, rpcOpts *criurpc.CriuOpts, criuFeat *criurpc.CriuFeatures) error {
t := criurpc.CriuReqType_FEATURE_CHECK
// make sure the features we are looking for are really not from
// some previous check
criuFeatures = nil
req := &criurpc.CriuReq{
Type: &t,
// Theoretically this should not be necessary but CRIU
// segfaults if Opts is empty.
// Fixed in CRIU 2.12
Opts: rpcOpts,
Features: criuFeat,
}
err := c.criuSwrk(nil, req, criuOpts, nil)
if err != nil {
logrus.Debugf("%s", err)
return errors.New("CRIU feature check failed")
}
missingFeatures := false
// The outer if checks if the fields actually exist
if (criuFeat.MemTrack != nil) &&
(criuFeatures.MemTrack != nil) {
// The inner if checks if they are set to true
if *criuFeat.MemTrack && !*criuFeatures.MemTrack {
missingFeatures = true
logrus.Debugf("CRIU does not support MemTrack")
}
}
// This needs to be repeated for every new feature check.
// Is there a way to put this in a function. Reflection?
if (criuFeat.LazyPages != nil) &&
(criuFeatures.LazyPages != nil) {
if *criuFeat.LazyPages && !*criuFeatures.LazyPages {
missingFeatures = true
logrus.Debugf("CRIU does not support LazyPages")
}
}
if missingFeatures {
return errors.New("CRIU is missing features")
}
return nil
}
func compareCriuVersion(criuVersion int, minVersion int) error {
// simple function to perform the actual version compare
if criuVersion < minVersion {
return fmt.Errorf("CRIU version %d must be %d or higher", criuVersion, minVersion)
}
return nil
}
// checkCriuVersion checks Criu version greater than or equal to minVersion
func (c *linuxContainer) checkCriuVersion(minVersion int) error {
// If the version of criu has already been determined there is no need
// to ask criu for the version again. Use the value from c.criuVersion.
if c.criuVersion != 0 {
return compareCriuVersion(c.criuVersion, minVersion)
}
criu := criu.MakeCriu()
var err error
c.criuVersion, err = criu.GetCriuVersion()
if err != nil {
return fmt.Errorf("CRIU version check failed: %w", err)
}
return compareCriuVersion(c.criuVersion, minVersion)
}
const descriptorsFilename = "descriptors.json"
func (c *linuxContainer) addCriuDumpMount(req *criurpc.CriuReq, m *configs.Mount) {
mountDest := strings.TrimPrefix(m.Destination, c.config.Rootfs)
if dest, err := securejoin.SecureJoin(c.config.Rootfs, mountDest); err == nil {
mountDest = dest[len(c.config.Rootfs):]
}
extMnt := &criurpc.ExtMountMap{
Key: proto.String(mountDest),
Val: proto.String(mountDest),
}
req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
}
func (c *linuxContainer) addMaskPaths(req *criurpc.CriuReq) error {
for _, path := range c.config.MaskPaths {
fi, err := os.Stat(fmt.Sprintf("/proc/%d/root/%s", c.initProcess.pid(), path))
if err != nil {
if os.IsNotExist(err) {
continue
}
return err
}
if fi.IsDir() {
continue
}
extMnt := &criurpc.ExtMountMap{
Key: proto.String(path),
Val: proto.String("/dev/null"),
}
req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
}
return nil
}
func (c *linuxContainer) handleCriuConfigurationFile(rpcOpts *criurpc.CriuOpts) {
// CRIU will evaluate a configuration starting with release 3.11.
// Settings in the configuration file will overwrite RPC settings.
// Look for annotations. The annotation 'org.criu.config'
// specifies if CRIU should use a different, container specific
// configuration file.
_, annotations := utils.Annotations(c.config.Labels)
configFile, exists := annotations["org.criu.config"]
if exists {
// If the annotation 'org.criu.config' exists and is set
// to a non-empty string, tell CRIU to use that as a
// configuration file. If the file does not exist, CRIU
// will just ignore it.
if configFile != "" {
rpcOpts.ConfigFile = proto.String(configFile)
}
// If 'org.criu.config' exists and is set to an empty
// string, a runc specific CRIU configuration file will
		// not be set at all.
} else {
// If the mentioned annotation has not been found, specify
// a default CRIU configuration file.
rpcOpts.ConfigFile = proto.String("/etc/criu/runc.conf")
}
}
func (c *linuxContainer) criuSupportsExtNS(t configs.NamespaceType) bool {
var minVersion int
switch t {
case configs.NEWNET:
		// CRIU supports different external namespaces starting with different CRIU releases.
// For network namespaces to work we need at least criu 3.11.0 => 31100.
minVersion = 31100
case configs.NEWPID:
// For PID namespaces criu 31500 is needed.
minVersion = 31500
default:
return false
}
return c.checkCriuVersion(minVersion) == nil
}
func criuNsToKey(t configs.NamespaceType) string {
return "extRoot" + strings.Title(configs.NsName(t)) + "NS"
}
func (c *linuxContainer) handleCheckpointingExternalNamespaces(rpcOpts *criurpc.CriuOpts, t configs.NamespaceType) error {
if !c.criuSupportsExtNS(t) {
return nil
}
nsPath := c.config.Namespaces.PathOf(t)
if nsPath == "" {
return nil
}
// CRIU expects the information about an external namespace
// like this: --external <TYPE>[<inode>]:<key>
// This <key> is always 'extRoot<TYPE>NS'.
var ns unix.Stat_t
if err := unix.Stat(nsPath, &ns); err != nil {
return err
}
criuExternal := fmt.Sprintf("%s[%d]:%s", configs.NsName(t), ns.Ino, criuNsToKey(t))
rpcOpts.External = append(rpcOpts.External, criuExternal)
return nil
}
func (c *linuxContainer) handleRestoringNamespaces(rpcOpts *criurpc.CriuOpts, extraFiles *[]*os.File) error {
for _, ns := range c.config.Namespaces {
switch ns.Type {
case configs.NEWNET, configs.NEWPID:
// If the container is running in a network or PID namespace and has
// a path to the network or PID namespace configured, we will dump
// that network or PID namespace as an external namespace and we
// will expect that the namespace exists during restore.
// This basically means that CRIU will ignore the namespace
			// and expect it to be set up correctly.
if err := c.handleRestoringExternalNamespaces(rpcOpts, extraFiles, ns.Type); err != nil {
return err
}
default:
// For all other namespaces except NET and PID CRIU has
// a simpler way of joining the existing namespace if set
nsPath := c.config.Namespaces.PathOf(ns.Type)
if nsPath == "" {
continue
}
if ns.Type == configs.NEWCGROUP {
// CRIU has no code to handle NEWCGROUP
return fmt.Errorf("Do not know how to handle namespace %v", ns.Type)
}
// CRIU has code to handle NEWTIME, but it does not seem to be defined in runc
// CRIU will issue a warning for NEWUSER:
// criu/namespaces.c: 'join-ns with user-namespace is not fully tested and dangerous'
rpcOpts.JoinNs = append(rpcOpts.JoinNs, &criurpc.JoinNamespace{
Ns: proto.String(configs.NsName(ns.Type)),
NsFile: proto.String(nsPath),
})
}
}
return nil
}
func (c *linuxContainer) handleRestoringExternalNamespaces(rpcOpts *criurpc.CriuOpts, extraFiles *[]*os.File, t configs.NamespaceType) error {
if !c.criuSupportsExtNS(t) {
return nil
}
nsPath := c.config.Namespaces.PathOf(t)
if nsPath == "" {
return nil
}
// CRIU wants the information about an existing namespace
// like this: --inherit-fd fd[<fd>]:<key>
// The <key> needs to be the same as during checkpointing.
// We are always using 'extRoot<TYPE>NS' as the key in this.
nsFd, err := os.Open(nsPath)
if err != nil {
logrus.Errorf("If a specific network namespace is defined it must exist: %s", err)
return fmt.Errorf("Requested network namespace %v does not exist", nsPath)
}
inheritFd := &criurpc.InheritFd{
Key: proto.String(criuNsToKey(t)),
// The offset of four is necessary because 0, 1, 2 and 3 are
// already used by stdin, stdout, stderr, 'criu swrk' socket.
Fd: proto.Int32(int32(4 + len(*extraFiles))),
}
rpcOpts.InheritFd = append(rpcOpts.InheritFd, inheritFd)
// All open FDs need to be transferred to CRIU via extraFiles
*extraFiles = append(*extraFiles, nsFd)
return nil
}
func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error {
c.m.Lock()
defer c.m.Unlock()
// Checkpoint is unlikely to work if os.Geteuid() != 0 || system.RunningInUserNS().
// (CLI prints a warning)
// TODO(avagin): Figure out how to make this work nicely. CRIU 2.0 has
// support for doing unprivileged dumps, but the setup of
// rootless containers might make this complicated.
// We are relying on the CRIU version RPC which was introduced with CRIU 3.0.0
if err := c.checkCriuVersion(30000); err != nil {
return err
}
if criuOpts.ImagesDirectory == "" {
return errors.New("invalid directory to save checkpoint")
}
// Since a container can be C/R'ed multiple times,
// the checkpoint directory may already exist.
if err := os.Mkdir(criuOpts.ImagesDirectory, 0o700); err != nil && !os.IsExist(err) {
return err
}
imageDir, err := os.Open(criuOpts.ImagesDirectory)
if err != nil {
return err
}
defer imageDir.Close()
rpcOpts := criurpc.CriuOpts{
ImagesDirFd: proto.Int32(int32(imageDir.Fd())),
LogLevel: proto.Int32(4),
LogFile: proto.String("dump.log"),
Root: proto.String(c.config.Rootfs),
ManageCgroups: proto.Bool(true),
NotifyScripts: proto.Bool(true),
Pid: proto.Int32(int32(c.initProcess.pid())),
ShellJob: proto.Bool(criuOpts.ShellJob),
LeaveRunning: proto.Bool(criuOpts.LeaveRunning),
TcpEstablished: proto.Bool(criuOpts.TcpEstablished),
ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections),
FileLocks: proto.Bool(criuOpts.FileLocks),
EmptyNs: proto.Uint32(criuOpts.EmptyNs),
OrphanPtsMaster: proto.Bool(true),
AutoDedup: proto.Bool(criuOpts.AutoDedup),
LazyPages: proto.Bool(criuOpts.LazyPages),
}
// if criuOpts.WorkDirectory is not set, criu default is used.
if criuOpts.WorkDirectory != "" {
if err := os.Mkdir(criuOpts.WorkDirectory, 0o700); err != nil && !os.IsExist(err) {
return err
}
workDir, err := os.Open(criuOpts.WorkDirectory)
if err != nil {
return err
}
defer workDir.Close()
rpcOpts.WorkDirFd = proto.Int32(int32(workDir.Fd()))
}
c.handleCriuConfigurationFile(&rpcOpts)
// If the container is running in a network namespace and has
// a path to the network namespace configured, we will dump
// that network namespace as an external namespace and we
// will expect that the namespace exists during restore.
// This basically means that CRIU will ignore the namespace
	// and expect it to be set up correctly.
if err := c.handleCheckpointingExternalNamespaces(&rpcOpts, configs.NEWNET); err != nil {
return err
}
// Same for possible external PID namespaces
if err := c.handleCheckpointingExternalNamespaces(&rpcOpts, configs.NEWPID); err != nil {
return err
}
// CRIU can use cgroup freezer; when rpcOpts.FreezeCgroup
// is not set, CRIU uses ptrace() to pause the processes.
// Note cgroup v2 freezer is only supported since CRIU release 3.14.
if !cgroups.IsCgroup2UnifiedMode() || c.checkCriuVersion(31400) == nil {
if fcg := c.cgroupManager.Path("freezer"); fcg != "" {
rpcOpts.FreezeCgroup = proto.String(fcg)
}
}
// append optional criu opts, e.g., page-server and port
if criuOpts.PageServer.Address != "" && criuOpts.PageServer.Port != 0 {
rpcOpts.Ps = &criurpc.CriuPageServerInfo{
Address: proto.String(criuOpts.PageServer.Address),
Port: proto.Int32(criuOpts.PageServer.Port),
}
}
// pre-dump may need parentImage param to complete iterative migration
if criuOpts.ParentImage != "" {
rpcOpts.ParentImg = proto.String(criuOpts.ParentImage)
rpcOpts.TrackMem = proto.Bool(true)
}
// append optional manage cgroups mode
if criuOpts.ManageCgroupsMode != 0 {
mode := criuOpts.ManageCgroupsMode
rpcOpts.ManageCgroupsMode = &mode
}
var t criurpc.CriuReqType
if criuOpts.PreDump {
feat := criurpc.CriuFeatures{
MemTrack: proto.Bool(true),
}
if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil {
return err
}
t = criurpc.CriuReqType_PRE_DUMP
} else {
t = criurpc.CriuReqType_DUMP
}
if criuOpts.LazyPages {
// lazy migration requested; check if criu supports it
feat := criurpc.CriuFeatures{
LazyPages: proto.Bool(true),
}
if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil {
return err
}
if fd := criuOpts.StatusFd; fd != -1 {
// check that the FD is valid
flags, err := unix.FcntlInt(uintptr(fd), unix.F_GETFL, 0)
if err != nil {
return fmt.Errorf("invalid --status-fd argument %d: %w", fd, err)
}
// and writable
if flags&unix.O_WRONLY == 0 {
return fmt.Errorf("invalid --status-fd argument %d: not writable", fd)
}
if c.checkCriuVersion(31500) != nil {
// For criu 3.15+, use notifications (see case "status-ready"
// in criuNotifications). Otherwise, rely on criu status fd.
rpcOpts.StatusFd = proto.Int32(int32(fd))
}
}
}
req := &criurpc.CriuReq{
Type: &t,
Opts: &rpcOpts,
}
// no need to dump all this in pre-dump
if !criuOpts.PreDump {
hasCgroupns := c.config.Namespaces.Contains(configs.NEWCGROUP)
for _, m := range c.config.Mounts {
switch m.Device {
case "bind":
c.addCriuDumpMount(req, m)
case "cgroup":
if cgroups.IsCgroup2UnifiedMode() || hasCgroupns {
// real mount(s)
continue
}
// a set of "external" bind mounts
binds, err := getCgroupMounts(m)
if err != nil {
return err
}
for _, b := range binds {
c.addCriuDumpMount(req, b)
}
}
}
if err := c.addMaskPaths(req); err != nil {
return err
}
for _, node := range c.config.Devices {
m := &configs.Mount{Destination: node.Path, Source: node.Path}
c.addCriuDumpMount(req, m)
}
// Write the FD info to a file in the image directory
fdsJSON, err := json.Marshal(c.initProcess.externalDescriptors())
if err != nil {
return err
}
err = os.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0o600)
if err != nil {
return err
}
}
err = c.criuSwrk(nil, req, criuOpts, nil)
if err != nil {
return err
}
return nil
}
func (c *linuxContainer) addCriuRestoreMount(req *criurpc.CriuReq, m *configs.Mount) {
mountDest := strings.TrimPrefix(m.Destination, c.config.Rootfs)
if dest, err := securejoin.SecureJoin(c.config.Rootfs, mountDest); err == nil {
mountDest = dest[len(c.config.Rootfs):]
}
extMnt := &criurpc.ExtMountMap{
Key: proto.String(mountDest),
Val: proto.String(m.Source),
}
req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
}
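// restoreNetwork tells CRIU which veth pairs to re-create on restore: the
// interfaces from the container config plus any extra pairs supplied in
// criuOpts.VethPairs; loopback needs no special handling.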
func (c *linuxContainer) restoreNetwork(req *criurpc.CriuReq, criuOpts *CriuOpts) {
for _, iface := range c.config.Networks {
switch iface.Type {
case "veth":
veth := new(criurpc.CriuVethPair)
veth.IfOut = proto.String(iface.HostInterfaceName)
veth.IfIn = proto.String(iface.Name)
req.Opts.Veths = append(req.Opts.Veths, veth)
case "loopback":
// Do nothing
}
}
for _, i := range criuOpts.VethPairs {
veth := new(criurpc.CriuVethPair)
veth.IfOut = proto.String(i.HostInterfaceName)
veth.IfIn = proto.String(i.ContainerInterfaceName)
req.Opts.Veths = append(req.Opts.Veths, veth)
}
}
// makeCriuRestoreMountpoints makes the actual mountpoints for the
// restore using CRIU. This function is inspired by the code in
// rootfs_linux.go.
func (c *linuxContainer) makeCriuRestoreMountpoints(m *configs.Mount) error {
switch m.Device {
case "cgroup":
// No mount point(s) need to be created:
//
// * for v1, mount points are saved by CRIU because
// /sys/fs/cgroup is a tmpfs mount
//
// * for v2, /sys/fs/cgroup is a real mount, but
// the mountpoint appears as soon as /sys is mounted
return nil
case "bind":
// The prepareBindMount() function checks if source
// exists. So it cannot be used for other filesystem types.
// TODO: pass something else than nil? Not sure if criu is
// impacted by issue #2484
if err := prepareBindMount(m, c.config.Rootfs, nil); err != nil {
return err
}
default:
// for all other filesystems just create the mountpoints
dest, err := securejoin.SecureJoin(c.config.Rootfs, m.Destination)
if err != nil {
return err
}
if err := checkProcMount(c.config.Rootfs, dest, ""); err != nil {
return err
}
if err := os.MkdirAll(dest, 0o755); err != nil {
return err
}
}
return nil
}
// isPathInPrefixList is a small helper for CRIU restore to make sure
// mountpoints that live on a tmpfs are not created in the rootfs.
func isPathInPrefixList(path string, prefix []string) bool {
for _, p := range prefix {
if strings.HasPrefix(path, p+"/") {
return true
}
}
return false
}
// prepareCriuRestoreMounts tries to set up the rootfs of the
// container to be restored in the same way runc does it for
// initial container creation. Even for a read-only rootfs container
// runc modifies the rootfs to add mountpoints which do not exist.
// This function also creates missing mountpoints as long as they
// are not on top of a tmpfs, as CRIU will restore tmpfs content anyway.
func (c *linuxContainer) prepareCriuRestoreMounts(mounts []*configs.Mount) error {
// First get a list of all tmpfs mounts
tmpfs := []string{}
for _, m := range mounts {
switch m.Device {
case "tmpfs":
tmpfs = append(tmpfs, m.Destination)
}
}
// Now go through all mounts and create the mountpoints
// if the mountpoints are not on a tmpfs, as CRIU will
// restore the complete tmpfs content from its checkpoint.
umounts := []string{}
defer func() {
for _, u := range umounts {
_ = utils.WithProcfd(c.config.Rootfs, u, func(procfd string) error {
if e := unix.Unmount(procfd, unix.MNT_DETACH); e != nil {
if e != unix.EINVAL { //nolint:errorlint // unix errors are bare
// Ignore EINVAL as it means 'target is not a mount point.'
// It probably has already been unmounted.
logrus.Warnf("Error during cleanup unmounting of %s (%s): %v", procfd, u, e)
}
}
return nil
})
}
}()
for _, m := range mounts {
if !isPathInPrefixList(m.Destination, tmpfs) {
if err := c.makeCriuRestoreMountpoints(m); err != nil {
return err
}
// If the mount point is a bind mount, we need to mount
// it now so that runc can create the necessary mount
// points for mounts in bind mounts.
// This also happens during initial container creation.
// Without this, CRIU restore will fail.
// See: https://github.com/opencontainers/runc/issues/2748
// It is also not necessary to order the mount points
// because during initial container creation mounts are
// set up in the order they are configured.
if m.Device == "bind" {
if err := utils.WithProcfd(c.config.Rootfs, m.Destination, func(procfd string) error {
if err := mount(m.Source, m.Destination, procfd, "", unix.MS_BIND|unix.MS_REC, ""); err != nil {
return err
}
return nil
}); err != nil {
return err
}
umounts = append(umounts, m.Destination)
}
}
}
return nil
}
func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error {
c.m.Lock()
defer c.m.Unlock()
var extraFiles []*os.File
// Restore is unlikely to work if os.Geteuid() != 0 || system.RunningInUserNS().
// (CLI prints a warning)
// TODO(avagin): Figure out how to make this work nicely. CRIU doesn't have
// support for unprivileged restore at the moment.
// We are relying on the CRIU version RPC which was introduced with CRIU 3.0.0
if err := c.checkCriuVersion(30000); err != nil {
return err
}
if criuOpts.ImagesDirectory == "" {
return errors.New("invalid directory to restore checkpoint")
}
imageDir, err := os.Open(criuOpts.ImagesDirectory)
if err != nil {
return err
}
defer imageDir.Close()
// CRIU has a few requirements for a root directory:
// * it must be a mount point
// * its parent must not be overmounted
// c.config.Rootfs is bind-mounted to a temporary directory
// to satisfy these requirements.
root := filepath.Join(c.root, "criu-root")
if err := os.Mkdir(root, 0o755); err != nil {
return err
}
defer os.Remove(root)
root, err = filepath.EvalSymlinks(root)
if err != nil {
return err
}
err = mount(c.config.Rootfs, root, "", "", unix.MS_BIND|unix.MS_REC, "")
if err != nil {
return err
}
defer unix.Unmount(root, unix.MNT_DETACH) //nolint: errcheck
t := criurpc.CriuReqType_RESTORE
req := &criurpc.CriuReq{
Type: &t,
Opts: &criurpc.CriuOpts{
ImagesDirFd: proto.Int32(int32(imageDir.Fd())),
EvasiveDevices: proto.Bool(true),
LogLevel: proto.Int32(4),
LogFile: proto.String("restore.log"),
RstSibling: proto.Bool(true),
Root: proto.String(root),
ManageCgroups: proto.Bool(true),
NotifyScripts: proto.Bool(true),
ShellJob: proto.Bool(criuOpts.ShellJob),
ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections),
TcpEstablished: proto.Bool(criuOpts.TcpEstablished),
FileLocks: proto.Bool(criuOpts.FileLocks),
EmptyNs: proto.Uint32(criuOpts.EmptyNs),
OrphanPtsMaster: proto.Bool(true),
AutoDedup: proto.Bool(criuOpts.AutoDedup),
LazyPages: proto.Bool(criuOpts.LazyPages),
},
}
if criuOpts.LsmProfile != "" {
// CRIU older than 3.16 has a bug which breaks the possibility
// to set a different LSM profile.
if err := c.checkCriuVersion(31600); err != nil {
return errors.New("--lsm-profile requires at least CRIU 3.16")
}
req.Opts.LsmProfile = proto.String(criuOpts.LsmProfile)
}
if criuOpts.LsmMountContext != "" {
if err := c.checkCriuVersion(31600); err != nil {
return errors.New("--lsm-mount-context requires at least CRIU 3.16")
}
req.Opts.LsmMountContext = proto.String(criuOpts.LsmMountContext)
}
if criuOpts.WorkDirectory != "" {
// Since a container can be C/R'ed multiple times,
// the work directory may already exist.
if err := os.Mkdir(criuOpts.WorkDirectory, 0o700); err != nil && !os.IsExist(err) {
return err
}
workDir, err := os.Open(criuOpts.WorkDirectory)
if err != nil {
return err
}
defer workDir.Close()
req.Opts.WorkDirFd = proto.Int32(int32(workDir.Fd()))
}
c.handleCriuConfigurationFile(req.Opts)
if err := c.handleRestoringNamespaces(req.Opts, &extraFiles); err != nil {
return err
}
// This will modify the rootfs of the container in the same way runc
// modifies the container during initial creation.
if err := c.prepareCriuRestoreMounts(c.config.Mounts); err != nil {
return err
}
hasCgroupns := c.config.Namespaces.Contains(configs.NEWCGROUP)
for _, m := range c.config.Mounts {
switch m.Device {
case "bind":
c.addCriuRestoreMount(req, m)
case "cgroup":
if cgroups.IsCgroup2UnifiedMode() || hasCgroupns {
continue
}
// cgroup v1 is a set of bind mounts, unless cgroupns is used
binds, err := getCgroupMounts(m)
if err != nil {
return err
}
for _, b := range binds {
c.addCriuRestoreMount(req, b)
}
}
}
if len(c.config.MaskPaths) > 0 {
m := &configs.Mount{Destination: "/dev/null", Source: "/dev/null"}
c.addCriuRestoreMount(req, m)
}
for _, node := range c.config.Devices {
m := &configs.Mount{Destination: node.Path, Source: node.Path}
c.addCriuRestoreMount(req, m)
}
if criuOpts.EmptyNs&unix.CLONE_NEWNET == 0 {
c.restoreNetwork(req, criuOpts)
}
// append optional manage cgroups mode
if criuOpts.ManageCgroupsMode != 0 {
mode := criuOpts.ManageCgroupsMode
req.Opts.ManageCgroupsMode = &mode
}
var (
fds []string
fdJSON []byte
)
if fdJSON, err = os.ReadFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename)); err != nil {
return err
}
if err := json.Unmarshal(fdJSON, &fds); err != nil {
return err
}
for i := range fds {
if s := fds[i]; strings.Contains(s, "pipe:") {
inheritFd := new(criurpc.InheritFd)
inheritFd.Key = proto.String(s)
inheritFd.Fd = proto.Int32(int32(i))
req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd)
}
}
err = c.criuSwrk(process, req, criuOpts, extraFiles)
// Now that CRIU is done let's close all opened FDs CRIU needed.
for _, fd := range extraFiles {
fd.Close()
}
return err
}
func (c *linuxContainer) criuApplyCgroups(pid int, req *criurpc.CriuReq) error {
// need to apply cgroups only on restore
if req.GetType() != criurpc.CriuReqType_RESTORE {
return nil
}
// XXX: Do we need to deal with this case? AFAIK criu still requires root.
if err := c.cgroupManager.Apply(pid); err != nil {
return err
}
if err := c.cgroupManager.Set(c.config.Cgroups.Resources); err != nil {
return err
}
if cgroups.IsCgroup2UnifiedMode() {
return nil
}
// the stuff below is cgroupv1-specific
path := fmt.Sprintf("/proc/%d/cgroup", pid)
cgroupsPaths, err := cgroups.ParseCgroupFile(path)
if err != nil {
return err
}
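// Tell CRIU where each cgroup v1 controller hierarchy is rooted for the
// process being restored, based on /proc/<pid>/cgroup of the criu process.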
for c, p := range cgroupsPaths {
cgroupRoot := &criurpc.CgroupRoot{
Ctrl: proto.String(c),
Path: proto.String(p),
}
req.Opts.CgRoot = append(req.Opts.CgRoot, cgroupRoot)
}
return nil
}
func (c *linuxContainer) criuSwrk(process *Process, req *criurpc.CriuReq, opts *CriuOpts, extraFiles []*os.File) error {
fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC, 0)
if err != nil {
return err
}
var logPath string
if opts != nil {
logPath = filepath.Join(opts.WorkDirectory, req.GetOpts().GetLogFile())
} else {
// For the VERSION RPC 'opts' is set to 'nil' and therefore
// opts.WorkDirectory does not exist. Set logPath to "".
logPath = ""
}
criuClient := os.NewFile(uintptr(fds[0]), "criu-transport-client")
criuClientFileCon, err := net.FileConn(criuClient)
criuClient.Close()
if err != nil {
return err
}
criuClientCon := criuClientFileCon.(*net.UnixConn)
defer criuClientCon.Close()
criuServer := os.NewFile(uintptr(fds[1]), "criu-transport-server")
defer criuServer.Close()
if c.criuVersion != 0 {
// If the CRIU Version is still '0' then this is probably
// the initial CRIU run to detect the version. Skip it.
logrus.Debugf("Using CRIU %d", c.criuVersion)
}
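// Start CRIU in "swrk" (service worker) mode: it serves the CRIU RPC protocol
// on the file descriptor given as argument (3 here, i.e. the first entry in
// cmd.ExtraFiles, which is criuServer).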
cmd := exec.Command("criu", "swrk", "3")
if process != nil {
cmd.Stdin = process.Stdin
cmd.Stdout = process.Stdout
cmd.Stderr = process.Stderr
}
cmd.ExtraFiles = append(cmd.ExtraFiles, criuServer)
if extraFiles != nil {
cmd.ExtraFiles = append(cmd.ExtraFiles, extraFiles...)
}
if err := cmd.Start(); err != nil {
return err
}
// we close criuServer so that even if CRIU crashes or unexpectedly exits, runc will not hang.
criuServer.Close()
// cmd.Process will be replaced by a restored init.
criuProcess := cmd.Process
var criuProcessState *os.ProcessState
defer func() {
if criuProcessState == nil {
criuClientCon.Close()
_, err := criuProcess.Wait()
if err != nil {
logrus.Warnf("wait on criuProcess returned %v", err)
}
}
}()
if err := c.criuApplyCgroups(criuProcess.Pid, req); err != nil {
return err
}
var extFds []string
if process != nil {
extFds, err = getPipeFds(criuProcess.Pid)
if err != nil {
return err
}
}
logrus.Debugf("Using CRIU in %s mode", req.GetType().String())
// In the case of criurpc.CriuReqType_FEATURE_CHECK req.GetOpts()
// should be empty. For older CRIU versions it still will be
// available but empty. criurpc.CriuReqType_VERSION actually
// has no req.GetOpts().
if logrus.GetLevel() >= logrus.DebugLevel &&
!(req.GetType() == criurpc.CriuReqType_FEATURE_CHECK ||
req.GetType() == criurpc.CriuReqType_VERSION) {
val := reflect.ValueOf(req.GetOpts())
v := reflect.Indirect(val)
for i := 0; i < v.NumField(); i++ {
st := v.Type()
name := st.Field(i).Name
if 'A' <= name[0] && name[0] <= 'Z' {
value := val.MethodByName("Get" + name).Call([]reflect.Value{})
logrus.Debugf("CRIU option %s with value %v", name, value[0])
}
}
}
data, err := proto.Marshal(req)
if err != nil {
return err
}
_, err = criuClientCon.Write(data)
if err != nil {
return err
}
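// Each CRIU reply arrives as a single SOCK_SEQPACKET message, so buf must be
// large enough to hold a complete CriuResp; oob receives any ancillary data,
// e.g. the pts master fd passed back during restore.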
buf := make([]byte, 10*4096)
oob := make([]byte, 4096)
for {
n, oobn, _, _, err := criuClientCon.ReadMsgUnix(buf, oob)
if req.Opts != nil && req.Opts.StatusFd != nil {
// Close status_fd as soon as we got something back from criu,
// assuming it has consumed (reopened) it by this time.
// Otherwise it might be left open forever and whoever
// is waiting on it will wait forever.
fd := int(*req.Opts.StatusFd)
_ = unix.Close(fd)
req.Opts.StatusFd = nil
}
if err != nil {
return err
}
if n == 0 {
return errors.New("unexpected EOF")
}
if n == len(buf) {
return errors.New("buffer is too small")
}
resp := new(criurpc.CriuResp)
err = proto.Unmarshal(buf[:n], resp)
if err != nil {
return err
}
if !resp.GetSuccess() {
typeString := req.GetType().String()
return fmt.Errorf("criu failed: type %s errno %d\nlog file: %s", typeString, resp.GetCrErrno(), logPath)
}
t := resp.GetType()
switch {
case t == criurpc.CriuReqType_FEATURE_CHECK:
logrus.Debugf("Feature check says: %s", resp)
criuFeatures = resp.GetFeatures()
case t == criurpc.CriuReqType_NOTIFY:
if err := c.criuNotifications(resp, process, cmd, opts, extFds, oob[:oobn]); err != nil {
return err
}
t = criurpc.CriuReqType_NOTIFY
req = &criurpc.CriuReq{
Type: &t,
NotifySuccess: proto.Bool(true),
}
data, err = proto.Marshal(req)
if err != nil {
return err
}
_, err = criuClientCon.Write(data)
if err != nil {
return err
}
continue
case t == criurpc.CriuReqType_RESTORE:
case t == criurpc.CriuReqType_DUMP:
case t == criurpc.CriuReqType_PRE_DUMP:
default:
return fmt.Errorf("unable to parse the response %s", resp.String())
}
break
}
_ = criuClientCon.CloseWrite()
// cmd.Wait() also waits for cmd's goroutines, which are used for proxying file descriptors.
// Here we want to wait only for the CRIU process itself.
criuProcessState, err = criuProcess.Wait()
if err != nil {
return err
}
// In pre-dump mode CRIU is in a loop and waits for
// the final DUMP command.
// The current runc pre-dump approach, however, is to
// start criu in PRE_DUMP once for a single pre-dump
// and not for the whole series of pre-dump, pre-dump, ..., dump.
// If we got the message CriuReqType_PRE_DUMP it means
// CRIU was successful and we need to forcefully stop CRIU.
if !criuProcessState.Success() && *req.Type != criurpc.CriuReqType_PRE_DUMP {
return fmt.Errorf("criu failed: %s\nlog file: %s", criuProcessState.String(), logPath)
}
return nil
}
// block any external network activity
func lockNetwork(config *configs.Config) error {
for _, config := range config.Networks {
strategy, err := getStrategy(config.Type)
if err != nil {
return err
}
if err := strategy.detach(config); err != nil {
return err
}
}
return nil
}
func unlockNetwork(config *configs.Config) error {
for _, config := range config.Networks {
strategy, err := getStrategy(config.Type)
if err != nil {
return err
}
if err = strategy.attach(config); err != nil {
return err
}
}
return nil
}
func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Process, cmd *exec.Cmd, opts *CriuOpts, fds []string, oob []byte) error {
notify := resp.GetNotify()
if notify == nil {
return fmt.Errorf("invalid response: %s", resp.String())
}
script := notify.GetScript()
logrus.Debugf("notify: %s\n", script)
switch script {
case "post-dump":
f, err := os.Create(filepath.Join(c.root, "checkpoint"))
if err != nil {
return err
}
f.Close()
case "network-unlock":
if err := unlockNetwork(c.config); err != nil {
return err
}
case "network-lock":
if err := lockNetwork(c.config); err != nil {
return err
}
case "setup-namespaces":
if c.config.Hooks != nil {
s, err := c.currentOCIState()
if err != nil {
return nil
}
s.Pid = int(notify.GetPid())
if err := c.config.Hooks[configs.Prestart].RunHooks(s); err != nil {
return err
}
if err := c.config.Hooks[configs.CreateRuntime].RunHooks(s); err != nil {
return err
}
}
case "post-restore":
pid := notify.GetPid()
p, err := os.FindProcess(int(pid))
if err != nil {
return err
}
cmd.Process = p
r, err := newRestoredProcess(cmd, fds)
if err != nil {
return err
}
process.ops = r
if err := c.state.transition(&restoredState{
imageDir: opts.ImagesDirectory,
c: c,
}); err != nil {
return err
}
// create a timestamp indicating when the restored checkpoint was started
c.created = time.Now().UTC()
if _, err := c.updateState(r); err != nil {
return err
}
if err := os.Remove(filepath.Join(c.root, "checkpoint")); err != nil {
if !os.IsNotExist(err) {
logrus.Error(err)
}
}
case "orphan-pts-master":
scm, err := unix.ParseSocketControlMessage(oob)
if err != nil {
return err
}
fds, err := unix.ParseUnixRights(&scm[0])
if err != nil {
return err
}
master := os.NewFile(uintptr(fds[0]), "orphan-pts-master")
defer master.Close()
// While we can access console.master, using the API is a good idea.
if err := utils.SendFd(process.ConsoleSocket, master.Name(), master.Fd()); err != nil {
return err
}
case "status-ready":
if opts.StatusFd != -1 {
// write \0 to status fd to notify that lazy page server is ready
_, err := unix.Write(opts.StatusFd, []byte{0})
if err != nil {
logrus.Warnf("can't write \\0 to status fd: %v", err)
}
_ = unix.Close(opts.StatusFd)
opts.StatusFd = -1
}
}
return nil
}
func (c *linuxContainer) updateState(process parentProcess) (*State, error) {
if process != nil {
c.initProcess = process
}
state, err := c.currentState()
if err != nil {
return nil, err
}
err = c.saveState(state)
if err != nil {
return nil, err
}
return state, nil
}
func (c *linuxContainer) saveState(s *State) (retErr error) {
tmpFile, err := os.CreateTemp(c.root, "state-")
if err != nil {
return err
}
defer func() {
if retErr != nil {
tmpFile.Close()
os.Remove(tmpFile.Name())
}
}()
err = utils.WriteJSON(tmpFile, s)
if err != nil {
return err
}
err = tmpFile.Close()
if err != nil {
return err
}
stateFilePath := filepath.Join(c.root, stateFilename)
return os.Rename(tmpFile.Name(), stateFilePath)
}
func (c *linuxContainer) currentStatus() (Status, error) {
if err := c.refreshState(); err != nil {
return -1, err
}
return c.state.status(), nil
}
// refreshState needs to be called to verify that the current state on the
// container is what is true. Because consumers of libcontainer can use it
// out of process we need to verify the container's status based on runtime
// information and not rely on our in process info.
func (c *linuxContainer) refreshState() error {
paused, err := c.isPaused()
if err != nil {
return err
}
if paused {
return c.state.transition(&pausedState{c: c})
}
t := c.runType()
switch t {
case Created:
return c.state.transition(&createdState{c: c})
case Running:
return c.state.transition(&runningState{c: c})
}
return c.state.transition(&stoppedState{c: c})
}
func (c *linuxContainer) runType() Status {
if c.initProcess == nil {
return Stopped
}
pid := c.initProcess.pid()
stat, err := system.Stat(pid)
if err != nil {
return Stopped
}
if stat.StartTime != c.initProcessStartTime || stat.State == system.Zombie || stat.State == system.Dead {
return Stopped
}
// We'll create the exec fifo and block on it after the container is created,
// and delete it after the container is started.
if _, err := os.Stat(filepath.Join(c.root, execFifoFilename)); err == nil {
return Created
}
return Running
}
func (c *linuxContainer) isPaused() (bool, error) {
state, err := c.cgroupManager.GetFreezerState()
if err != nil {
return false, err
}
return state == configs.Frozen, nil
}
func (c *linuxContainer) currentState() (*State, error) {
var (
startTime uint64
externalDescriptors []string
pid = -1
)
if c.initProcess != nil {
pid = c.initProcess.pid()
startTime, _ = c.initProcess.startTime()
externalDescriptors = c.initProcess.externalDescriptors()
}
intelRdtPath := ""
if c.intelRdtManager != nil {
intelRdtPath = c.intelRdtManager.GetPath()
}
state := &State{
BaseState: BaseState{
ID: c.ID(),
Config: *c.config,
InitProcessPid: pid,
InitProcessStartTime: startTime,
Created: c.created,
},
Rootless: c.config.RootlessEUID && c.config.RootlessCgroups,
CgroupPaths: c.cgroupManager.GetPaths(),
IntelRdtPath: intelRdtPath,
NamespacePaths: make(map[configs.NamespaceType]string),
ExternalDescriptors: externalDescriptors,
}
if pid > 0 {
for _, ns := range c.config.Namespaces {
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
}
for _, nsType := range configs.NamespaceTypes() {
if !configs.IsNamespaceSupported(nsType) {
continue
}
if _, ok := state.NamespacePaths[nsType]; !ok {
ns := configs.Namespace{Type: nsType}
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
}
}
}
return state, nil
}
func (c *linuxContainer) currentOCIState() (*specs.State, error) {
bundle, annotations := utils.Annotations(c.config.Labels)
state := &specs.State{
Version: specs.Version,
ID: c.ID(),
Bundle: bundle,
Annotations: annotations,
}
status, err := c.currentStatus()
if err != nil {
return nil, err
}
state.Status = specs.ContainerState(status.String())
if status != Stopped {
if c.initProcess != nil {
state.Pid = c.initProcess.pid()
}
}
return state, nil
}
// orderNamespacePaths sorts namespace paths into a list of paths that we
// can setns in order.
func (c *linuxContainer) orderNamespacePaths(namespaces map[configs.NamespaceType]string) ([]string, error) {
paths := []string{}
for _, ns := range configs.NamespaceTypes() {
// Remove namespaces that we don't need to join.
if !c.config.Namespaces.Contains(ns) {
continue
}
if p, ok := namespaces[ns]; ok && p != "" {
// check if the requested namespace is supported
if !configs.IsNamespaceSupported(ns) {
return nil, fmt.Errorf("namespace %s is not supported", ns)
}
// only set to join this namespace if it exists
if _, err := os.Lstat(p); err != nil {
return nil, fmt.Errorf("namespace path: %w", err)
}
// do not allow namespace path with comma as we use it to separate
// the namespace paths
if strings.ContainsRune(p, ',') {
return nil, fmt.Errorf("invalid namespace path %s", p)
}
paths = append(paths, fmt.Sprintf("%s:%s", configs.NsName(ns), p))
}
}
return paths, nil
}
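// encodeIDMapping serializes ID mappings using the "containerID hostID size"
// line format expected by the kernel's /proc/<pid>/uid_map and gid_map files.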
func encodeIDMapping(idMap []configs.IDMap) ([]byte, error) {
data := bytes.NewBuffer(nil)
for _, im := range idMap {
line := fmt.Sprintf("%d %d %d\n", im.ContainerID, im.HostID, im.Size)
if _, err := data.WriteString(line); err != nil {
return nil, err
}
}
return data.Bytes(), nil
}
// netlinkError is an error wrapper type for use by custom netlink message
// types. Panics with errors are wrapped in netlinkError so that the recover
// in bootstrapData can distinguish intentional panics.
type netlinkError struct{ error }
// bootstrapData encodes the necessary data in netlink binary format
// as an io.Reader.
// The consumer can write the data to a bootstrap program
// such as one that uses nsenter package to bootstrap the container's
// init process correctly, i.e. with correct namespaces, uid/gid
// mapping etc.
func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.NamespaceType]string, it initType) (_ io.Reader, Err error) {
// create the netlink message
r := nl.NewNetlinkRequest(int(InitMsg), 0)
// Our custom messages cannot bubble up an error using returns, instead
// they will panic with the specific error type, netlinkError. In that
// case, recover from the panic and return that as an error.
defer func() {
if r := recover(); r != nil {
if e, ok := r.(netlinkError); ok {
Err = e.error
} else {
panic(r)
}
}
}()
// write cloneFlags
r.AddData(&Int32msg{
Type: CloneFlagsAttr,
Value: uint32(cloneFlags),
})
// write custom namespace paths
if len(nsMaps) > 0 {
nsPaths, err := c.orderNamespacePaths(nsMaps)
if err != nil {
return nil, err
}
r.AddData(&Bytemsg{
Type: NsPathsAttr,
Value: []byte(strings.Join(nsPaths, ",")),
})
}
// write namespace paths only when we are not joining an existing user ns
_, joinExistingUser := nsMaps[configs.NEWUSER]
if !joinExistingUser {
// write uid mappings
if len(c.config.UidMappings) > 0 {
if c.config.RootlessEUID && c.newuidmapPath != "" {
r.AddData(&Bytemsg{
Type: UidmapPathAttr,
Value: []byte(c.newuidmapPath),
})
}
b, err := encodeIDMapping(c.config.UidMappings)
if err != nil {
return nil, err
}
r.AddData(&Bytemsg{
Type: UidmapAttr,
Value: b,
})
}
// write gid mappings
if len(c.config.GidMappings) > 0 {
b, err := encodeIDMapping(c.config.GidMappings)
if err != nil {
return nil, err
}
r.AddData(&Bytemsg{
Type: GidmapAttr,
Value: b,
})
if c.config.RootlessEUID && c.newgidmapPath != "" {
r.AddData(&Bytemsg{
Type: GidmapPathAttr,
Value: []byte(c.newgidmapPath),
})
}
if requiresRootOrMappingTool(c.config) {
r.AddData(&Boolmsg{
Type: SetgroupAttr,
Value: true,
})
}
}
}
if c.config.OomScoreAdj != nil {
// write oom_score_adj
r.AddData(&Bytemsg{
Type: OomScoreAdjAttr,
Value: []byte(strconv.Itoa(*c.config.OomScoreAdj)),
})
}
// write rootless
r.AddData(&Boolmsg{
Type: RootlessEUIDAttr,
Value: c.config.RootlessEUID,
})
// Bind mount source to open.
if it == initStandard && c.shouldSendMountSources() {
var mounts []byte
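// Encode mount sources as a single NUL-separated byte string; non-bind
// mounts contribute an empty entry so indices stay aligned with
// c.config.Mounts.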
for _, m := range c.config.Mounts {
if m.IsBind() {
if strings.IndexByte(m.Source, 0) >= 0 {
return nil, fmt.Errorf("mount source string contains null byte: %q", m.Source)
}
mounts = append(mounts, []byte(m.Source)...)
}
mounts = append(mounts, byte(0))
}
r.AddData(&Bytemsg{
Type: MountSourcesAttr,
Value: mounts,
})
}
return bytes.NewReader(r.Serialize()), nil
}
// ignoreTerminateErrors returns nil if err is nil or matches an error known
// to indicate that the terminate occurred successfully; otherwise
// err is returned unaltered.
func ignoreTerminateErrors(err error) error {
if err == nil {
return nil
}
// terminate() might return an error from either Kill or Wait.
// The (*Cmd).Wait documentation says: "If the command fails to run
// or doesn't complete successfully, the error is of type *ExitError".
// Filter out such errors (like "exit status 1" or "signal: killed").
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
return nil
}
if errors.Is(err, os.ErrProcessDone) {
return nil
}
s := err.Error()
if strings.Contains(s, "Wait was already called") {
return nil
}
return err
}
func requiresRootOrMappingTool(c *configs.Config) bool {
gidMap := []configs.IDMap{
{ContainerID: 0, HostID: os.Getegid(), Size: 1},
}
return !reflect.DeepEqual(c.GidMappings, gidMap)
}
| ["\"GOMAXPROCS\""] | [] | ["GOMAXPROCS"] | [] | ["GOMAXPROCS"] | go | 1 | 0 | |
src/azure-cli-core/azure/cli/core/__init__.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
__version__ = "2.0.63"
import os
import sys
import timeit
import six
from knack.cli import CLI
from knack.commands import CLICommandsLoader
from knack.completion import ARGCOMPLETE_ENV_NAME
from knack.introspection import extract_args_from_signature, extract_full_summary_from_signature
from knack.log import get_logger
from knack.util import CLIError
from knack.arguments import ArgumentsContext, CaseInsensitiveList # pylint: disable=unused-import
logger = get_logger(__name__)
EXCLUDED_PARAMS = ['self', 'raw', 'polling', 'custom_headers', 'operation_config',
'content_version', 'kwargs', 'client', 'no_wait']
EVENT_FAILED_EXTENSION_LOAD = 'MainLoader.OnFailedExtensionLoad'
class AzCli(CLI):
def __init__(self, **kwargs):
super(AzCli, self).__init__(**kwargs)
from azure.cli.core.commands import register_cache_arguments
from azure.cli.core.commands.arm import (
register_ids_argument, register_global_subscription_argument)
from azure.cli.core.cloud import get_active_cloud
from azure.cli.core.commands.transform import register_global_transforms
from azure.cli.core._session import ACCOUNT, CONFIG, SESSION
from knack.util import ensure_dir
self.data['headers'] = {}
self.data['command'] = 'unknown'
self.data['command_extension_name'] = None
self.data['completer_active'] = ARGCOMPLETE_ENV_NAME in os.environ
self.data['query_active'] = False
azure_folder = self.config.config_dir
ensure_dir(azure_folder)
ACCOUNT.load(os.path.join(azure_folder, 'azureProfile.json'))
CONFIG.load(os.path.join(azure_folder, 'az.json'))
SESSION.load(os.path.join(azure_folder, 'az.sess'), max_age=3600)
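# Cached session data in az.sess expires after an hour (max_age is in seconds).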
self.cloud = get_active_cloud(self)
logger.debug('Current cloud config:\n%s', str(self.cloud.name))
register_global_transforms(self)
register_global_subscription_argument(self)
register_ids_argument(self) # global subscription must be registered first!
register_cache_arguments(self)
self.progress_controller = None
def refresh_request_id(self):
"""Assign a new random GUID as x-ms-client-request-id
The method must be invoked before each command execution in order to ensure
unique client-side request ID is generated.
"""
import uuid
self.data['headers']['x-ms-client-request-id'] = str(uuid.uuid1())
def get_progress_controller(self, det=False):
import azure.cli.core.commands.progress as progress
if not self.progress_controller:
self.progress_controller = progress.ProgressHook()
self.progress_controller.init_progress(progress.get_progress_view(det))
return self.progress_controller
def get_cli_version(self):
return __version__
def show_version(self):
from azure.cli.core.util import get_az_version_string
ver_string, updates_available = get_az_version_string()
print(ver_string)
if updates_available == -1:
logger.warning('Unable to check if your CLI is up-to-date. Check your internet connection.')
elif updates_available:
logger.warning('You have %i updates available. Consider updating your CLI installation.', updates_available)
else:
print('Your CLI is up-to-date.')
def exception_handler(self, ex): # pylint: disable=no-self-use
from azure.cli.core.util import handle_exception
return handle_exception(ex)
class MainCommandsLoader(CLICommandsLoader):
def __init__(self, cli_ctx=None):
super(MainCommandsLoader, self).__init__(cli_ctx)
self.cmd_to_loader_map = {}
self.loaders = []
def _update_command_definitions(self):
for cmd_name in self.command_table:
loaders = self.cmd_to_loader_map[cmd_name]
for loader in loaders:
loader.command_table = self.command_table
loader._update_command_definitions() # pylint: disable=protected-access
# pylint: disable=too-many-statements
def load_command_table(self, args):
from importlib import import_module
import pkgutil
import traceback
from azure.cli.core.commands import (
_load_module_command_loader, _load_extension_command_loader, BLACKLISTED_MODS, ExtensionCommandSource)
from azure.cli.core.extension import (
get_extensions, get_extension_path, get_extension_modname)
def _update_command_table_from_modules(args):
'''Loads command table(s)
When `module_name` is specified, only commands from that module will be loaded.
If the module is not found, all commands are loaded.
'''
installed_command_modules = []
try:
mods_ns_pkg = import_module('azure.cli.command_modules')
installed_command_modules = [modname for _, modname, _ in
pkgutil.iter_modules(mods_ns_pkg.__path__)
if modname not in BLACKLISTED_MODS]
except ImportError as e:
logger.warning(e)
logger.debug('Installed command modules %s', installed_command_modules)
cumulative_elapsed_time = 0
for mod in [m for m in installed_command_modules if m not in BLACKLISTED_MODS]:
try:
start_time = timeit.default_timer()
module_command_table, module_group_table = _load_module_command_loader(self, args, mod)
for cmd in module_command_table.values():
cmd.command_source = mod
self.command_table.update(module_command_table)
self.command_group_table.update(module_group_table)
elapsed_time = timeit.default_timer() - start_time
logger.debug("Loaded module '%s' in %.3f seconds.", mod, elapsed_time)
cumulative_elapsed_time += elapsed_time
except Exception as ex: # pylint: disable=broad-except
# Changing this error message requires updating CI script that checks for failed
# module loading.
import azure.cli.core.telemetry as telemetry
logger.error("Error loading command module '%s'", mod)
telemetry.set_exception(exception=ex, fault_type='module-load-error-' + mod,
summary='Error loading module: {}'.format(mod))
logger.debug(traceback.format_exc())
logger.debug("Loaded all modules in %.3f seconds. "
"(note: there's always an overhead with the first module loaded)",
cumulative_elapsed_time)
def _update_command_table_from_extensions(ext_suppressions):
def _handle_extension_suppressions(extensions):
filtered_extensions = []
for ext in extensions:
should_include = True
for suppression in ext_suppressions:
if should_include and suppression.handle_suppress(ext):
should_include = False
if should_include:
filtered_extensions.append(ext)
return filtered_extensions
extensions = get_extensions()
if extensions:
logger.debug("Found %s extensions: %s", len(extensions), [e.name for e in extensions])
allowed_extensions = _handle_extension_suppressions(extensions)
module_commands = set(self.command_table.keys())
for ext in allowed_extensions:
ext_name = ext.name
ext_dir = ext.path or get_extension_path(ext_name)
sys.path.append(ext_dir)
try:
ext_mod = get_extension_modname(ext_name, ext_dir=ext_dir)
# Add to the map. This needs to happen before we load commands as registering a command
# from an extension requires this map to be up-to-date.
# self._mod_to_ext_map[ext_mod] = ext_name
start_time = timeit.default_timer()
extension_command_table, extension_group_table = \
_load_extension_command_loader(self, args, ext_mod)
for cmd_name, cmd in extension_command_table.items():
cmd.command_source = ExtensionCommandSource(
extension_name=ext_name,
overrides_command=cmd_name in module_commands,
preview=ext.preview)
self.command_table.update(extension_command_table)
self.command_group_table.update(extension_group_table)
elapsed_time = timeit.default_timer() - start_time
logger.debug("Loaded extension '%s' in %.3f seconds.", ext_name, elapsed_time)
except Exception: # pylint: disable=broad-except
self.cli_ctx.raise_event(EVENT_FAILED_EXTENSION_LOAD, extension_name=ext_name)
logger.warning("Unable to load extension '%s'. Use --debug for more information.", ext_name)
logger.debug(traceback.format_exc())
def _wrap_suppress_extension_func(func, ext):
""" Wrapper method to handle centralization of log messages for extension filters """
res = func(ext)
should_suppress = res
reason = "Use --debug for more information."
if isinstance(res, tuple):
should_suppress, reason = res
suppress_types = (bool, type(None))
if not isinstance(should_suppress, suppress_types):
raise ValueError("Command module authoring error: "
"Valid extension suppression values are {} in {}".format(suppress_types, func))
if should_suppress:
logger.warning("Extension %s (%s) has been suppressed. %s",
ext.name, ext.version, reason)
logger.debug("Extension %s (%s) suppressed from being loaded due "
"to %s", ext.name, ext.version, func)
return should_suppress
def _get_extension_suppressions(mod_loaders):
res = []
for m in mod_loaders:
suppressions = getattr(m, 'suppress_extension', None)
if suppressions:
suppressions = suppressions if isinstance(suppressions, list) else [suppressions]
for sup in suppressions:
if isinstance(sup, ModExtensionSuppress):
res.append(sup)
return res
_update_command_table_from_modules(args)
try:
ext_suppressions = _get_extension_suppressions(self.loaders)
# We always load extensions even if the appropriate module has been loaded
# as an extension could override the commands already loaded.
_update_command_table_from_extensions(ext_suppressions)
except Exception: # pylint: disable=broad-except
logger.warning("Unable to load extensions. Use --debug for more information.")
logger.debug(traceback.format_exc())
return self.command_table
def load_arguments(self, command):
from azure.cli.core.commands.parameters import resource_group_name_type, get_location_type, deployment_name_type
from knack.arguments import ignore_type
command_loaders = self.cmd_to_loader_map.get(command, None)
if command_loaders:
for loader in command_loaders:
# register global args
with loader.argument_context('') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('location', get_location_type(self.cli_ctx))
c.argument('deployment_name', deployment_name_type)
c.argument('cmd', ignore_type)
loader.command_name = command
self.command_table[command].load_arguments() # this loads the arguments via reflection
loader.load_arguments(command) # this adds entries to the argument registries
self.argument_registry.arguments.update(loader.argument_registry.arguments)
self.extra_argument_registry.update(loader.extra_argument_registry)
loader._update_command_definitions() # pylint: disable=protected-access
class ModExtensionSuppress(object): # pylint: disable=too-few-public-methods
def __init__(self, mod_name, suppress_extension_name, suppress_up_to_version, reason=None, recommend_remove=False,
recommend_update=False):
self.mod_name = mod_name
self.suppress_extension_name = suppress_extension_name
self.suppress_up_to_version = suppress_up_to_version
self.reason = reason
self.recommend_remove = recommend_remove
self.recommend_update = recommend_update
def handle_suppress(self, ext):
from pkg_resources import parse_version
should_suppress = ext.name == self.suppress_extension_name and ext.version and \
parse_version(ext.version) <= parse_version(self.suppress_up_to_version)
if should_suppress:
reason = self.reason or "Use --debug for more information."
logger.warning("Extension %s (%s) has been suppressed. %s",
ext.name, ext.version, reason)
logger.debug("Extension %s (%s) suppressed from being loaded due "
"to %s", ext.name, ext.version, self.mod_name)
if self.recommend_remove:
logger.warning("Remove this extension with 'az extension remove --name %s'", ext.name)
if self.recommend_update:
logger.warning("Update this extension with 'az extension update --name %s'", ext.name)
return should_suppress
class AzCommandsLoader(CLICommandsLoader): # pylint: disable=too-many-instance-attributes
def __init__(self, cli_ctx=None, min_profile=None, max_profile='latest',
command_group_cls=None, argument_context_cls=None, suppress_extension=None,
**kwargs):
from azure.cli.core.commands import AzCliCommand, AzCommandGroup, AzArgumentContext
super(AzCommandsLoader, self).__init__(cli_ctx=cli_ctx,
command_cls=AzCliCommand,
excluded_command_handler_args=EXCLUDED_PARAMS)
self.min_profile = min_profile
self.max_profile = max_profile
self.suppress_extension = suppress_extension
self.module_kwargs = kwargs
self.command_name = None
self.skip_applicability = False
self._command_group_cls = command_group_cls or AzCommandGroup
self._argument_context_cls = argument_context_cls or AzArgumentContext
def _update_command_definitions(self):
master_arg_registry = self.cli_ctx.invocation.commands_loader.argument_registry
master_extra_arg_registry = self.cli_ctx.invocation.commands_loader.extra_argument_registry
for command_name, command in self.command_table.items():
# Add any arguments explicitly registered for this command
for argument_name, argument_definition in master_extra_arg_registry[command_name].items():
command.arguments[argument_name] = argument_definition
for argument_name in command.arguments:
overrides = master_arg_registry.get_cli_argument(command_name, argument_name)
command.update_argument(argument_name, overrides)
def _apply_doc_string(self, dest, command_kwargs):
from azure.cli.core.profiles._shared import APIVersionException
doc_string_source = command_kwargs.get('doc_string_source', None)
if not doc_string_source:
return
if not isinstance(doc_string_source, str):
raise CLIError("command authoring error: applying doc_string_source '{}' directly will cause slowdown. "
'Import by string name instead.'.format(doc_string_source.__name__))
model = doc_string_source
try:
model = self.get_models(doc_string_source)
except APIVersionException:
model = None
if not model:
from importlib import import_module
(path, model_name) = doc_string_source.split('#', 1)
method_name = None
if '.' in model_name:
(model_name, method_name) = model_name.split('.', 1)
module = import_module(path)
model = getattr(module, model_name)
if method_name:
model = getattr(model, method_name, None)
if not model:
raise CLIError("command authoring error: source '{}' not found.".format(doc_string_source))
dest.__doc__ = model.__doc__
def _get_resource_type(self):
resource_type = self.module_kwargs.get('resource_type', None)
if not resource_type:
command_type = self.module_kwargs.get('command_type', None)
resource_type = command_type.settings.get('resource_type', None) if command_type else None
return resource_type
def get_api_version(self, resource_type=None, operation_group=None):
from azure.cli.core.profiles import get_api_version
resource_type = resource_type or self._get_resource_type()
version = get_api_version(self.cli_ctx, resource_type)
if isinstance(version, str):
return version
version = getattr(version, operation_group, None)
if version:
return version
from azure.cli.core.profiles._shared import APIVersionException
raise APIVersionException(operation_group, self.cli_ctx.cloud.profile)
def supported_api_version(self, resource_type=None, min_api=None, max_api=None, operation_group=None):
from azure.cli.core.profiles import supported_api_version
if not min_api and not max_api:
# optimistically assume that fully supported if no api restriction listed
return True
api_support = supported_api_version(
cli_ctx=self.cli_ctx,
resource_type=resource_type or self._get_resource_type(),
min_api=min_api or self.min_profile,
max_api=max_api or self.max_profile,
operation_group=operation_group)
if isinstance(api_support, bool):
return api_support
if operation_group:
return getattr(api_support, operation_group)
return api_support
def get_sdk(self, *attr_args, **kwargs):
from azure.cli.core.profiles import get_sdk
return get_sdk(self.cli_ctx, kwargs.pop('resource_type', self._get_resource_type()),
*attr_args, **kwargs)
def get_models(self, *attr_args, **kwargs):
from azure.cli.core.profiles import get_sdk
resource_type = kwargs.get('resource_type', self._get_resource_type())
operation_group = kwargs.get('operation_group', self.module_kwargs.get('operation_group', None))
return get_sdk(self.cli_ctx, resource_type, *attr_args, mod='models', operation_group=operation_group)
def command_group(self, group_name, command_type=None, **kwargs):
if command_type:
kwargs['command_type'] = command_type
if 'deprecate_info' in kwargs:
kwargs['deprecate_info'].target = group_name
return self._command_group_cls(self, group_name, **kwargs)
def argument_context(self, scope, **kwargs):
return self._argument_context_cls(self, scope, **kwargs)
def _cli_command(self, name, operation=None, handler=None, argument_loader=None, description_loader=None, **kwargs):
from knack.deprecation import Deprecated
kwargs['deprecate_info'] = Deprecated.ensure_new_style_deprecation(self.cli_ctx, kwargs, 'command')
if operation and not isinstance(operation, six.string_types):
raise TypeError("Operation must be a string. Got '{}'".format(operation))
if handler and not callable(handler):
raise TypeError("Handler must be a callable. Got '{}'".format(operation))
if bool(operation) == bool(handler):
raise TypeError("Must specify exactly one of either 'operation' or 'handler'")
name = ' '.join(name.split())
client_factory = kwargs.get('client_factory', None)
def default_command_handler(command_args):
from azure.cli.core.util import get_arg_list, augment_no_wait_handler_args
from azure.cli.core.commands.client_factory import resolve_client_arg_name
op = handler or self.get_op_handler(operation, operation_group=kwargs.get('operation_group'))
op_args = get_arg_list(op)
cmd = command_args.get('cmd') if 'cmd' in op_args else command_args.pop('cmd')
client = client_factory(cmd.cli_ctx, command_args) if client_factory else None
supports_no_wait = kwargs.get('supports_no_wait', None)
if supports_no_wait:
no_wait_enabled = command_args.pop('no_wait', False)
augment_no_wait_handler_args(no_wait_enabled, op, command_args)
if client:
client_arg_name = resolve_client_arg_name(operation, kwargs)
if client_arg_name in op_args:
command_args[client_arg_name] = client
return op(**command_args)
def default_arguments_loader():
op = handler or self.get_op_handler(operation, operation_group=kwargs.get('operation_group'))
self._apply_doc_string(op, kwargs)
cmd_args = list(extract_args_from_signature(op, excluded_params=self.excluded_command_handler_args))
return cmd_args
def default_description_loader():
op = handler or self.get_op_handler(operation, operation_group=kwargs.get('operation_group'))
self._apply_doc_string(op, kwargs)
return extract_full_summary_from_signature(op)
kwargs['arguments_loader'] = argument_loader or default_arguments_loader
kwargs['description_loader'] = description_loader or default_description_loader
if self.supported_api_version(resource_type=kwargs.get('resource_type'),
min_api=kwargs.get('min_api'),
max_api=kwargs.get('max_api'),
operation_group=kwargs.get('operation_group')):
self._populate_command_group_table_with_subgroups(' '.join(name.split()[:-1]))
self.command_table[name] = self.command_cls(self, name,
handler or default_command_handler,
**kwargs)
def get_op_handler(self, operation, operation_group=None):
""" Import and load the operation handler """
# Patch the unversioned sdk path to include the appropriate API version for the
# resource type in question.
from importlib import import_module
import types
from azure.cli.core.profiles import AZURE_API_PROFILES
from azure.cli.core.profiles._shared import get_versioned_sdk_path
for rt in AZURE_API_PROFILES[self.cli_ctx.cloud.profile]:
if operation.startswith(rt.import_prefix):
operation = operation.replace(rt.import_prefix,
get_versioned_sdk_path(self.cli_ctx.cloud.profile, rt,
operation_group=operation_group))
try:
mod_to_import, attr_path = operation.split('#')
op = import_module(mod_to_import)
for part in attr_path.split('.'):
op = getattr(op, part)
if isinstance(op, types.FunctionType):
return op
return six.get_method_function(op)
except (ValueError, AttributeError):
raise ValueError("The operation '{}' is invalid.".format(operation))
def get_default_cli():
from azure.cli.core.azlogging import AzCliLogging
from azure.cli.core.commands import AzCliCommandInvoker
from azure.cli.core.parser import AzCliCommandParser
from azure.cli.core._config import GLOBAL_CONFIG_DIR, ENV_VAR_PREFIX
from azure.cli.core._help import AzCliHelp
from azure.cli.core._output import AzOutputProducer
return AzCli(cli_name='az',
config_dir=GLOBAL_CONFIG_DIR,
config_env_var_prefix=ENV_VAR_PREFIX,
commands_loader_cls=MainCommandsLoader,
invocation_cls=AzCliCommandInvoker,
parser_cls=AzCliCommandParser,
logging_cls=AzCliLogging,
output_cls=AzOutputProducer,
help_cls=AzCliHelp)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
dev-tools/mage/fmt.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package mage
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/magefile/mage/mg"
"github.com/magefile/mage/sh"
"github.com/pkg/errors"
"github.com/snappyflow/beats/v7/dev-tools/mage/gotool"
)
var (
// GoImportsImportPath controls the import path used to install goimports.
GoImportsImportPath = "golang.org/x/tools/cmd/goimports"
// GoImportsLocalPrefix is a string prefix matching imports that should be
// grouped after third-party packages.
GoImportsLocalPrefix = "github.com/elastic"
)
// Format adds license headers, formats .go files with goimports, and formats
// .py files with autopep8.
func Format() {
// Don't run AddLicenseHeaders and GoImports concurrently because they
// both can modify the same files.
if BeatProjectType != CommunityProject {
mg.Deps(AddLicenseHeaders)
}
mg.Deps(GoImports, PythonAutopep8)
}
// GoImports executes goimports against all .go files in and below the CWD. It
// ignores vendor/ and generator/_templates/ directories.
func GoImports() error {
goFiles, err := FindFilesRecursive(func(path string, _ os.FileInfo) bool {
return filepath.Ext(path) == ".go" &&
!strings.Contains(path, "vendor/") &&
!strings.Contains(path, "generator/_templates/")
})
if err != nil {
return err
}
if len(goFiles) == 0 {
return nil
}
fmt.Println(">> fmt - goimports: Formatting Go code")
if err := gotool.Install(
gotool.Install.Package(filepath.Join(GoImportsImportPath)),
); err != nil {
return err
}
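// -l lists files whose formatting differs, -w rewrites them in place, and
// -local groups imports matching GoImportsLocalPrefix after third-party imports.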
args := append(
[]string{"-local", GoImportsLocalPrefix, "-l", "-w"},
goFiles...,
)
return sh.RunV("goimports", args...)
}
// PythonAutopep8 executes autopep8 on all .py files in and below the CWD. It
// ignores build/ directories.
func PythonAutopep8() error {
pyFiles, err := FindFilesRecursive(func(path string, _ os.FileInfo) bool {
return filepath.Ext(path) == ".py" &&
!strings.Contains(path, "build/") &&
!strings.Contains(path, "vendor/")
})
if err != nil {
return err
}
if len(pyFiles) == 0 {
return nil
}
fmt.Println(">> fmt - autopep8: Formatting Python code")
ve, err := PythonVirtualenv()
if err != nil {
return err
}
autopep8, err := LookVirtualenvPath(ve, "autopep8")
if err != nil {
return err
}
args := append(
[]string{"--in-place", "--max-line-length", "120"},
pyFiles...,
)
return sh.RunV(autopep8, args...)
}
// AddLicenseHeaders adds license headers to .go files. It applies the
// appropriate license header based on the value of devtools.BeatLicense.
func AddLicenseHeaders() error {
if os.Getenv("CHECK_HEADERS_DISABLED") != "" {
return nil
}
fmt.Println(">> fmt - go-licenser: Adding missing headers")
mg.Deps(InstallGoLicenser)
var license string
switch BeatLicense {
case "ASL2", "ASL 2.0":
license = "ASL2"
case "Elastic", "Elastic License":
license = "Elastic"
default:
return errors.Errorf("unknown license type %v", BeatLicense)
}
licenser := gotool.Licenser
return licenser(licenser.License(license))
}
| ["\"CHECK_HEADERS_DISABLED\""] | [] | ["CHECK_HEADERS_DISABLED"] | [] | ["CHECK_HEADERS_DISABLED"] | go | 1 | 0 | |
pkg/util/kubernetes/autoscalers/datadogexternal.go
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2017-present Datadog, Inc.
// +build kubeapiserver
package autoscalers
import (
"errors"
"fmt"
"os"
"strconv"
"strings"
"time"
"gopkg.in/zorkian/go-datadog-api.v2"
utilserror "k8s.io/apimachinery/pkg/util/errors"
"github.com/DataDog/datadog-agent/pkg/config"
"github.com/DataDog/datadog-agent/pkg/telemetry"
httputils "github.com/DataDog/datadog-agent/pkg/util/http"
le "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection/metrics"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
var (
ddRequests = telemetry.NewCounterWithOpts("", "datadog_requests",
[]string{"status", le.JoinLeaderLabel}, "Counter of requests made to Datadog",
telemetry.Options{NoDoubleUnderscoreSep: true})
metricsEval = telemetry.NewGaugeWithOpts("", "external_metrics_processed_value",
[]string{"metric", le.JoinLeaderLabel}, "value processed from querying Datadog",
telemetry.Options{NoDoubleUnderscoreSep: true})
metricsDelay = telemetry.NewGaugeWithOpts("", "external_metrics_delay_seconds",
[]string{"metric", le.JoinLeaderLabel}, "freshness of the metric evaluated from querying Datadog",
telemetry.Options{NoDoubleUnderscoreSep: true})
rateLimitsRemaining = telemetry.NewGaugeWithOpts("", "rate_limit_queries_remaining",
[]string{"endpoint", le.JoinLeaderLabel}, "number of queries remaining before next reset",
telemetry.Options{NoDoubleUnderscoreSep: true})
rateLimitsReset = telemetry.NewGaugeWithOpts("", "rate_limit_queries_reset",
[]string{"endpoint", le.JoinLeaderLabel}, "number of seconds before next reset",
telemetry.Options{NoDoubleUnderscoreSep: true})
rateLimitsPeriod = telemetry.NewGaugeWithOpts("", "rate_limit_queries_period",
[]string{"endpoint", le.JoinLeaderLabel}, "period of rate limiting",
telemetry.Options{NoDoubleUnderscoreSep: true})
rateLimitsLimit = telemetry.NewGaugeWithOpts("", "rate_limit_queries_limit",
[]string{"endpoint", le.JoinLeaderLabel}, "maximum number of queries allowed in the period",
telemetry.Options{NoDoubleUnderscoreSep: true})
)
type Point struct {
Value float64
Timestamp int64
Valid bool
}
const (
value = 1
timestamp = 0
queryEndpoint = "/api/v1/query"
metricsEndpointPrefix = "https://api."
metricsEndpointConfig = "external_metrics_provider.endpoint"
)
// queryDatadogExternal converts the metric name and labels from the Ref format into a Datadog metric.
// It returns the last value for a bucket of 5 minutes.
func (p *Processor) queryDatadogExternal(ddQueries []string, bucketSize int64) (map[string]Point, error) {
ddQueriesLen := len(ddQueries)
if ddQueriesLen == 0 {
log.Tracef("No query in input - nothing to do")
return nil, nil
}
query := strings.Join(ddQueries, ",")
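// Multiple queries are batched into a single comma-separated API call;
// results are matched back to their originating query via QueryIndex below.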
seriesSlice, err := p.datadogClient.QueryMetrics(time.Now().Unix()-bucketSize, time.Now().Unix(), query)
if err != nil {
ddRequests.Inc("error", le.JoinLeaderValue)
return nil, log.Errorf("Error while executing metric query %s: %s", query, err)
}
ddRequests.Inc("success", le.JoinLeaderValue)
processedMetrics := make(map[string]Point, ddQueriesLen)
for _, serie := range seriesSlice {
if serie.Metric == nil {
log.Infof("Could not collect values for all processedMetrics in the query %s", query)
continue
}
// Perform matching between query and reply, using query order and `QueryIndex` from API reply (QueryIndex is 0-based)
var queryIndex int = 0
if ddQueriesLen > 1 {
if serie.QueryIndex != nil && *serie.QueryIndex < ddQueriesLen {
queryIndex = *serie.QueryIndex
} else {
log.Errorf("Received Serie without QueryIndex or invalid QueryIndex while we sent multiple queries. Full query: %s / Serie expression: %v / QueryIndex: %v", query, serie.Expression, serie.QueryIndex)
continue
}
}
// Check if we already have a Serie result for this query. We expect each query to result in a single Serie;
// otherwise we are not able to determine which value we should take for autoscaling.
if existingPoint, found := processedMetrics[ddQueries[queryIndex]]; found {
if existingPoint.Valid {
log.Warnf("Multiple Series found for query: %s. Please change your query to return a single Serie. Results will be flagged as invalid", ddQueries[queryIndex])
existingPoint.Valid = false
existingPoint.Timestamp = time.Now().Unix()
processedMetrics[ddQueries[queryIndex]] = existingPoint
}
continue
}
// Use the penultimate bucket, since the very last bucket can be subject to variations due to late points.
var skippedLastPoint bool
var point Point
// Find the most recent value.
for i := len(serie.Points) - 1; i >= 0; i-- {
if serie.Points[i][value] == nil {
// We need this because when multiple metrics are queried, their points' timestamps are aligned, which can result in empty values.
continue
}
// We need at least 2 points per window queried on batched metrics.
// If a single sparse metric is processed and only has 1 point in the window, use the value.
if !skippedLastPoint && len(serie.Points) > 1 {
// Skip last point unless the query window only contains one valid point
skippedLastPoint = true
continue
}
point.Value = *serie.Points[i][value] // store the original value
point.Timestamp = int64(*serie.Points[i][timestamp] / 1000) // Datadog's API returns timestamps in s
point.Valid = true
m := fmt.Sprintf("%s{%s}", *serie.Metric, *serie.Scope)
processedMetrics[ddQueries[queryIndex]] = point
// Prometheus submissions on the processed external metrics
metricsEval.Set(point.Value, m, le.JoinLeaderValue)
precision := time.Now().Unix() - point.Timestamp
metricsDelay.Set(float64(precision), m, le.JoinLeaderValue)
log.Debugf("Validated %s | Value:%v at %d after %d/%d buckets", ddQueries[queryIndex], point.Value, point.Timestamp, i+1, len(serie.Points))
break
}
}
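// Illustrative sketch (not part of the original file; values are hypothetical): the loop above walks each
// serie's points from newest to oldest, skips the newest valid point (which may still receive late data),
// and keeps the next valid one. For a serie with points [[1000, 1.0], [2000, 2.0], [3000, 3.0]],
// the value 2.0 would be reported and the point with value 3.0 skipped.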
// If the returned Series is empty for one or more processedMetrics, add it as invalid
for _, ddQuery := range ddQueries {
if _, found := processedMetrics[ddQuery]; !found {
processedMetrics[ddQuery] = Point{
Timestamp: time.Now().Unix(),
}
}
}
// If we received no series at all, return an error on top of the invalid metrics
if len(seriesSlice) == 0 {
return processedMetrics, log.Errorf("Returned series slice empty")
}
return processedMetrics, nil
}
// setTelemetryMetric is a helper to submit telemetry metrics
func setTelemetryMetric(val string, metric telemetry.Gauge) error {
valFloat, err := strconv.Atoi(val)
if err == nil {
metric.Set(float64(valFloat), queryEndpoint, le.JoinLeaderValue)
}
return err
}
func (p *Processor) updateRateLimitingMetrics() error {
updateMap := p.datadogClient.GetRateLimitStats()
queryLimits := updateMap[queryEndpoint]
errors := []error{
setTelemetryMetric(queryLimits.Limit, rateLimitsLimit),
setTelemetryMetric(queryLimits.Remaining, rateLimitsRemaining),
setTelemetryMetric(queryLimits.Period, rateLimitsPeriod),
setTelemetryMetric(queryLimits.Reset, rateLimitsReset),
}
return utilserror.NewAggregate(errors)
}
// NewDatadogClient generates a new client to query metrics from Datadog
func NewDatadogClient() (*datadog.Client, error) {
apiKey := config.SanitizeAPIKey(config.Datadog.GetString("api_key"))
appKey := config.Datadog.GetString("app_key")
// DATADOG_HOST used to be the only way to set the external metrics
// endpoint, so we need to keep backwards compatibility. In order of
// priority, we use:
// - DD_EXTERNAL_METRICS_PROVIDER_ENDPOINT
// - DATADOG_HOST
// - DD_SITE
endpoint := os.Getenv("DATADOG_HOST")
if config.Datadog.GetString(metricsEndpointConfig) != "" || endpoint == "" {
endpoint = config.GetMainEndpoint(metricsEndpointPrefix, metricsEndpointConfig)
}
if appKey == "" || apiKey == "" {
return nil, errors.New("missing the api/app key pair to query Datadog")
}
log.Infof("Initialized the Datadog Client for HPA with endpoint %q", endpoint)
client := datadog.NewClient(apiKey, appKey)
client.HttpClient.Transport = httputils.CreateHTTPTransport()
client.RetryTimeout = 3 * time.Second
client.ExtraHeader["User-Agent"] = "Datadog-Cluster-Agent"
client.SetBaseUrl(endpoint)
return client, nil
}
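// Hedged usage sketch (not part of the original file; values are hypothetical): the endpoint
// resolution above prefers the external_metrics_provider.endpoint config key, then the
// DATADOG_HOST environment variable, and otherwise falls back to the DD_SITE-derived default, e.g.:
//   os.Setenv("DATADOG_HOST", "https://app.datadoghq.eu")
//   client, err := NewDatadogClient() // DATADOG_HOST is used only while
//   // external_metrics_provider.endpoint is left unset in the Agent config.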
|
[
"\"DATADOG_HOST\""
] |
[] |
[
"DATADOG_HOST"
] |
[]
|
["DATADOG_HOST"]
|
go
| 1 | 0 | |
server.py
|
import os
import sys
import logging
from flask import Flask, jsonify, request, url_for, make_response, abort
from flask_api import status # HTTP Status Codes
from werkzeug.exceptions import NotFound
from flasgger import Swagger
from flask_sqlalchemy import SQLAlchemy
from models import Wishlist, Item, DataValidationError
from vcap import get_database_uri
app = Flask(__name__)
app.config['SWAGGER'] = {
"swagger_version": "2.0",
"specs": [
{
"version": "1.0.0",
"title": "Wishlist Service Documentation",
"description": "This is a sample wishlist service Documentation.",
"endpoint": 'v1_spec',
"route": '/index.html'
}
]
}
Swagger(app)
# dev config
app.config['SQLALCHEMY_DATABASE_URI'] = get_database_uri()
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = 'please, tell nobody... we are wishlist squad'
app.config['LOGGING_LEVEL'] = logging.INFO
# Pull options from environment
DEBUG = (os.getenv('DEBUG', 'False') == 'True')
PORT = os.getenv('PORT', '5000')
######################################################################
# Error Handlers
######################################################################
@app.errorhandler(DataValidationError)
def request_validation_error(error):
""" Handles Value Errors from bad data """
return bad_request(error)
@app.errorhandler(400)
def bad_request(error):
""" Handles bad requests with 400_BAD_REQUEST """
message = error.message or str(error)
app.logger.info(message)
return jsonify(status=400, error='Bad Request', message=message), 400
@app.errorhandler(404)
def not_found(error):
""" Handles resources not found with 404_NOT_FOUND """
message = error.message or str(error)
app.logger.info(message)
return jsonify(status=404, error='Not Found', message=message), 404
@app.errorhandler(405)
def method_not_supported(error):
""" Handles unsuppoted HTTP methods with 405_METHOD_NOT_SUPPORTED """
message = error.message or str(error)
app.logger.info(message)
return jsonify(status=405, error='Method not Allowed', message=message), 405
@app.errorhandler(415)
def mediatype_not_supported(error):
""" Handles unsuppoted media requests with 415_UNSUPPORTED_MEDIA_TYPE """
message = error.message or str(error)
app.logger.info(message)
return jsonify(status=415, error='Unsupported media type', message=message), 415
@app.errorhandler(500)
def internal_server_error(error):
""" Handles unexpected server error with 500_SERVER_ERROR """
message = error.message or str(error)
app.logger.info(message)
return jsonify(status=500, error='Internal Server Error', message=message), 500
######################################################################
# GET INDEX
######################################################################
@app.route('/')
def index():
""" Root URL response """
'''return jsonify(name='Wishlists REST API Service',
version='1.0.0.0',
paths=[url_for('get_wishlist_list', _external=True)],
status = "success"
), status.HTTP_200_OK'''
return app.send_static_file('index.html')
######################################################################
# CREATE A NEW WISHLIST
######################################################################
@app.route('/wishlists', methods=['POST'])
def create_wishlist():
"""
Creates a Wishlist object based on the JSON posted
Will create a wishlist with an auto incremented id
---
tags:
- Wishlist
parameters:
- name: body
in: body
required: true
schema:
id: wishlist_entries
required:
- customer_id
- wishlist_name
properties:
customer_id:
type: integer
description: customer_id
default: "34"
wishlist_name:
type: string
description: name of the wishlist
default: "water Bottles"
responses:
201:
description: Successfully Created wishlist
"""
check_content_type('application/json')
wishlist = Wishlist()
json_post = request.get_json()
wishlist.deserialize(json_post)
wishlist.save()
message = wishlist.serialize()
location_url = url_for('get_wishlist', wishlist_id=wishlist.id, _external=True)
return make_response(jsonify(message), status.HTTP_201_CREATED,
{
'Location': location_url
})
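# Illustrative request (not part of the original service; host and port are assumed):
# creating a wishlist with curl against a local instance:
#   curl -X POST http://localhost:5000/wishlists \
#        -H "Content-Type: application/json" \
#        -d '{"customer_id": 34, "wishlist_name": "water Bottles"}'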
######################################################################
# GET A WISHLIST
######################################################################
@app.route('/wishlists/<int:wishlist_id>', methods=['GET'])
def get_wishlist(wishlist_id):
"""
Retrieve a single Wishlist
This endpoint will return a Wishlist based on it's ID
---
tags:
- Wishlist
produces:
- application/json
parameters:
- name: wishlist_id
in: path
type: integer
required: true
definitions:
Wishlist:
type: object
properties:
id:
type: integer
customer_id:
type: integer
wishlist_name:
type: string
definitions:
Item:
type: object
properties:
id:
type: integer
wishlist_id:
type: integer
product_id:
type: integer
name:
type: string
description:
type: string
responses:
200:
description: List of items in the wishlist
schema:
$ref: '#/definitions/Wishlist'
404:
description: Wishlist with id wishlist_id not found
"""
wishlist = Wishlist.get(wishlist_id)
if not wishlist:
raise NotFound("Wishlist with id '{}' was not found.".format(wishlist_id))
return make_response(jsonify(wishlist.serialize()), status.HTTP_200_OK)
######################################################################
# GET AN ITEM
######################################################################
@app.route('/items/<int:item_id>', methods=['GET'])
def get_item(item_id):
"""
Retrieve a single Item
This endpoint will return a Item based on it's id
---
tags:
- Item
produces:
- application/json
parameters:
- name: item_id
in: path
description: the id of the item you are looking for
type: integer
required: true
responses:
200:
description: An Item
schema:
$ref: '#/definitions/Item'
404:
description: Item with id item_id not found
"""
item = Item.get(item_id)
if not item:
raise NotFound("Item with id '{}' was not found.".format(item_id))
return make_response(jsonify(item.serialize()), status.HTTP_200_OK)
######################################################################
# LIST ALL ITEMS
######################################################################
@app.route('/items', methods=['GET'])
def get_item_list():
"""
Returns all of the Items in a wishlist
---
tags:
- Item
produces:
- application/json
parameters:
- name: wishlist_id
in: path
description: the id of the wishlist
type: integer
required: true
responses:
200:
description: A list of all Items in the wishlist
schema:
type: array
items:
schema:
$ref: '#/definitions/Item'
"""
items = Item.all()
results = [item.serialize() for item in items]
return make_response(jsonify(results), status.HTTP_200_OK)
######################################################################
# LIST ALL ITEMS FROM A WISHLIST
######################################################################
@app.route('/wishlists/<int:wishlist_id>/items', methods=['GET'])
def get_wishlist_item_list(wishlist_id):
"""
Returns all items from a Wishlist
This JSON returns a list of all items in a wishlist
---
tags:
- Wishlist
produces:
- application/json
parameters:
- name: wishlist_id
in: path
description: the id of the wishlist
type: integer
required: true
responses:
200:
description: A list of all Items in database
schema:
type: array
items:
schema:
$ref: '#/definitions/Item'
"""
items = Item.find_by_wishlist_id(wishlist_id)
results = [item.serialize() for item in items]
return make_response(jsonify(results), status.HTTP_200_OK)
######################################################################
# LIST WISHLISTS (QUERY or LIST ALL)
######################################################################
@app.route('/wishlists', methods=['GET'])
def get_wishlist_list():
"""
Returns the Wishlists by searching the keywords of wishlist_name or the customer_id.
This function returns wishlists based on wishlist_name or customer_id. If both the keyword and customer_id parameters are empty, it returns all the wishlists in the database.
---
tags:
- Wishlist
produces:
- application/json
definitions:
Wishlist:
type: object
properties:
id:
type: integer
customer_id:
type: integer
wishlist_name:
type: string
parameters:
- name: keyword
in: query
description: the name of the wishlist
type: string
- name: customer_id
in: query
description: the id of the customer
type: integer
responses:
200:
description: A Wishlist
schema:
$ref: '#/definitions/Wishlist'
"""
query_lists = []
customer_id = request.args.get('customer_id')
keyword = request.args.get('keyword')
if keyword:
query_lists = Wishlist.find_by_wishlist_name(keyword)
elif customer_id:
query_lists = Wishlist.find_by_customer_id(customer_id)
else:
""" Returns all of the Wishlists """
query_lists = Wishlist.all()
results = [wishlist.serialize() for wishlist in query_lists]
return make_response(jsonify(results), status.HTTP_200_OK)
######################################################################
# DELETE A WISHLIST
######################################################################
@app.route('/wishlists/<int:wishlist_id>', methods=['DELETE'])
def delete_wishlist(wishlist_id):
"""
Delete a Wishlist
This endpoint will delete a Wishlist based on the id specified in
the path
---
tags:
- Wishlist
parameters:
- name: wishlist_id
in: path
description: the id of the wishlist
type: integer
required: true
responses:
204:
description: returns no content
"""
wishlist = Wishlist.get(wishlist_id)
if wishlist:
items = Item.find_by_wishlist_id(wishlist_id)
for item in items:
item.delete()
wishlist.delete()
return make_response('', status.HTTP_204_NO_CONTENT)
######################################################################
# Clear A WISHLIST
######################################################################
@app.route('/wishlists/<int:wishlist_id>/clear', methods=['PUT'])
def clear_wishlist(wishlist_id):
"""
Clear a Wishlist
This endpoint will clear all the Items based on the id specified in
the path
---
tags:
- Wishlist
parameters:
- name: wishlist_id
in: path
description: the id of the wishlist
type: integer
required: true
responses:
204:
description: returns no content
"""
items = Item.find_by_wishlist_id(wishlist_id)
if items:
for item in items:
item.delete()
return make_response('', status.HTTP_204_NO_CONTENT)
######################################################################
# ADD AN ITEM TO A WISHLIST
######################################################################
@app.route('/wishlists/<int:wishlist_id>/items',methods=['POST'])
def add_item_to_wishlist(wishlist_id):
"""
Add an Item to an existing wishlist
This endpoint will add a wishlist item based on the data in the body that is posted
---
tags:
- Wishlist
consumes:
- application/json
parameters:
- name: wishlist_id
in: path
type: integer
description: the id of the Wishlist to add an item
required: true
- name: body
in: body
required: true
schema:
$ref: '#/definitions/Item'
responses:
201:
description: Successfully added Item to wishlist
404:
description: Wishlist with id not found
"""
check_content_type('application/json')
wishlist = Wishlist.get(wishlist_id)
if not wishlist:
raise NotFound("Wishlist with id '{}' was not found.".format(wishlist_id))
item = Item()
json_post = request.get_json()
item.deserialize(json_post,wishlist_id)
item.save()
message = item.serialize()
"""
check that item.wishlist_id equals wishlist.id
"""
check_wishlist_id = item.wishlist_id
location_url = url_for('get_wishlist', wishlist_id=wishlist.id, _external=True)
return make_response(jsonify(message), status.HTTP_201_CREATED,
{
'Location': location_url
})
######################################################################
# DELETE AN ITEM FROM A WISHLIST
######################################################################
@app.route('/wishlists/<int:wishlist_id>/items/<int:item_id>', methods=['DELETE'])
def delete_item(wishlist_id, item_id):
"""
Delete an Item
This endpoint will delete an Item from a Wishlist based on the id specified in
the path
---
tags:
- Wishlist
parameters:
- name: wishlist_id
in: path
type: integer
description: the id of the Wishlist to add an item
required: true
- name: item_id
in: path
type: integer
required: true
responses:
204:
description: Returns no content, Successfully updated Item on the wishlist
404:
description: Wishlist has no item with given Id OR Item with Id not found
"""
item = Item.get(item_id)
if item is None:
raise NotFound("Wishlist id '{}' has no item with id '{}'.".format(wishlist_id, item_id))
check_wishlist_id = item.wishlist_id
if wishlist_id != check_wishlist_id:
raise NotFound("Wishlist id '{}' does not have item with id '{}'.".format(wishlist_id, item_id))
if item:
item.delete()
return make_response('', status.HTTP_204_NO_CONTENT)
######################################################################
# UPDATE AN ITEM
######################################################################
@app.route('/wishlists/<int:wishlist_id>/items/<int:item_id>', methods=['PUT'])
def update_item(wishlist_id, item_id):
"""
Update an Item
This endpoint will update an Item based the body that is posted
---
tags:
- Item
parameters:
- name: wishlist_id
in: path
type: integer
required: true
description: id of the wishlist we wish to update
- name: item_id
in: path
type: integer
required: true
description: id of the item we wish to update
- name: body
in: body
required: true
schema:
$ref: '#/definitions/Item'
responses:
200:
description: Update was successful
404:
description: Did not find item with the given id in the wishlist
"""
check_content_type('application/json')
item = Item.get(item_id)
if not item:
raise NotFound("Item with id '{}' was not found.".format(item_id))
item.deserialize(request.get_json(), wishlist_id)
item.id = item_id
item.save()
return make_response(jsonify(item.serialize()), status.HTTP_200_OK)
######################################################################
# UPDATE A WISHLIST
######################################################################
@app.route('/wishlists/<int:wishlist_id>', methods=['PUT'])
def update_wishlists(wishlist_id):
"""
Update a Wishlist
This endpoint will update a Wishlist based the body that is posted
---
tags:
- Wishlist
parameters:
- name: wishlist_id
in: path
type: integer
required: true
- name: body
in: body
schema:
id: wishlist_entries
required:
- customer_id
- wishlist_name
properties:
customer_id:
type: integer
description: customer_id
default: "345"
wishlist_name:
type: string
description: name of the wishlist
default: "new name of the wishlist"
responses:
200:
description: Update was successful
404:
description: Did not find item with the given id in the wishlist
"""
check_content_type('application/json')
wishlist = Wishlist.get(wishlist_id)
if not wishlist:
raise NotFound("Wishlist with id '{}' was not found.".format(wishlist_id))
wishlist.deserialize(request.get_json())
wishlist.id = wishlist_id
wishlist.save()
return make_response(jsonify(wishlist.serialize()), status.HTTP_200_OK)
######################################################################
# READ ITEM DESCRIPTION
######################################################################
@app.route('/wishlists/<int:wishlist_id>/items/<int:item_id>/description', methods=['GET'])
def get_item_description(wishlist_id,item_id):
"""
Read the item description of a Item
This endpoint will return the JSON {id:"",description:""}
---
tags:
- Wishlist
parameters:
- name: wishlist_id
in: path
type: integer
required: true
- name: item_id
in: path
type: integer
required: true
responses:
200:
description: description was fetched successfully
404:
description: Did not find item with the given id in the wishlist
"""
item = Item.get(item_id)
if not item:
raise NotFound("Item with id '{}' was not found.".format(item_id))
message = {"id": item_id,"description" : item.description}
return make_response(jsonify(message), status.HTTP_200_OK)
###########################################
#DELETE ALL WISHLISTS AND ITEMS (for test)
###########################################
@app.route('/wishlists/clear', methods = ['DELETE'])
def clear_db():
"""Clear the database"""
Wishlist.clear_db()
return make_response('', status.HTTP_204_NO_CONTENT)
######################################################################
# UTILITY FUNCTIONS
######################################################################
def init_db():
""" Initialies the SQLAlchemy app """
global app
# Item.init_db(app)
Wishlist.init_db(app)
def check_content_type(content_type):
""" Checks that the media type is correct """
if request.headers['Content-Type'] == content_type:
return
app.logger.error('Invalid Content-Type: %s', request.headers['Content-Type'])
abort(415, 'Content-Type must be {}'.format(content_type))
def initialize_logging(log_level=logging.INFO):
""" Initialized the default logging to STDOUT """
if not app.debug:
print('Setting up logging...')
# Set up default logging for submodules to use STDOUT
# datefmt='%m/%d/%Y %I:%M:%S %p'
fmt = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'
logging.basicConfig(stream=sys.stdout, level=log_level, format=fmt)
# Make a new log handler that uses STDOUT
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(fmt))
handler.setLevel(log_level)
# Remove the Flask default handlers and use our own
handler_list = list(app.logger.handlers)
for log_handler in handler_list:
app.logger.removeHandler(log_handler)
app.logger.addHandler(handler)
app.logger.setLevel(log_level)
app.logger.info('Logging handler established')
######################################################################
# MAIN
######################################################################
if __name__ == "__main__":
print "========================================="
print " WISHLISTS SERVICE STARTING"
print "========================================="
initialize_logging(logging.INFO)
init_db() # make our sqlalchemy tables
app.run(host='0.0.0.0', port=int(PORT), debug=DEBUG)
|
[] |
[] |
[
"PORT",
"DEBUG"
] |
[]
|
["PORT", "DEBUG"]
|
python
| 2 | 0 | |
dynamic_application_system/dynamic_application_system/wsgi.py
|
"""
WSGI config for dynamic_application_system project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dynamic_application_system.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
bilby/core/utils.py
|
from __future__ import division
from distutils.spawn import find_executable
import logging
import os
import shutil
import sys
from math import fmod
import argparse
import inspect
import functools
import types
import subprocess
import multiprocessing
from importlib import import_module
import json
import warnings
import numpy as np
from scipy.interpolate import interp2d
from scipy.special import logsumexp
import pandas as pd
import matplotlib.pyplot as plt
logger = logging.getLogger('bilby')
# Constants: values taken from LAL 505df9dd2e69b4812f1e8eee3a6d468ba7f80674
speed_of_light = 299792458.0 # m/s
parsec = 3.085677581491367e+16 # m
solar_mass = 1.9884099021470415e+30 # Kg
radius_of_earth = 6378136.6 # m
gravitational_constant = 6.6743e-11 # m^3 kg^-1 s^-2
_TOL = 14
def infer_parameters_from_function(func):
""" Infers the arguments of a function
(except the first arg which is assumed to be the dep. variable).
Throws out *args and **kwargs type arguments
Can deal with type hinting!
Parameters
----------
func: function or method
The function or method for which the parameters should be inferred.
Returns
-------
list: A list of strings with the parameters
Raises
------
ValueError
If the object passed to the function is neither a function nor a method.
Notes
-----
In order to handle methods the ``type`` of the function is checked, and
if a method has been passed the first *two* arguments are removed rather than just the first one.
This allows the reference to the instance (conventionally named ``self``)
to be removed.
"""
if isinstance(func, types.MethodType):
return infer_args_from_function_except_n_args(func=func, n=2)
elif isinstance(func, types.FunctionType):
return _infer_args_from_function_except_for_first_arg(func=func)
else:
raise ValueError("This doesn't look like a function.")
def infer_args_from_method(method):
""" Infers all arguments of a method except for 'self'
Throws out *args and **kwargs type arguments.
Can deal with type hinting!
Returns
---------
list: A list of strings with the parameters
"""
return infer_args_from_function_except_n_args(func=method, n=1)
def infer_args_from_function_except_n_args(func, n=1):
""" Inspects a function to find its arguments, and ignoring the
first n of these, returns a list of arguments from the function's
signature.
Parameters
----------
func : function or method
The function from which the arguments should be inferred.
n : int
The number of arguments which should be ignored, starting at the beginning.
Returns
-------
parameters: list
A list of parameters of the function, omitting the first ``n``.
Extended Summary
----------------
This function is intended to allow the handling of named arguments
in both functions and methods; this is important, since the first
argument of an instance method will be the instance.
See Also
--------
infer_args_from_method: Provides the arguments for a method
infer_args_from_function: Provides the arguments for a function
infer_args_from_function_except_first_arg: Provides all but first argument of a function or method.
Examples
--------
>>> def hello(a, b, c, d):
>>> pass
>>>
>>> infer_args_from_function_except_n_args(hello, 2)
['c', 'd']
"""
try:
parameters = inspect.getfullargspec(func).args
except AttributeError:
parameters = inspect.getargspec(func).args
del(parameters[:n])
return parameters
def _infer_args_from_function_except_for_first_arg(func):
return infer_args_from_function_except_n_args(func=func, n=1)
def get_dict_with_properties(obj):
property_names = [p for p in dir(obj.__class__)
if isinstance(getattr(obj.__class__, p), property)]
dict_with_properties = obj.__dict__.copy()
for key in property_names:
dict_with_properties[key] = getattr(obj, key)
return dict_with_properties
def get_sampling_frequency(time_array):
"""
Calculate sampling frequency from a time series
Attributes:
-------
time_array: array_like
Time array to get sampling_frequency from
Returns
-------
Sampling frequency of the time series: float
Raises
-------
ValueError: If the time series is not evenly sampled.
"""
tol = 1e-10
if np.ptp(np.diff(time_array)) > tol:
raise ValueError("Your time series was not evenly sampled")
else:
return np.round(1. / (time_array[1] - time_array[0]), decimals=_TOL)
def get_sampling_frequency_and_duration_from_time_array(time_array):
"""
Calculate sampling frequency and duration from a time array
Attributes:
-------
time_array: array_like
Time array to get sampling_frequency/duration from: array_like
Returns
-------
sampling_frequency, duration: float, float
Raises
-------
ValueError: If the time_array is not evenly sampled.
"""
sampling_frequency = get_sampling_frequency(time_array)
duration = len(time_array) / sampling_frequency
return sampling_frequency, duration
def get_sampling_frequency_and_duration_from_frequency_array(frequency_array):
"""
Calculate sampling frequency and duration from a frequency array
Attributes:
-------
frequency_array: array_like
Frequency array to get sampling_frequency/duration from: array_like
Returns
-------
sampling_frequency, duration: float, float
Raises
-------
ValueError: If the frequency_array is not evenly sampled.
"""
tol = 1e-10
if np.ptp(np.diff(frequency_array)) > tol:
raise ValueError("Your frequency series was not evenly sampled")
number_of_frequencies = len(frequency_array)
delta_freq = frequency_array[1] - frequency_array[0]
duration = np.round(1 / delta_freq, decimals=_TOL)
sampling_frequency = np.round(2 * (number_of_frequencies - 1) / duration, decimals=14)
return sampling_frequency, duration
def create_time_series(sampling_frequency, duration, starting_time=0.):
"""
Parameters
----------
sampling_frequency: float
duration: float
starting_time: float, optional
Returns
-------
float: An equidistant time series given the parameters
"""
_check_legal_sampling_frequency_and_duration(sampling_frequency, duration)
number_of_samples = int(duration * sampling_frequency)
return np.linspace(start=starting_time,
stop=duration + starting_time - 1 / sampling_frequency,
num=number_of_samples)
def create_frequency_series(sampling_frequency, duration):
""" Create a frequency series with the correct length and spacing.
Parameters
-------
sampling_frequency: float
duration: float
Returns
-------
array_like: frequency series
"""
_check_legal_sampling_frequency_and_duration(sampling_frequency, duration)
number_of_samples = int(np.round(duration * sampling_frequency))
number_of_frequencies = int(np.round(number_of_samples / 2) + 1)
return np.linspace(start=0,
stop=sampling_frequency / 2,
num=number_of_frequencies)
def _check_legal_sampling_frequency_and_duration(sampling_frequency, duration):
""" By convention, sampling_frequency and duration have to multiply to an integer
This will check if the product of both parameters multiplies reasonably close
to an integer.
Parameters
-------
sampling_frequency: float
duration: float
"""
num = sampling_frequency * duration
if np.abs(num - np.round(num)) > 10**(-_TOL):
raise IllegalDurationAndSamplingFrequencyException(
'\nYour sampling frequency and duration must multiply to a number'
'up to (tol = {}) decimals close to an integer number. '
'\nBut sampling_frequency={} and duration={} multiply to {}'.format(
_TOL, sampling_frequency, duration,
sampling_frequency * duration
)
)
def ra_dec_to_theta_phi(ra, dec, gmst):
""" Convert from RA and DEC to polar coordinates on celestial sphere
Parameters
-------
ra: float
right ascension in radians
dec: float
declination in radians
gmst: float
Greenwich mean sidereal time of arrival of the signal in radians
Returns
-------
float: zenith angle in radians
float: azimuthal angle in radians
"""
phi = ra - gmst
theta = np.pi / 2 - dec
return theta, phi
def theta_phi_to_ra_dec(theta, phi, gmst):
ra = phi + gmst
dec = np.pi / 2 - theta
return ra, dec
def gps_time_to_gmst(gps_time):
"""
Convert gps time to Greenwich mean sidereal time in radians
This method assumes a constant rotation rate of earth since 00:00:00, 1 Jan. 2000
A correction has been applied to give the exact correct value for 00:00:00, 1 Jan. 2018
Error accumulates at a rate of ~0.0001 radians/decade.
Parameters
-------
gps_time: float
gps time
Returns
-------
float: Greenwich mean sidereal time in radians
"""
warnings.warn(
"Function gps_time_to_gmst deprecated, use "
"lal.GreenwichMeanSiderealTime(time) instead",
DeprecationWarning)
omega_earth = 2 * np.pi * (1 / 365.2425 + 1) / 86400.
gps_2000 = 630720013.
gmst_2000 = (6 + 39. / 60 + 51.251406103947375 / 3600) * np.pi / 12
correction_2018 = -0.00017782487379358614
sidereal_time = omega_earth * (gps_time - gps_2000) + gmst_2000 + correction_2018
gmst = fmod(sidereal_time, 2 * np.pi)
return gmst
def create_white_noise(sampling_frequency, duration):
""" Create white_noise which is then coloured by a given PSD
Parameters
-------
sampling_frequency: float
duration: float
duration of the data
Returns
-------
array_like: white noise
array_like: frequency array
"""
number_of_samples = duration * sampling_frequency
number_of_samples = int(np.round(number_of_samples))
delta_freq = 1. / duration
frequencies = create_frequency_series(sampling_frequency, duration)
norm1 = 0.5 * (1. / delta_freq)**0.5
re1 = np.random.normal(0, norm1, len(frequencies))
im1 = np.random.normal(0, norm1, len(frequencies))
htilde1 = re1 + 1j * im1
# convolve data with instrument transfer function
otilde1 = htilde1 * 1.
# set DC and Nyquist = 0
otilde1[0] = 0
# no Nyquist frequency when N=odd
if np.mod(number_of_samples, 2) == 0:
otilde1[-1] = 0
# normalise for positive frequencies and units of strain/rHz
white_noise = otilde1
# python: transpose for use with infft
white_noise = np.transpose(white_noise)
frequencies = np.transpose(frequencies)
return white_noise, frequencies
def nfft(time_domain_strain, sampling_frequency):
""" Perform an FFT while keeping track of the frequency bins. Assumes input
time series is real (positive frequencies only).
Parameters
----------
time_domain_strain: array_like
Time series of strain data.
sampling_frequency: float
Sampling frequency of the data.
Returns
-------
frequency_domain_strain, frequency_array: (array_like, array_like)
Single-sided FFT of time domain strain normalised to units of
strain / Hz, and the associated frequency_array.
"""
frequency_domain_strain = np.fft.rfft(time_domain_strain)
frequency_domain_strain /= sampling_frequency
frequency_array = np.linspace(
0, sampling_frequency / 2, len(frequency_domain_strain))
return frequency_domain_strain, frequency_array
def infft(frequency_domain_strain, sampling_frequency):
""" Inverse FFT for use in conjunction with nfft.
Parameters
----------
frequency_domain_strain: array_like
Single-sided, normalised FFT of the time-domain strain data (in units
of strain / Hz).
sampling_frequency: int, float
Sampling frequency of the data.
Returns
-------
time_domain_strain: array_like
An array of the time domain strain
"""
time_domain_strain_norm = np.fft.irfft(frequency_domain_strain)
time_domain_strain = time_domain_strain_norm * sampling_frequency
return time_domain_strain
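# Illustrative round-trip check (not part of the original module): nfft and infft
# are inverses for an even-length real time series, e.g.
#   fs = 16.0
#   t = np.arange(0, 1, 1 / fs)
#   h = np.sin(2 * np.pi * 3 * t)
#   hf, f = nfft(h, fs)
#   np.allclose(infft(hf, fs), h)  # -> True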
def setup_logger(outdir=None, label=None, log_level='INFO', print_version=False):
""" Setup logging output: call at the start of the script to use
Parameters
----------
outdir, label: str
If supplied, write the logging output to outdir/label.log
log_level: str, optional
['debug', 'info', 'warning']
Either a string from the list above, or an integer as specified
in https://docs.python.org/2/library/logging.html#logging-levels
print_version: bool
If true, print version information
"""
if type(log_level) is str:
try:
level = getattr(logging, log_level.upper())
except AttributeError:
raise ValueError('log_level {} not understood'.format(log_level))
else:
level = int(log_level)
logger = logging.getLogger('bilby')
logger.propagate = False
logger.setLevel(level)
if any([type(h) == logging.StreamHandler for h in logger.handlers]) is False:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(
'%(asctime)s %(name)s %(levelname)-8s: %(message)s', datefmt='%H:%M'))
stream_handler.setLevel(level)
logger.addHandler(stream_handler)
if any([type(h) == logging.FileHandler for h in logger.handlers]) is False:
if label:
if outdir:
check_directory_exists_and_if_not_mkdir(outdir)
else:
outdir = '.'
log_file = '{}/{}.log'.format(outdir, label)
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)-8s: %(message)s', datefmt='%H:%M'))
file_handler.setLevel(level)
logger.addHandler(file_handler)
for handler in logger.handlers:
handler.setLevel(level)
if print_version:
version = get_version_information()
logger.info('Running bilby version: {}'.format(version))
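# Illustrative usage (not part of the original module): send logging output to
# STDOUT and to outdir/label.log at DEBUG level, e.g.
#   setup_logger(outdir='outdir', label='my_run', log_level='DEBUG')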
def get_version_information():
version_file = os.path.join(
os.path.dirname(os.path.dirname(__file__)), '.version')
try:
with open(version_file, 'r') as f:
return f.readline().rstrip()
except EnvironmentError:
print("No version information file '.version' found")
def get_progress_bar(module='tqdm'):
"""
TODO: Write proper docstring
"""
if module in ['tqdm']:
try:
from tqdm import tqdm
except ImportError:
def tqdm(x, *args, **kwargs):
return x
return tqdm
elif module in ['tqdm_notebook']:
try:
from tqdm import tqdm_notebook as tqdm
except ImportError:
def tqdm(x, *args, **kwargs):
return x
return tqdm
def spherical_to_cartesian(radius, theta, phi):
""" Convert from spherical coordinates to cartesian.
Parameters
-------
radius: float
radial coordinate
theta: float
axial coordinate
phi: float
azimuthal coordinate
Returns
-------
list: cartesian vector
"""
cartesian = [radius * np.sin(theta) * np.cos(phi), radius * np.sin(theta) * np.sin(phi), radius * np.cos(theta)]
return cartesian
def check_directory_exists_and_if_not_mkdir(directory):
""" Checks if the given directory exists and creates it if it does not exist
Parameters
----------
directory: str
Name of the directory
"""
if directory == "":
return
elif not os.path.exists(directory):
os.makedirs(directory)
logger.debug('Making directory {}'.format(directory))
else:
logger.debug('Directory {} exists'.format(directory))
def set_up_command_line_arguments():
""" Sets up command line arguments that can be used to modify how scripts are run.
Returns
-------
command_line_args, command_line_parser: tuple
The command_line_args is a Namespace of the command line arguments while
the command_line_parser can be given to a new `argparse.ArgumentParser`
as a parent object from which to inherit.
Notes
-----
The command line arguments are passed initially at runtime, but this parser
does not have a `--help` option (i.e., the command line options are
available for any script which includes `import bilby`, but no help command
is available). This is done to avoid conflicts with child argparse routines
(see the example below).
Example
-------
In the following example we demonstrate how to setup a custom command line for a
project which uses bilby.
# Here we import bilby, which initialises and parses the default command-line args
>>> import bilby
# The command line arguments can then be accessed via
>>> bilby.core.utils.command_line_args
Namespace(clean=False, log_level=20, quiet=False)
# Next, we import argparse and define a new argparse object
>>> import argparse
>>> parser = argparse.ArgumentParser(parents=[bilby.core.utils.command_line_parser])
>>> parser.add_argument('--argument', type=int, default=1)
>>> args = parser.parse_args()
Namespace(clean=False, log_level=20, quiet=False, argument=1)
Placing these lines into a script, you'll be able to pass in the usual bilby default
arguments, in addition to `--argument`. To see a list of all options, call the script
with `--help`.
"""
try:
parser = argparse.ArgumentParser(
description="Command line interface for bilby scripts",
add_help=False, allow_abbrev=False)
except TypeError:
parser = argparse.ArgumentParser(
description="Command line interface for bilby scripts",
add_help=False)
parser.add_argument("-v", "--verbose", action="store_true",
help=("Increase output verbosity [logging.DEBUG]." +
" Overridden by script level settings"))
parser.add_argument("-q", "--quiet", action="store_true",
help=("Decrease output verbosity [logging.WARNING]." +
" Overridden by script level settings"))
parser.add_argument("-c", "--clean", action="store_true",
help="Force clean data, never use cached data")
parser.add_argument("-u", "--use-cached", action="store_true",
help="Force cached data and do not check its validity")
parser.add_argument("--sampler-help", nargs='?', default=False,
const='None', help="Print help for given sampler")
parser.add_argument("--bilby-test-mode", action="store_true",
help=("Used for testing only: don't run full PE, but"
" just check nothing breaks"))
parser.add_argument("--bilby-zero-likelihood-mode", action="store_true",
help=("Used for testing only: don't run full PE, but"
" just check nothing breaks"))
args, unknown_args = parser.parse_known_args()
if args.quiet:
args.log_level = logging.WARNING
elif args.verbose:
args.log_level = logging.DEBUG
else:
args.log_level = logging.INFO
return args, parser
def derivatives(vals, func, releps=1e-3, abseps=None, mineps=1e-9, reltol=1e-3,
epsscale=0.5, nonfixedidx=None):
"""
Calculate the partial derivatives of a function at a set of values. The
derivatives are calculated using the central difference, using an iterative
method to check that the values converge as step size decreases.
Parameters
----------
vals: array_like
A set of values, that are passed to a function, at which to calculate
the gradient of that function
func:
A function that takes in an array of values.
releps: float, array_like, 1e-3
The initial relative step size for calculating the derivative.
abseps: float, array_like, None
The initial absolute step size for calculating the derivative.
This overrides `releps` if set; otherwise `releps` is used.
mineps: float, 1e-9
The minimum relative step size at which to stop iterations if no
convergence is achieved.
epsscale: float, 0.5
The factor by which releps is scaled in each iteration.
nonfixedidx: array_like, None
An array of indices in `vals` that are _not_ fixed values and therefore
can have derivatives taken. If `None` then derivatives of all values
are calculated.
Returns
-------
grads: array_like
An array of gradients for each non-fixed value.
"""
if nonfixedidx is None:
nonfixedidx = range(len(vals))
if len(nonfixedidx) > len(vals):
raise ValueError("To many non-fixed values")
if max(nonfixedidx) >= len(vals) or min(nonfixedidx) < 0:
raise ValueError("Non-fixed indexes contain non-existant indices")
grads = np.zeros(len(nonfixedidx))
# maximum number of times the gradient can change sign
flipflopmax = 10.
# set steps
if abseps is None:
if isinstance(releps, float):
eps = np.abs(vals) * releps
eps[eps == 0.] = releps # if any values are zero set eps to releps
teps = releps * np.ones(len(vals))
elif isinstance(releps, (list, np.ndarray)):
if len(releps) != len(vals):
raise ValueError("Problem with input relative step sizes")
eps = np.multiply(np.abs(vals), releps)
eps[eps == 0.] = np.array(releps)[eps == 0.]
teps = releps
else:
raise RuntimeError("Relative step sizes are not a recognised type!")
else:
if isinstance(abseps, float):
eps = abseps * np.ones(len(vals))
elif isinstance(abseps, (list, np.ndarray)):
if len(abseps) != len(vals):
raise ValueError("Problem with input absolute step sizes")
eps = np.array(abseps)
else:
raise RuntimeError("Absolute step sizes are not a recognised type!")
teps = eps
# for each value in vals calculate the gradient
count = 0
for i in nonfixedidx:
# initial parameter diffs
leps = eps[i]
cureps = teps[i]
flipflop = 0
# get central finite difference
fvals = np.copy(vals)
bvals = np.copy(vals)
# central difference
fvals[i] += 0.5 * leps # change forwards distance to half eps
bvals[i] -= 0.5 * leps # change backwards distance to half eps
cdiff = (func(fvals) - func(bvals)) / leps
while 1:
fvals[i] -= 0.5 * leps # remove old step
bvals[i] += 0.5 * leps
# change the difference by a factor of two
cureps *= epsscale
if cureps < mineps or flipflop > flipflopmax:
# if no convergence set flat derivative (TODO: check if there is a better thing to do instead)
logger.warning("Derivative calculation did not converge: setting flat derivative.")
grads[count] = 0.
break
leps *= epsscale
# central difference
fvals[i] += 0.5 * leps # change forwards distance to half eps
bvals[i] -= 0.5 * leps # change backwards distance to half eps
cdiffnew = (func(fvals) - func(bvals)) / leps
if cdiffnew == cdiff:
grads[count] = cdiff
break
# check whether previous diff and current diff are the same within reltol
rat = (cdiff / cdiffnew)
if np.isfinite(rat) and rat > 0.:
# gradient has not changed sign
if np.abs(1. - rat) < reltol:
grads[count] = cdiffnew
break
else:
cdiff = cdiffnew
continue
else:
cdiff = cdiffnew
flipflop += 1
continue
count += 1
return grads
def logtrapzexp(lnf, dx):
"""
Perform trapezium rule integration for the logarithm of a function on a regular grid.
Parameters
----------
lnf: array_like
A :class:`numpy.ndarray` of values that are the natural logarithm of a function
dx: Union[array_like, float]
A :class:`numpy.ndarray` of steps sizes between values in the function, or a
single step size value.
Returns
-------
The natural logarithm of the area under the function.
"""
return np.log(dx / 2.) + logsumexp([logsumexp(lnf[:-1]), logsumexp(lnf[1:])])
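# Illustrative check (not part of the original module): for a regular grid,
# logtrapzexp agrees with the log of a direct trapezium-rule integral of exp(lnf), e.g.
#   x = np.linspace(0, 1, 101)
#   lnf = -0.5 * x ** 2
#   np.isclose(logtrapzexp(lnf, x[1] - x[0]), np.log(np.trapz(np.exp(lnf), x)))  # -> True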
class SamplesSummary(object):
""" Object to store a set of samples and calculate summary statistics
Parameters
----------
samples: array_like
Array of samples
average: str {'median', 'mean'}
Use either a median average or mean average when calculating relative
uncertainties
confidence_level: float
The default confidence interval level, defaults to 0.9
"""
def __init__(self, samples, average='median', confidence_level=.9):
self.samples = samples
self.average = average
self.confidence_level = confidence_level
@property
def samples(self):
return self._samples
@samples.setter
def samples(self, samples):
self._samples = samples
@property
def confidence_level(self):
return self._confidence_level
@confidence_level.setter
def confidence_level(self, confidence_level):
if 0 < confidence_level and confidence_level < 1:
self._confidence_level = confidence_level
else:
raise ValueError("Confidence level must be between 0 and 1")
@property
def average(self):
if self._average == 'mean':
return self.mean
elif self._average == 'median':
return self.median
@average.setter
def average(self, average):
allowed_averages = ['mean', 'median']
if average in allowed_averages:
self._average = average
else:
raise ValueError("Average {} not in allowed averages".format(average))
@property
def median(self):
return np.median(self.samples, axis=0)
@property
def mean(self):
return np.mean(self.samples, axis=0)
@property
def _lower_level(self):
""" The credible interval lower quantile value """
return (1 - self.confidence_level) / 2.
@property
def _upper_level(self):
""" The credible interval upper quantile value """
return (1 + self.confidence_level) / 2.
@property
def lower_absolute_credible_interval(self):
""" Absolute lower value of the credible interval """
return np.quantile(self.samples, self._lower_level, axis=0)
@property
def upper_absolute_credible_interval(self):
""" Absolute upper value of the credible interval """
return np.quantile(self.samples, self._upper_level, axis=0)
@property
def lower_relative_credible_interval(self):
""" Relative (to average) lower value of the credible interval """
return self.lower_absolute_credible_interval - self.average
@property
def upper_relative_credible_interval(self):
""" Relative (to average) upper value of the credible interval """
return self.upper_absolute_credible_interval - self.average
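# Illustrative usage (not part of the original module): summarise samples with a
# median average and a 90% credible interval, e.g.
#   samples = np.random.normal(0, 1, 10000)
#   summary = SamplesSummary(samples, average='median', confidence_level=0.9)
#   summary.average, summary.lower_absolute_credible_interval, summary.upper_absolute_credible_interval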
def run_commandline(cl, log_level=20, raise_error=True, return_output=True):
"""Run a string cmd as a subprocess, check for errors and return output.
Parameters
----------
cl: str
Command to run
log_level: int
See https://docs.python.org/2/library/logging.html#logging-levels,
default is '20' (INFO)
"""
logger.log(log_level, 'Now executing: ' + cl)
if return_output:
try:
out = subprocess.check_output(
cl, stderr=subprocess.STDOUT, shell=True,
universal_newlines=True)
except subprocess.CalledProcessError as e:
logger.log(log_level, 'Execution failed: {}'.format(e.output))
if raise_error:
raise
else:
out = 0
os.system('\n')
return out
else:
process = subprocess.Popen(cl, shell=True)
process.communicate()
class Counter(object):
"""
General class to count the number of times a function is called; returns the total
number of function calls
Parameters
----------
initval : int, 0
number to start counting from
"""
def __init__(self, initval=0):
self.val = multiprocessing.RawValue('i', initval)
self.lock = multiprocessing.Lock()
def increment(self):
with self.lock:
self.val.value += 1
@property
def value(self):
return self.val.value
class UnsortedInterp2d(interp2d):
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
""" Wrapper to scipy.interpolate.interp2d which preserves the input ordering.
Parameters
----------
x: See superclass
y: See superclass
dx: See superclass
dy: See superclass
assume_sorted: bool, optional
This is just a place holder to prevent a warning.
Overwriting this will not do anything
Returns
----------
array_like: See superclass
"""
unsorted_idxs = np.argsort(np.argsort(x))
return super(UnsortedInterp2d, self).__call__(x, y, dx=dx, dy=dy, assume_sorted=False)[unsorted_idxs]
# Instantiate the default argument parser at runtime
command_line_args, command_line_parser = set_up_command_line_arguments()
# Instantiate the default logging
setup_logger(print_version=False, log_level=command_line_args.log_level)
class BilbyJsonEncoder(json.JSONEncoder):
def default(self, obj):
from .prior import MultivariateGaussianDist, Prior, PriorDict
from ..gw.prior import HealPixMapPriorDist
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, PriorDict):
return {'__prior_dict__': True, 'content': obj._get_json_dict()}
if isinstance(obj, (MultivariateGaussianDist, HealPixMapPriorDist, Prior)):
return {'__prior__': True, '__module__': obj.__module__,
'__name__': obj.__class__.__name__,
'kwargs': dict(obj.get_instantiation_dict())}
try:
from astropy import cosmology as cosmo, units
if isinstance(obj, cosmo.FLRW):
return encode_astropy_cosmology(obj)
if isinstance(obj, units.Quantity):
return encode_astropy_quantity(obj)
if isinstance(obj, units.PrefixUnit):
return str(obj)
except ImportError:
logger.debug("Cannot import astropy, cannot write cosmological priors")
if isinstance(obj, np.ndarray):
return {'__array__': True, 'content': obj.tolist()}
if isinstance(obj, complex):
return {'__complex__': True, 'real': obj.real, 'imag': obj.imag}
if isinstance(obj, pd.DataFrame):
return {'__dataframe__': True, 'content': obj.to_dict(orient='list')}
if isinstance(obj, pd.Series):
return {'__series__': True, 'content': obj.to_dict()}
if inspect.isfunction(obj):
return {"__function__": True, "__module__": obj.__module__, "__name__": obj.__name__}
if inspect.isclass(obj):
return {"__class__": True, "__module__": obj.__module__, "__name__": obj.__name__}
return json.JSONEncoder.default(self, obj)
def encode_astropy_cosmology(obj):
cls_name = obj.__class__.__name__
dct = {key: getattr(obj, key) for
key in infer_args_from_method(obj.__init__)}
dct['__cosmology__'] = True
dct['__name__'] = cls_name
return dct
def encode_astropy_quantity(dct):
dct = dict(__astropy_quantity__=True, value=dct.value, unit=str(dct.unit))
if isinstance(dct['value'], np.ndarray):
dct['value'] = list(dct['value'])
return dct
def move_old_file(filename, overwrite=False):
""" Moves or removes an old file.
Parameters
----------
filename: str
Name of the file to be move
overwrite: bool, optional
Whether or not to remove the file or to change the name
to filename + '.old'
"""
if os.path.isfile(filename):
if overwrite:
logger.debug('Removing existing file {}'.format(filename))
os.remove(filename)
else:
logger.debug(
'Renaming existing file {} to {}.old'.format(filename,
filename))
shutil.move(filename, filename + '.old')
logger.debug("Saving result to {}".format(filename))
def load_json(filename, gzip):
if gzip or os.path.splitext(filename)[1].lstrip('.') == 'gz':
import gzip
with gzip.GzipFile(filename, 'r') as file:
json_str = file.read().decode('utf-8')
dictionary = json.loads(json_str, object_hook=decode_bilby_json)
else:
with open(filename, 'r') as file:
dictionary = json.load(file, object_hook=decode_bilby_json)
return dictionary
def decode_bilby_json(dct):
if dct.get("__prior_dict__", False):
cls = getattr(import_module(dct['__module__']), dct['__name__'])
obj = cls._get_from_json_dict(dct)
return obj
if dct.get("__prior__", False):
cls = getattr(import_module(dct['__module__']), dct['__name__'])
obj = cls(**dct['kwargs'])
return obj
if dct.get("__cosmology__", False):
return decode_astropy_cosmology(dct)
if dct.get("__astropy_quantity__", False):
return decode_astropy_quantity(dct)
if dct.get("__array__", False):
return np.asarray(dct["content"])
if dct.get("__complex__", False):
return complex(dct["real"], dct["imag"])
if dct.get("__dataframe__", False):
return pd.DataFrame(dct['content'])
if dct.get("__series__", False):
return pd.Series(dct['content'])
if dct.get("__function__", False) or dct.get("__class__", False):
default = ".".join([dct["__module__"], dct["__name__"]])
return getattr(import_module(dct["__module__"]), dct["__name__"], default)
return dct
def decode_astropy_cosmology(dct):
try:
from astropy import cosmology as cosmo
cosmo_cls = getattr(cosmo, dct['__name__'])
del dct['__cosmology__'], dct['__name__']
return cosmo_cls(**dct)
except ImportError:
logger.debug("Cannot import astropy, cosmological priors may not be "
"properly loaded.")
return dct
def decode_astropy_quantity(dct):
try:
from astropy import units
if dct['value'] is None:
return None
else:
del dct['__astropy_quantity__']
return units.Quantity(**dct)
except ImportError:
logger.debug("Cannot import astropy, cosmological priors may not be "
"properly loaded.")
return dct
def reflect(u):
"""
Iteratively reflect a number until it is contained in [0, 1].
This is for priors with a reflective boundary condition, all numbers in the
set `u = 2n +/- x` should be mapped to x.
For the `+` case we just take `u % 1`.
For the `-` case we take `1 - (u % 1)`.
E.g., -0.9, 1.1, and 2.9 should all map to 0.9.
Parameters
----------
u: array-like
The array of points to map to the unit cube
Returns
-------
u: array-like
The input array, modified in place.
"""
idxs_even = np.mod(u, 2) < 1
u[idxs_even] = np.mod(u[idxs_even], 1)
u[~idxs_even] = 1 - np.mod(u[~idxs_even], 1)
return u
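# Illustrative check (not part of the original module): reflect maps points back
# into [0, 1] with reflective boundaries and modifies the array in place, e.g.
#   reflect(np.array([-0.9, 1.1, 2.9]))  # -> array([0.9, 0.9, 0.9])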
def safe_file_dump(data, filename, module):
""" Safely dump data to a .pickle file
Parameters
----------
data:
data to dump
filename: str
The file to dump to
module: pickle, dill
The python module to use
"""
temp_filename = filename + ".temp"
with open(temp_filename, "wb") as file:
module.dump(data, file)
shutil.move(temp_filename, filename)
def latex_plot_format(func):
"""
Wrap the plotting function to set rcParams dependent on environment variables
The rcparams can be set directly from the env. variable `BILBY_STYLE` to
point to a matplotlib style file. Or, if `BILBY_STYLE=default` (any case) a
default setup is used, this is enabled by default. To not use any rcParams,
set `BILBY_STYLE=none`. Occasionally, issues arise with the latex
`mathdefault` command. A fix is to define this command in the rcParams. An
env. variable `BILBY_MATHDEFAULT` can be used to turn this fix on/off.
Setting `BILBY_MATHDEFAULT=1` will enable the fix, all other choices
(including undefined) will disable it. Additionally, the BILBY_STYLE and
BILBY_MATHDEFAULT arguments can be passed into any
latex_plot_format-wrapped plotting function and will be set directly.
"""
@functools.wraps(func)
def wrapper_decorator(*args, **kwargs):
from matplotlib import rcParams
if "BILBY_STYLE" in kwargs:
bilby_style = kwargs.pop("BILBY_STYLE")
else:
bilby_style = os.environ.get("BILBY_STYLE", "default")
if "BILBY_MATHDEFAULT" in kwargs:
bilby_mathdefault = kwargs.pop("BILBY_MATHDEFAULT")
else:
bilby_mathdefault = int(os.environ.get("BILBY_MATHDEFAULT", "0"))
if bilby_mathdefault == 1:
logger.debug("Setting mathdefault in the rcParams")
rcParams['text.latex.preamble'] = r'\newcommand{\mathdefault}[1][]{}'
logger.debug("Using BILBY_STYLE={}".format(bilby_style))
if bilby_style.lower() == "none":
return func(*args, **kwargs)
elif os.path.isfile(bilby_style):
plt.style.use(bilby_style)
return func(*args, **kwargs)
elif bilby_style in plt.style.available:
plt.style.use(bilby_style)
return func(*args, **kwargs)
elif bilby_style.lower() == "default":
_old_tex = rcParams["text.usetex"]
_old_serif = rcParams["font.serif"]
_old_family = rcParams["font.family"]
if find_executable("latex"):
rcParams["text.usetex"] = True
else:
rcParams["text.usetex"] = False
rcParams["font.serif"] = "Computer Modern Roman"
rcParams["font.family"] = "serif"
rcParams["text.usetex"] = _old_tex
rcParams["font.serif"] = _old_serif
rcParams["font.family"] = _old_family
return func(*args, **kwargs)
else:
logger.debug(
"Environment variable BILBY_STYLE={} not used"
.format(bilby_style)
)
return func(*args, **kwargs)
return wrapper_decorator
def safe_save_figure(fig, filename, **kwargs):
check_directory_exists_and_if_not_mkdir(os.path.dirname(filename))
from matplotlib import rcParams
try:
fig.savefig(fname=filename, **kwargs)
except RuntimeError:
logger.debug(
"Failed to save plot with tex labels turning off tex."
)
rcParams["text.usetex"] = False
fig.savefig(fname=filename, **kwargs)
def kish_log_effective_sample_size(ln_weights):
""" Calculate the Kish effective sample size from the natural-log weights
See https://en.wikipedia.org/wiki/Effective_sample_size for details
Parameters
----------
ln_weights: array
An array of the ln-weights
Returns
-------
ln_n_eff:
The natural-log of the effective sample size
"""
log_n_eff = 2 * logsumexp(ln_weights) - logsumexp(2 * ln_weights)
return log_n_eff
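# Illustrative check (not part of the original module): for N equal weights the
# Kish effective sample size is N itself, e.g.
#   np.exp(kish_log_effective_sample_size(np.zeros(100)))  # -> 100.0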
def get_function_path(func):
if hasattr(func, "__module__") and hasattr(func, "__name__"):
return "{}.{}".format(func.__module__, func.__name__)
else:
return func
def loaded_modules_dict():
module_names = sys.modules.keys()
vdict = {}
for key in module_names:
if "." not in key:
vdict[key] = str(getattr(sys.modules[key], "__version__", "N/A"))
return vdict
class IllegalDurationAndSamplingFrequencyException(Exception):
pass
class tcolors:
KEY = '\033[93m'
VALUE = '\033[91m'
HIGHLIGHT = '\033[95m'
END = '\033[0m'
|
[] |
[] |
[
"BILBY_MATHDEFAULT",
"BILBY_STYLE"
] |
[]
|
["BILBY_MATHDEFAULT", "BILBY_STYLE"]
|
python
| 2 | 0 | |
acceptance/lookup_test.go
|
package acceptance
import (
"os"
"testing"
"github.com/magicmemories/go-jerakia"
fixtures "github.com/magicmemories/go-jerakia/testing"
"github.com/stretchr/testify/assert"
)
func TestLookupBasic(t *testing.T) {
if v := os.Getenv("JERAKIA_ACC"); v == "" {
t.Skip("JERAKIA_ACC not set")
}
client, err := NewClient()
if err != nil {
t.Fatal(err)
}
lookupOpts := &jerakia.LookupOpts{
Namespace: "test",
}
actual, err := jerakia.Lookup(client, "cities", lookupOpts)
if err != nil {
t.Fatal(err)
}
expected := fixtures.LookupBasicResult
assert.Equal(t, expected, *actual)
}
func TestLookupSingleBoolResult(t *testing.T) {
if v := os.Getenv("JERAKIA_ACC"); v == "" {
t.Skip("JERAKIA_ACC not set")
}
client, err := NewClient()
if err != nil {
t.Fatal(err)
}
lookupOpts := &jerakia.LookupOpts{
Namespace: "test",
}
actual, err := jerakia.Lookup(client, "booltrue", lookupOpts)
if err != nil {
t.Fatal(err)
}
expected := fixtures.LookupSingleBoolResult
assert.Equal(t, expected, *actual)
}
func TestLookupMetadata(t *testing.T) {
if v := os.Getenv("JERAKIA_ACC"); v == "" {
t.Skip("JERAKIA_ACC not set")
}
client, err := NewClient()
if err != nil {
t.Fatal(err)
}
lookupOpts := &jerakia.LookupOpts{
Namespace: "test",
Metadata: map[string]string{
"hostname": "example",
},
}
actual, err := jerakia.Lookup(client, "users", lookupOpts)
if err != nil {
t.Fatal(err)
}
expected := fixtures.LookupMetadataResult
assert.Equal(t, expected, *actual)
}
func TestLookupHashMerge(t *testing.T) {
if v := os.Getenv("JERAKIA_ACC"); v == "" {
t.Skip("JERAKIA_ACC not set")
}
client, err := NewClient()
if err != nil {
t.Fatal(err)
}
lookupOpts := &jerakia.LookupOpts{
Namespace: "test",
Metadata: map[string]string{
"env": "dev",
},
LookupType: "cascade",
Merge: "hash",
}
actual, err := jerakia.Lookup(client, "hash", lookupOpts)
if err != nil {
t.Fatal(err)
}
expected := fixtures.LookupHashMergeResult
assert.Equal(t, expected, *actual)
}
func TestLookupKeyless(t *testing.T) {
if v := os.Getenv("JERAKIA_ACC"); v == "" {
t.Skip("JERAKIA_ACC not set")
}
client, err := NewClient()
if err != nil {
t.Fatal(err)
}
lookupOpts := &jerakia.LookupOpts{
Namespace: "keyless",
}
actual, err := jerakia.Lookup(client, "", lookupOpts)
if err != nil {
t.Fatal(err)
}
expected := fixtures.LookupKeylessResult
assert.Equal(t, expected, *actual)
}
| ["\"JERAKIA_ACC\"", "\"JERAKIA_ACC\"", "\"JERAKIA_ACC\"", "\"JERAKIA_ACC\"", "\"JERAKIA_ACC\""] | [] | ["JERAKIA_ACC"] | [] | ["JERAKIA_ACC"] | go | 1 | 0 | |
login3/login3/asgi.py
|
"""
ASGI config for login3 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'login3.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
appengine/predator/scripts/crash_printer/print-crash.py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
from datetime import date
from datetime import timedelta
import os
import sys
ROOT_DIR = os.path.join(os.path.dirname(__file__), os.path.pardir,
os.path.pardir)
sys.path.insert(1, ROOT_DIR)
from local_libs import script_util
script_util.SetUpSystemPaths()
from crash.type_enums import CrashClient
from util_scripts.crash_queries.crash_printer import crash_printer
_DATETIME_FORMAT = '%Y-%m-%d'
_TODAY = date.today().strftime(_DATETIME_FORMAT)
_A_YEAR_AGO = (date.today() - timedelta(days=365)).strftime(_DATETIME_FORMAT)
if __name__ == '__main__':
argparser = argparse.ArgumentParser(
description='Print crashes.')
argparser.add_argument(
'--since',
'-s',
default=_A_YEAR_AGO,
help=('Query data since this date (including this date). '
            'Should be in YYYY-MM-DD format. E.g. 2015-09-30. '
'Defaults to a year ago.'))
argparser.add_argument(
'--until',
'-u',
default=_TODAY,
help=('Query data until this date (not including this date). '
            'Should be in YYYY-MM-DD format. E.g. 2015-09-30. '
'Defaults to today.'))
argparser.add_argument(
'--client',
'-c',
default=CrashClient.CRACAS,
help=('Possible values are: fracas, cracas, clusterfuzz. Right now, only '
'fracas is supported.'))
argparser.add_argument(
'--app',
'-a',
default=os.getenv('APP_ID', 'predator-for-me-staging'),
      help=('App id of the App Engine app that the query needs to access. '
            'Defaults to predator-for-me-staging. You can set the environment '
            'variable by \'export APP_ID=your-app-id\' to replace the default '
            'value.'))
argparser.add_argument(
'--signature',
help='Signature of the crash.')
args = argparser.parse_args()
crash_printer.CrashPrinter(args.client, args.app,
start_date=args.since, end_date=args.until,
signature=args.signature)
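# Example invocation (the app id and date range below are placeholders, not
# values shipped with this script):
#   APP_ID=my-predator-app python print-crash.py \
#       --since 2016-01-01 --until 2016-02-01 --client cracas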
| [] | [] | ["APP_ID"] | [] | ["APP_ID"] | python | 1 | 0 | |
tools/botanist/target/gce.go
|
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package target
import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"os/user"
"strings"
"time"
"go.fuchsia.dev/fuchsia/tools/bootserver"
"go.fuchsia.dev/fuchsia/tools/lib/logger"
"go.fuchsia.dev/fuchsia/tools/lib/retry"
"go.fuchsia.dev/fuchsia/tools/serial"
"golang.org/x/crypto/ssh"
)
const (
gcemClientBinary = "./gcem_client"
gceSerialEndpoint = "ssh-serialport.googleapis.com:9600"
)
// gceSerial is a ReadWriteCloser that talks to a GCE serial port via SSH.
type gceSerial struct {
in io.WriteCloser
out io.Reader
sess *ssh.Session
client *ssh.Client
closed bool
}
func newGCESerial(pkeyPath, username, endpoint string) (*gceSerial, error) {
// Load the pkey and use it to dial the GCE serial port.
data, err := ioutil.ReadFile(pkeyPath)
if err != nil {
return nil, err
}
signer, err := ssh.ParsePrivateKey(data)
if err != nil {
return nil, err
}
sshConfig := &ssh.ClientConfig{
User: username,
Auth: []ssh.AuthMethod{
ssh.PublicKeys(signer),
},
// TODO(rudymathu): Replace this with google ssh serial port key.
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
client, err := ssh.Dial("tcp", endpoint, sshConfig)
if err != nil {
return nil, err
}
// Create an SSH shell and wire up stdio.
session, err := client.NewSession()
if err != nil {
return nil, err
}
out, err := session.StdoutPipe()
if err != nil {
return nil, err
}
in, err := session.StdinPipe()
if err != nil {
return nil, err
}
if err := session.Shell(); err != nil {
return nil, err
}
return &gceSerial{
in: in,
out: out,
sess: session,
client: client,
}, nil
}
func (s *gceSerial) Read(b []byte) (int, error) {
if s.closed {
return 0, os.ErrClosed
}
return s.out.Read(b)
}
func (s *gceSerial) Write(b []byte) (int, error) {
// Chunk out writes to 128 bytes or less. SSH connections to GCE do not
// seem to properly handle longer messages.
maxChunkSize := 128
numChunks := len(b) / maxChunkSize
if len(b)%maxChunkSize != 0 {
numChunks++
}
bytesWritten := 0
for i := 0; i < numChunks; i++ {
start := i * maxChunkSize
end := start + maxChunkSize
if end > len(b) {
end = len(b)
}
n, err := s.in.Write(b[start:end])
bytesWritten += n
if err != nil {
return bytesWritten, err
}
time.Sleep(100 * time.Millisecond)
}
return bytesWritten, nil
}
func (s *gceSerial) Close() error {
multierr := ""
if err := s.in.Close(); err != nil {
multierr += fmt.Sprintf("failed to close serial SSH session input pipe: %s, ", err)
}
if err := s.sess.Close(); err != nil {
multierr += fmt.Sprintf("failed to close serial SSH session: %s, ", err)
}
if err := s.client.Close(); err != nil {
multierr += fmt.Sprintf("failed to close serial SSH client: %s", err)
}
s.closed = true
if multierr != "" {
return errors.New(multierr)
}
return nil
}
// GCEConfig represents the on disk config used by botanist to launch a GCE
// instance.
type GCEConfig struct {
// MediatorURL is the url of the GCE Mediator.
MediatorURL string `json:"mediator_url"`
// BuildID is the swarming task ID of the associated build.
BuildID string `json:"build_id"`
// CloudProject is the cloud project to create the GCE Instance in.
CloudProject string `json:"cloud_project"`
// SwarmingServer is the URL to the swarming server that fed us this
// task.
SwarmingServer string `json:"swarming_server"`
// MachineShape is the shape of the instance we want to create.
MachineShape string `json:"machine_shape"`
// InstanceName is the name of the instance.
InstanceName string `json:"instance_name"`
// Zone is the cloud zone in which the instance lives.
Zone string `json:"zone"`
}
// GCETarget represents a GCE VM running Fuchsia.
type GCETarget struct {
config GCEConfig
currentUser string
ipv4 net.IP
loggerCtx context.Context
opts Options
pubkeyPath string
serial io.ReadWriteCloser
}
// createInstanceRes is returned by the gcem_client's create-instance
// subcommand. Its schema is determined by the CreateInstanceRes proto
// message in http://google3/turquoise/infra/gce_mediator/proto/mediator.proto.
type createInstanceRes struct {
InstanceName string `json:"instanceName"`
Zone string `json:"zone"`
}
// NewGCETarget creates, starts, and connects to the serial console of a GCE VM.
func NewGCETarget(ctx context.Context, config GCEConfig, opts Options) (*GCETarget, error) {
// Generate an SSH keypair. We do this even if the caller has provided
// an SSH key in opts because we require a very specific input format:
// PEM encoded, PKCS1 marshaled RSA keys.
pkeyPath, err := generatePrivateKey()
if err != nil {
return nil, err
}
opts.SSHKey = pkeyPath
pubkeyPath, err := generatePublicKey(opts.SSHKey)
if err != nil {
return nil, err
}
logger.Infof(ctx, "generated SSH key pair for use with GCE instance")
u, err := user.Current()
if err != nil {
return nil, err
}
g := &GCETarget{
config: config,
currentUser: u.Username,
loggerCtx: ctx,
opts: opts,
pubkeyPath: pubkeyPath,
}
if config.InstanceName == "" && config.Zone == "" {
// If the instance has not been created, create it now.
logger.Infof(ctx, "creating the GCE instance")
expBackoff := retry.NewExponentialBackoff(15*time.Second, 2*time.Minute, 2)
if err := retry.Retry(ctx, expBackoff, g.createInstance, nil); err != nil {
return nil, err
}
logger.Infof(ctx, "successfully created GCE instance: Name: %s, Zone: %s", g.config.InstanceName, g.config.Zone)
} else {
// The instance has already been created, so add the SSH key to it.
logger.Infof(ctx, "adding the SSH public key to GCE instance %s", g.config.InstanceName)
expBackoff := retry.NewExponentialBackoff(15*time.Second, 2*time.Minute, 2)
if err := retry.Retry(ctx, expBackoff, g.addSSHKey, nil); err != nil {
return nil, err
}
logger.Infof(ctx, "successfully added SSH key")
}
// Connect to the serial line.
logger.Infof(ctx, "setting up the serial connection to the GCE instance")
expBackoff := retry.NewExponentialBackoff(15*time.Second, 2*time.Minute, 2)
connectSerialErrs := make(chan error)
defer close(connectSerialErrs)
go logErrors(ctx, "connectToSerial()", connectSerialErrs)
if err := retry.Retry(ctx, expBackoff, g.connectToSerial, connectSerialErrs); err != nil {
return nil, err
}
logger.Infof(ctx, "successfully connected to serial")
// If we're running a non-bringup configuration, we need to provision an SSH key.
if !opts.Netboot {
if err := g.provisionSSHKey(ctx); err != nil {
return nil, err
}
}
return g, nil
}
func logErrors(ctx context.Context, functionName string, errs <-chan error) {
for {
err, more := <-errs
if err != nil {
logger.Errorf(ctx, "%s failed: %s, retrying", functionName, err)
}
if !more {
return
}
}
}
// provisionSSHKey provisions an SSH key over the serial connection.
func (g *GCETarget) provisionSSHKey(ctx context.Context) error {
if g.serial == nil {
return fmt.Errorf("serial is not connected")
}
time.Sleep(2 * time.Minute)
logger.Infof(g.loggerCtx, "provisioning SSH key over serial")
p, err := ioutil.ReadFile(g.pubkeyPath)
if err != nil {
return err
}
pubkey := string(p)
pubkey = strings.TrimSuffix(pubkey, "\n")
pubkey = fmt.Sprintf("\"%s %s\"", pubkey, g.currentUser)
cmds := []serial.Command{
{Cmd: []string{"/pkgfs/packages/sshd-host/0/bin/hostkeygen"}},
{Cmd: []string{"echo", pubkey, ">", "/data/ssh/authorized_keys"}},
}
if err := serial.RunCommands(ctx, g.serial, cmds); err != nil {
return err
}
logger.Infof(g.loggerCtx, "successfully provisioned SSH key")
return nil
}
func (g *GCETarget) connectToSerial() error {
username := fmt.Sprintf(
"%s.%s.%s.%s.%s",
g.config.CloudProject,
g.config.Zone,
g.config.InstanceName,
g.currentUser,
"replay-from=0",
)
serial, err := newGCESerial(g.opts.SSHKey, username, gceSerialEndpoint)
g.serial = serial
return err
}
func (g *GCETarget) addSSHKey() error {
invocation := []string{
gcemClientBinary,
"add-ssh-key",
"-host", g.config.MediatorURL,
"-project", g.config.CloudProject,
"-instance-name", g.config.InstanceName,
"-zone", g.config.Zone,
"-user", g.currentUser,
"-pubkey", g.pubkeyPath,
}
logger.Infof(g.loggerCtx, "GCE Mediator client command: %s", invocation)
cmd := exec.Command(invocation[0], invocation[1:]...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
func (g *GCETarget) createInstance() error {
taskID := os.Getenv("SWARMING_TASK_ID")
if taskID == "" {
return errors.New("task did not specify SWARMING_TASK_ID")
}
invocation := []string{
gcemClientBinary,
"create-instance",
"-host", g.config.MediatorURL,
"-project", g.config.CloudProject,
"-build-id", g.config.BuildID,
"-task-id", taskID,
"-swarming-host", g.config.SwarmingServer,
"-machine-shape", g.config.MachineShape,
"-user", g.currentUser,
"-pubkey", g.pubkeyPath,
}
logger.Infof(g.loggerCtx, "GCE Mediator client command: %s", invocation)
cmd := exec.Command(invocation[0], invocation[1:]...)
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
}
cmd.Stderr = os.Stderr
if err := cmd.Start(); err != nil {
return err
}
var res createInstanceRes
if err := json.NewDecoder(stdout).Decode(&res); err != nil {
return err
}
if err := cmd.Wait(); err != nil {
return err
}
g.config.InstanceName = res.InstanceName
g.config.Zone = res.Zone
return nil
}
// generatePrivateKey generates a 2048 bit RSA private key, writes it to
// a temporary file, and returns the path to the key.
func generatePrivateKey() (string, error) {
pkey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return "", err
}
f, err := ioutil.TempFile("", "gce_pkey")
if err != nil {
return "", err
}
defer f.Close()
pemBlock := &pem.Block{
Type: "RSA PRIVATE KEY",
Headers: nil,
Bytes: x509.MarshalPKCS1PrivateKey(pkey),
}
return f.Name(), pem.Encode(f, pemBlock)
}
// generatePublicKey reads the private key at path pkey and generates a public
// key in Authorized Keys format. Returns the path to the public key file.
func generatePublicKey(pkeyFile string) (string, error) {
if pkeyFile == "" {
return "", errors.New("no private key file provided")
}
data, err := ioutil.ReadFile(pkeyFile)
if err != nil {
return "", err
}
block, _ := pem.Decode(data)
pkey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
return "", err
}
pubkey, err := ssh.NewPublicKey(pkey.Public())
if err != nil {
return "", err
}
f, err := ioutil.TempFile("", "gce_pubkey")
if err != nil {
return "", err
}
defer f.Close()
_, err = f.Write(ssh.MarshalAuthorizedKey(pubkey))
return f.Name(), err
}
func (g *GCETarget) Address() net.IP {
if g.ipv4 == nil {
fqdn := fmt.Sprintf("%s.%s.c.%s.internal", g.config.InstanceName, g.config.Zone, g.config.CloudProject)
addr, err := net.ResolveIPAddr("ip4", fqdn)
if err != nil {
logger.Infof(g.loggerCtx, "failed to resolve IPv4 of instance %s: %s", g.config.InstanceName, err)
return nil
}
g.ipv4 = addr.IP
}
return g.ipv4
}
func (g *GCETarget) Nodename() string {
// TODO(rudymathu): fill in nodename
return ""
}
func (g *GCETarget) Serial() io.ReadWriteCloser {
return g.serial
}
func (g *GCETarget) SSHKey() string {
return g.opts.SSHKey
}
func (g *GCETarget) Start(ctx context.Context, _ []bootserver.Image, args []string, _ string) error {
return nil
}
func (g *GCETarget) Stop(context.Context) error {
return g.serial.Close()
}
func (g *GCETarget) Wait(context.Context) error {
return ErrUnimplemented
}
| ["\"SWARMING_TASK_ID\""] | [] | ["SWARMING_TASK_ID"] | [] | ["SWARMING_TASK_ID"] | go | 1 | 0 | |
sympy/core/numbers.py
|
from __future__ import print_function, division
import decimal
import fractions
import math
import warnings
import re as regex
from collections import defaultdict
from .containers import Tuple
from .sympify import converter, sympify, _sympify, SympifyError, _convert_numpy_types
from .singleton import S, Singleton
from .expr import Expr, AtomicExpr
from .decorators import _sympifyit
from .cache import cacheit, clear_cache
from .logic import fuzzy_not
from sympy.core.compatibility import (
as_int, integer_types, long, string_types, with_metaclass, HAS_GMPY,
SYMPY_INTS, int_info)
import mpmath
import mpmath.libmp as mlib
from mpmath.libmp import mpf_pow, mpf_pi, mpf_e, phi_fixed
from mpmath.ctx_mp import mpnumeric
from mpmath.libmp.libmpf import (
finf as _mpf_inf, fninf as _mpf_ninf,
fnan as _mpf_nan, fzero as _mpf_zero, _normalize as mpf_normalize,
prec_to_dps)
from sympy.utilities.misc import debug, filldedent
from .evaluate import global_evaluate
from sympy.utilities.exceptions import SymPyDeprecationWarning
rnd = mlib.round_nearest
_LOG2 = math.log(2)
def comp(z1, z2, tol=None):
"""Return a bool indicating whether the error between z1 and z2 is <= tol.
    If ``tol`` is None then True will be returned if there is no significant
    difference between the numbers: ``abs(z1 - z2)*10**p <= 1/2``, where ``p``
    is the lower of the precisions of the values. A comparison of strings will
be made if ``z1`` is a Number and a) ``z2`` is a string or b) ``tol`` is ''
and ``z2`` is a Number.
When ``tol`` is a nonzero value, if z2 is non-zero and ``|z1| > 1``
the error is normalized by ``|z1|``, so if you want to see if the
absolute error between ``z1`` and ``z2`` is <= ``tol`` then call this
as ``comp(z1 - z2, 0, tol)``.
"""
if type(z2) is str:
if not isinstance(z1, Number):
raise ValueError('when z2 is a str z1 must be a Number')
return str(z1) == z2
if not z1:
z1, z2 = z2, z1
if not z1:
return True
if not tol:
if tol is None:
if type(z2) is str and getattr(z1, 'is_Number', False):
return str(z1) == z2
a, b = Float(z1), Float(z2)
return int(abs(a - b)*10**prec_to_dps(
min(a._prec, b._prec)))*2 <= 1
elif all(getattr(i, 'is_Number', False) for i in (z1, z2)):
return z1._prec == z2._prec and str(z1) == str(z2)
raise ValueError('exact comparison requires two Numbers')
diff = abs(z1 - z2)
az1 = abs(z1)
if z2 and az1 > 1:
return diff/az1 <= tol
else:
return diff <= tol
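# A minimal usage sketch of ``comp`` (the helper name below is illustrative,
# not part of the public API). Per the docstring, an absolute-error check is
# phrased as ``comp(z1 - z2, 0, tol)``.
def _comp_example():
    a = Float('1.0001')
    b = Float('1.0')
    # |a - b| = 1e-4 <= 1e-3, so this evaluates truthy
    return comp(a - b, 0, 1e-3)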
def mpf_norm(mpf, prec):
"""Return the mpf tuple normalized appropriately for the indicated
precision after doing a check to see if zero should be returned or
    not when the mantissa is 0. ``mpf_normalize`` always assumes that this
    is zero, but it may not be, since the mpf values "+inf",
    "-inf" and "nan" have a mantissa of zero, too.
Note: this is not intended to validate a given mpf tuple, so sending
mpf tuples that were not created by mpmath may produce bad results. This
is only a wrapper to ``mpf_normalize`` which provides the check for non-
zero mpfs that have a 0 for the mantissa.
"""
sign, man, expt, bc = mpf
if not man:
# hack for mpf_normalize which does not do this;
# it assumes that if man is zero the result is 0
# (see issue 6639)
if not bc:
return _mpf_zero
else:
# don't change anything; this should already
# be a well formed mpf tuple
return mpf
# Necessary if mpmath is using the gmpy backend
from mpmath.libmp.backend import MPZ
rv = mpf_normalize(sign, MPZ(man), expt, bc, prec, rnd)
return rv
# TODO: we should use the warnings module
_errdict = {"divide": False}
def seterr(divide=False):
"""
Should sympy raise an exception on 0/0 or return a nan?
divide == True .... raise an exception
divide == False ... return nan
"""
if _errdict["divide"] != divide:
clear_cache()
_errdict["divide"] = divide
def _as_integer_ratio(p):
neg_pow, man, expt, bc = getattr(p, '_mpf_', mpmath.mpf(p)._mpf_)
p = [1, -1][neg_pow % 2]*man
if expt < 0:
q = 2**-expt
else:
q = 1
p *= 2**expt
return int(p), int(q)
def _decimal_to_Rational_prec(dec):
"""Convert an ordinary decimal instance to a Rational."""
if not dec.is_finite():
raise TypeError("dec must be finite, got %s." % dec)
s, d, e = dec.as_tuple()
prec = len(d)
if e >= 0: # it's an integer
rv = Integer(int(dec))
else:
s = (-1)**s
d = sum([di*10**i for i, di in enumerate(reversed(d))])
rv = Rational(s*d, 10**-e)
return rv, prec
def _literal_float(f):
"""Return True if n can be interpreted as a floating point number."""
pat = r"[-+]?((\d*\.\d+)|(\d+\.?))(eE[-+]?\d+)?"
return bool(regex.match(pat, f))
# (a,b) -> gcd(a,b)
_gcdcache = {}
# TODO caching with decorator, but not to degrade performance
def igcd(*args):
"""Computes nonnegative integer greatest common divisor.
The algorithm is based on the well known Euclid's algorithm. To
improve speed, igcd() has its own caching mechanism implemented.
Examples
========
>>> from sympy.core.numbers import igcd
>>> igcd(2, 4)
2
>>> igcd(5, 10, 15)
5
"""
if len(args) < 2:
raise TypeError(
'igcd() takes at least 2 arguments (%s given)' % len(args))
if 1 in args:
a = 1
k = 0
else:
a = abs(as_int(args[0]))
k = 1
if a != 1:
while k < len(args):
b = args[k]
k += 1
try:
a = _gcdcache[(a, b)]
except KeyError:
b = as_int(b)
if not b:
continue
if b == 1:
a = 1
break
if b < 0:
b = -b
t = a, b
a = igcd2(a, b)
_gcdcache[t] = _gcdcache[t[1], t[0]] = a
while k < len(args):
ok = as_int(args[k])
k += 1
return a
try:
from math import gcd as igcd2
except ImportError:
def igcd2(a, b):
"""Compute gcd of two Python integers a and b."""
if (a.bit_length() > BIGBITS and
b.bit_length() > BIGBITS):
return igcd_lehmer(a, b)
a, b = abs(a), abs(b)
while b:
a, b = b, a % b
return a
# Use Lehmer's algorithm only for very large numbers.
# The limit could be different on Python 2.7 and 3.x.
# If so, then this could be defined in compatibility.py.
BIGBITS = 5000
def igcd_lehmer(a, b):
"""Computes greatest common divisor of two integers.
Euclid's algorithm for the computation of the greatest
common divisor gcd(a, b) of two (positive) integers
a and b is based on the division identity
a = q*b + r,
where the quotient q and the remainder r are integers
and 0 <= r < b. Then each common divisor of a and b
divides r, and it follows that gcd(a, b) == gcd(b, r).
The algorithm works by constructing the sequence
r0, r1, r2, ..., where r0 = a, r1 = b, and each rn
is the remainder from the division of the two preceding
elements.
In Python, q = a // b and r = a % b are obtained by the
floor division and the remainder operations, respectively.
These are the most expensive arithmetic operations, especially
for large a and b.
Lehmer's algorithm is based on the observation that the quotients
qn = r(n-1) // rn are in general small integers even
    when a and b are very large. Hence the quotients can usually
    be determined from a relatively small number of most
significant bits.
The efficiency of the algorithm is further enhanced by not
computing each long remainder in Euclid's sequence. The remainders
are linear combinations of a and b with integer coefficients
derived from the quotients. The coefficients can be computed
as far as the quotients can be determined from the chosen
most significant parts of a and b. Only then a new pair of
consecutive remainders is computed and the algorithm starts
anew with this pair.
References
==========
.. [1] https://en.wikipedia.org/wiki/Lehmer%27s_GCD_algorithm
"""
a, b = abs(as_int(a)), abs(as_int(b))
if a < b:
a, b = b, a
# The algorithm works by using one or two digit division
# whenever possible. The outer loop will replace the
# pair (a, b) with a pair of shorter consecutive elements
# of the Euclidean gcd sequence until a and b
# fit into two Python (long) int digits.
nbits = 2*int_info.bits_per_digit
while a.bit_length() > nbits and b != 0:
# Quotients are mostly small integers that can
# be determined from most significant bits.
n = a.bit_length() - nbits
x, y = int(a >> n), int(b >> n) # most significant bits
# Elements of the Euclidean gcd sequence are linear
# combinations of a and b with integer coefficients.
        # Compute the coefficients of consecutive pairs
# a' = A*a + B*b, b' = C*a + D*b
# using small integer arithmetic as far as possible.
A, B, C, D = 1, 0, 0, 1 # initial values
while True:
# The coefficients alternate in sign while looping.
# The inner loop combines two steps to keep track
# of the signs.
# At this point we have
# A > 0, B <= 0, C <= 0, D > 0,
# x' = x + B <= x < x" = x + A,
# y' = y + C <= y < y" = y + D,
# and
# x'*N <= a' < x"*N, y'*N <= b' < y"*N,
# where N = 2**n.
# Now, if y' > 0, and x"//y' and x'//y" agree,
# then their common value is equal to q = a'//b'.
# In addition,
# x'%y" = x' - q*y" < x" - q*y' = x"%y',
# and
# (x'%y")*N < a'%b' < (x"%y')*N.
# On the other hand, we also have x//y == q,
# and therefore
# x'%y" = x + B - q*(y + D) = x%y + B',
# x"%y' = x + A - q*(y + C) = x%y + A',
# where
# B' = B - q*D < 0, A' = A - q*C > 0.
if y + C <= 0:
break
q = (x + A) // (y + C)
# Now x'//y" <= q, and equality holds if
# x' - q*y" = (x - q*y) + (B - q*D) >= 0.
# This is a minor optimization to avoid division.
x_qy, B_qD = x - q*y, B - q*D
if x_qy + B_qD < 0:
break
# Next step in the Euclidean sequence.
x, y = y, x_qy
A, B, C, D = C, D, A - q*C, B_qD
# At this point the signs of the coefficients
# change and their roles are interchanged.
# A <= 0, B > 0, C > 0, D < 0,
# x' = x + A <= x < x" = x + B,
# y' = y + D < y < y" = y + C.
if y + D <= 0:
break
q = (x + B) // (y + D)
x_qy, A_qC = x - q*y, A - q*C
if x_qy + A_qC < 0:
break
x, y = y, x_qy
A, B, C, D = C, D, A_qC, B - q*D
# Now the conditions on top of the loop
# are again satisfied.
# A > 0, B < 0, C < 0, D > 0.
if B == 0:
# This can only happen when y == 0 in the beginning
# and the inner loop does nothing.
# Long division is forced.
a, b = b, a % b
continue
# Compute new long arguments using the coefficients.
a, b = A*a + B*b, C*a + D*b
# Small divisors. Finish with the standard algorithm.
while b:
a, b = b, a % b
return a
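# An illustrative cross-check (assumes Python 3.5+ for math.gcd): for very
# large integers igcd_lehmer should agree with the standard Euclidean gcd;
# it just reaches the answer via cheaper small-integer quotient steps.
def _igcd_lehmer_example():
    import math
    a = 2**4000 + 12345
    b = 3**2500 + 67890
    assert igcd_lehmer(a, b) == math.gcd(a, b)
    return igcd_lehmer(a, b)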
def ilcm(*args):
"""Computes integer least common multiple.
Examples
========
>>> from sympy.core.numbers import ilcm
>>> ilcm(5, 10)
10
>>> ilcm(7, 3)
21
>>> ilcm(5, 10, 15)
30
"""
if len(args) < 2:
raise TypeError(
'ilcm() takes at least 2 arguments (%s given)' % len(args))
if 0 in args:
return 0
a = args[0]
for b in args[1:]:
a = a*b // igcd(a, b)
return a
def igcdex(a, b):
"""Returns x, y, g such that g = x*a + y*b = gcd(a, b).
>>> from sympy.core.numbers import igcdex
>>> igcdex(2, 3)
(-1, 1, 1)
>>> igcdex(10, 12)
(-1, 1, 2)
>>> x, y, g = igcdex(100, 2004)
>>> x, y, g
(-20, 1, 4)
>>> x*100 + y*2004
4
"""
if (not a) and (not b):
return (0, 1, 0)
if not a:
return (0, b//abs(b), abs(b))
if not b:
return (a//abs(a), 0, abs(a))
if a < 0:
a, x_sign = -a, -1
else:
x_sign = 1
if b < 0:
b, y_sign = -b, -1
else:
y_sign = 1
x, y, r, s = 1, 0, 0, 1
while b:
(c, q) = (a % b, a // b)
(a, b, r, s, x, y) = (b, c, x - q*r, y - q*s, r, s)
return (x*x_sign, y*y_sign, a)
def mod_inverse(a, m):
"""
Return the number c such that, ( a * c ) % m == 1 where
c has the same sign as a. If no such value exists, a
ValueError is raised.
Examples
========
>>> from sympy import S
>>> from sympy.core.numbers import mod_inverse
    Suppose we wish to find the multiplicative inverse x of
    3 modulo 11. This is the same as finding x such
    that 3 * x = 1 (mod 11). One value of x that satisfies
    this congruence is 4, because 3 * 4 = 12 and 12 = 1 (mod 11).
    This is the value returned by mod_inverse:
>>> mod_inverse(3, 11)
4
>>> mod_inverse(-3, 11)
-4
When there is a common factor between the numerators of
``a`` and ``m`` the inverse does not exist:
>>> mod_inverse(2, 4)
Traceback (most recent call last):
...
ValueError: inverse of 2 mod 4 does not exist
>>> mod_inverse(S(2)/7, S(5)/2)
7/2
References
==========
- https://en.wikipedia.org/wiki/Modular_multiplicative_inverse
- https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
"""
c = None
try:
a, m = as_int(a), as_int(m)
if m > 1:
x, y, g = igcdex(a, m)
if g == 1:
c = x % m
if a < 0:
c -= m
except ValueError:
a, m = sympify(a), sympify(m)
if not (a.is_number and m.is_number):
raise TypeError(filldedent('''
Expected numbers for arguments; symbolic `mod_inverse`
is not implemented
but symbolic expressions can be handled with the
similar function,
sympy.polys.polytools.invert'''))
big = (m > 1)
if not (big is S.true or big is S.false):
raise ValueError('m > 1 did not evaluate; try to simplify %s' % m)
elif big:
c = 1/a
if c is None:
raise ValueError('inverse of %s (mod %s) does not exist' % (a, m))
return c
class Number(AtomicExpr):
"""
Represents any kind of number in sympy.
Floating point numbers are represented by the Float class.
Integer numbers (of any size), together with rational numbers (again,
there is no limit on their size) are represented by the Rational class.
If you want to represent, for example, ``1+sqrt(2)``, then you need to do::
Rational(1) + sqrt(Rational(2))
"""
is_commutative = True
is_number = True
is_Number = True
__slots__ = []
# Used to make max(x._prec, y._prec) return x._prec when only x is a float
_prec = -1
def __new__(cls, *obj):
if len(obj) == 1:
obj = obj[0]
if isinstance(obj, Number):
return obj
if isinstance(obj, SYMPY_INTS):
return Integer(obj)
if isinstance(obj, tuple) and len(obj) == 2:
return Rational(*obj)
if isinstance(obj, (float, mpmath.mpf, decimal.Decimal)):
return Float(obj)
if isinstance(obj, string_types):
val = sympify(obj)
if isinstance(val, Number):
return val
else:
raise ValueError('String "%s" does not denote a Number' % obj)
msg = "expected str|int|long|float|Decimal|Number object but got %r"
raise TypeError(msg % type(obj).__name__)
def invert(self, other, *gens, **args):
from sympy.polys.polytools import invert
if getattr(other, 'is_number', True):
return mod_inverse(self, other)
return invert(self, other, *gens, **args)
def __divmod__(self, other):
from .containers import Tuple
from sympy.functions.elementary.complexes import sign
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
raise TypeError(msg % (type(self).__name__, type(other).__name__))
if not other:
raise ZeroDivisionError('modulo by zero')
if self.is_Integer and other.is_Integer:
return Tuple(*divmod(self.p, other.p))
else:
rat = self/other
w = sign(rat)*int(abs(rat)) # = rat.floor()
r = self - other*w
return Tuple(w, r)
def __rdivmod__(self, other):
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
raise TypeError(msg % (type(other).__name__, type(self).__name__))
return divmod(other, self)
def __round__(self, *args):
return round(float(self), *args)
def _as_mpf_val(self, prec):
"""Evaluation of mpf tuple accurate to at least prec bits."""
raise NotImplementedError('%s needs ._as_mpf_val() method' %
(self.__class__.__name__))
def _eval_evalf(self, prec):
return Float._new(self._as_mpf_val(prec), prec)
def _as_mpf_op(self, prec):
prec = max(prec, self._prec)
return self._as_mpf_val(prec), prec
def __float__(self):
return mlib.to_float(self._as_mpf_val(53))
def floor(self):
raise NotImplementedError('%s needs .floor() method' %
(self.__class__.__name__))
def ceiling(self):
raise NotImplementedError('%s needs .ceiling() method' %
(self.__class__.__name__))
def _eval_conjugate(self):
return self
def _eval_order(self, *symbols):
from sympy import Order
# Order(5, x, y) -> Order(1,x,y)
return Order(S.One, *symbols)
def _eval_subs(self, old, new):
if old == -self:
return -new
return self # there is no other possibility
def _eval_is_finite(self):
return True
@classmethod
def class_key(cls):
return 1, 0, 'Number'
@cacheit
def sort_key(self, order=None):
return self.class_key(), (0, ()), (), self
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number) and global_evaluate[0]:
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
return S.Infinity
elif other is S.NegativeInfinity:
return S.NegativeInfinity
return AtomicExpr.__add__(self, other)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number) and global_evaluate[0]:
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
return S.NegativeInfinity
elif other is S.NegativeInfinity:
return S.Infinity
return AtomicExpr.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number) and global_evaluate[0]:
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
if self.is_zero:
return S.NaN
elif self.is_positive:
return S.Infinity
else:
return S.NegativeInfinity
elif other is S.NegativeInfinity:
if self.is_zero:
return S.NaN
elif self.is_positive:
return S.NegativeInfinity
else:
return S.Infinity
elif isinstance(other, Tuple):
return NotImplemented
return AtomicExpr.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number) and global_evaluate[0]:
if other is S.NaN:
return S.NaN
elif other is S.Infinity or other is S.NegativeInfinity:
return S.Zero
return AtomicExpr.__div__(self, other)
__truediv__ = __div__
def __eq__(self, other):
raise NotImplementedError('%s needs .__eq__() method' %
(self.__class__.__name__))
def __ne__(self, other):
raise NotImplementedError('%s needs .__ne__() method' %
(self.__class__.__name__))
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
raise NotImplementedError('%s needs .__lt__() method' %
(self.__class__.__name__))
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
raise NotImplementedError('%s needs .__le__() method' %
(self.__class__.__name__))
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
return _sympify(other).__lt__(self)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
return _sympify(other).__le__(self)
def __hash__(self):
return super(Number, self).__hash__()
def is_constant(self, *wrt, **flags):
return True
def as_coeff_mul(self, *deps, **kwargs):
# a -> c*t
if self.is_Rational or not kwargs.pop('rational', True):
return self, tuple()
elif self.is_negative:
return S.NegativeOne, (-self,)
return S.One, (self,)
def as_coeff_add(self, *deps):
# a -> c + t
if self.is_Rational:
return self, tuple()
return S.Zero, (self,)
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
if rational and not self.is_Rational:
return S.One, self
return (self, S.One) if self else (S.One, self)
def as_coeff_Add(self, rational=False):
"""Efficiently extract the coefficient of a summation. """
if not rational:
return self, S.Zero
return S.Zero, self
def gcd(self, other):
"""Compute GCD of `self` and `other`. """
from sympy.polys import gcd
return gcd(self, other)
def lcm(self, other):
"""Compute LCM of `self` and `other`. """
from sympy.polys import lcm
return lcm(self, other)
def cofactors(self, other):
"""Compute GCD and cofactors of `self` and `other`. """
from sympy.polys import cofactors
return cofactors(self, other)
class Float(Number):
"""Represent a floating-point number of arbitrary precision.
Examples
========
>>> from sympy import Float
>>> Float(3.5)
3.50000000000000
>>> Float(3)
3.00000000000000
Creating Floats from strings (and Python ``int`` and ``long``
types) will give a minimum precision of 15 digits, but the
precision will automatically increase to capture all digits
entered.
>>> Float(1)
1.00000000000000
>>> Float(10**20)
100000000000000000000.
>>> Float('1e20')
100000000000000000000.
However, *floating-point* numbers (Python ``float`` types) retain
only 15 digits of precision:
>>> Float(1e20)
1.00000000000000e+20
>>> Float(1.23456789123456789)
1.23456789123457
It may be preferable to enter high-precision decimal numbers
as strings:
Float('1.23456789123456789')
1.23456789123456789
The desired number of digits can also be specified:
>>> Float('1e-3', 3)
0.00100
>>> Float(100, 4)
100.0
Float can automatically count significant figures if a null string
    is sent for the precision; spaces are also allowed in the string. (Auto-
counting is only allowed for strings, ints and longs).
>>> Float('123 456 789 . 123 456', '')
123456789.123456
>>> Float('12e-3', '')
0.012
>>> Float(3, '')
3.
If a number is written in scientific notation, only the digits before the
exponent are considered significant if a decimal appears, otherwise the
"e" signifies only how to move the decimal:
>>> Float('60.e2', '') # 2 digits significant
6.0e+3
>>> Float('60e2', '') # 4 digits significant
6000.
>>> Float('600e-2', '') # 3 digits significant
6.00
Notes
=====
Floats are inexact by their nature unless their value is a binary-exact
value.
>>> approx, exact = Float(.1, 1), Float(.125, 1)
For calculation purposes, evalf needs to be able to change the precision
but this will not increase the accuracy of the inexact value. The
following is the most accurate 5-digit approximation of a value of 0.1
that had only 1 digit of precision:
>>> approx.evalf(5)
0.099609
By contrast, 0.125 is exact in binary (as it is in base 10) and so it
can be passed to Float or evalf to obtain an arbitrary precision with
matching accuracy:
>>> Float(exact, 5)
0.12500
>>> exact.evalf(20)
0.12500000000000000000
Trying to make a high-precision Float from a float is not disallowed,
but one must keep in mind that the *underlying float* (not the apparent
decimal value) is being obtained with high precision. For example, 0.3
does not have a finite binary representation. The closest rational is
the fraction 5404319552844595/2**54. So if you try to obtain a Float of
0.3 to 20 digits of precision you will not see the same thing as 0.3
followed by 19 zeros:
>>> Float(0.3, 20)
0.29999999999999998890
If you want a 20-digit value of the decimal 0.3 (not the floating point
approximation of 0.3) you should send the 0.3 as a string. The underlying
representation is still binary but a higher precision than Python's float
is used:
>>> Float('0.3', 20)
0.30000000000000000000
Although you can increase the precision of an existing Float using Float
it will not increase the accuracy -- the underlying value is not changed:
>>> def show(f): # binary rep of Float
... from sympy import Mul, Pow
... s, m, e, b = f._mpf_
... v = Mul(int(m), Pow(2, int(e), evaluate=False), evaluate=False)
... print('%s at prec=%s' % (v, f._prec))
...
>>> t = Float('0.3', 3)
>>> show(t)
4915/2**14 at prec=13
>>> show(Float(t, 20)) # higher prec, not higher accuracy
4915/2**14 at prec=70
>>> show(Float(t, 2)) # lower prec
307/2**10 at prec=10
The same thing happens when evalf is used on a Float:
>>> show(t.evalf(20))
4915/2**14 at prec=70
>>> show(t.evalf(2))
307/2**10 at prec=10
Finally, Floats can be instantiated with an mpf tuple (n, c, p) to
produce the number (-1)**n*c*2**p:
>>> n, c, p = 1, 5, 0
>>> (-1)**n*c*2**p
-5
>>> Float((1, 5, 0))
-5.00000000000000
An actual mpf tuple also contains the number of bits in c as the last
element of the tuple:
>>> _._mpf_
(1, 5, 0, 3)
This is not needed for instantiation and is not the same thing as the
precision. The mpf tuple and the precision are two separate quantities
that Float tracks.
"""
__slots__ = ['_mpf_', '_prec']
# A Float represents many real numbers,
# both rational and irrational.
is_rational = None
is_irrational = None
is_number = True
is_real = True
is_Float = True
def __new__(cls, num, dps=None, prec=None, precision=None):
if prec is not None:
SymPyDeprecationWarning(
feature="Using 'prec=XX' to denote decimal precision",
useinstead="'dps=XX' for decimal precision and 'precision=XX' "\
"for binary precision",
issue=12820,
deprecated_since_version="1.1").warn()
dps = prec
del prec # avoid using this deprecated kwarg
if dps is not None and precision is not None:
raise ValueError('Both decimal and binary precision supplied. '
'Supply only one. ')
if isinstance(num, string_types):
num = num.replace(' ', '')
if num.startswith('.') and len(num) > 1:
num = '0' + num
elif num.startswith('-.') and len(num) > 2:
num = '-0.' + num[2:]
elif isinstance(num, float) and num == 0:
num = '0'
elif isinstance(num, (SYMPY_INTS, Integer)):
num = str(num) # faster than mlib.from_int
elif num is S.Infinity:
num = '+inf'
elif num is S.NegativeInfinity:
num = '-inf'
elif type(num).__module__ == 'numpy': # support for numpy datatypes
num = _convert_numpy_types(num)
elif isinstance(num, mpmath.mpf):
if precision is None:
if dps is None:
precision = num.context.prec
num = num._mpf_
if dps is None and precision is None:
dps = 15
if isinstance(num, Float):
return num
if isinstance(num, string_types) and _literal_float(num):
try:
Num = decimal.Decimal(num)
except decimal.InvalidOperation:
pass
else:
isint = '.' not in num
num, dps = _decimal_to_Rational_prec(Num)
if num.is_Integer and isint:
dps = max(dps, len(str(num).lstrip('-')))
dps = max(15, dps)
precision = mlib.libmpf.dps_to_prec(dps)
elif precision == '' and dps is None or precision is None and dps == '':
if not isinstance(num, string_types):
raise ValueError('The null string can only be used when '
'the number to Float is passed as a string or an integer.')
ok = None
if _literal_float(num):
try:
Num = decimal.Decimal(num)
except decimal.InvalidOperation:
pass
else:
isint = '.' not in num
num, dps = _decimal_to_Rational_prec(Num)
if num.is_Integer and isint:
dps = max(dps, len(str(num).lstrip('-')))
precision = mlib.libmpf.dps_to_prec(dps)
ok = True
if ok is None:
raise ValueError('string-float not recognized: %s' % num)
        # Decimal precision (dps) is set, and maybe binary precision (precision)
        # as well. From here on, binary precision is used to compute the Float.
        # Hence, if supplied, use binary precision; else translate from decimal
        # precision.
if precision is None or precision == '':
precision = mlib.libmpf.dps_to_prec(dps)
precision = int(precision)
if isinstance(num, float):
_mpf_ = mlib.from_float(num, precision, rnd)
elif isinstance(num, string_types):
_mpf_ = mlib.from_str(num, precision, rnd)
elif isinstance(num, decimal.Decimal):
if num.is_finite():
_mpf_ = mlib.from_str(str(num), precision, rnd)
elif num.is_nan():
_mpf_ = _mpf_nan
elif num.is_infinite():
if num > 0:
_mpf_ = _mpf_inf
else:
_mpf_ = _mpf_ninf
else:
raise ValueError("unexpected decimal value %s" % str(num))
elif isinstance(num, tuple) and len(num) in (3, 4):
if type(num[1]) is str:
# it's a hexadecimal (coming from a pickled object)
# assume that it is in standard form
num = list(num)
# If we're loading an object pickled in Python 2 into
                # Python 3, we may need to strip a trailing 'L' because
# of a shim for int on Python 3, see issue #13470.
if num[1].endswith('L'):
num[1] = num[1][:-1]
num[1] = long(num[1], 16)
_mpf_ = tuple(num)
else:
if len(num) == 4:
# handle normalization hack
return Float._new(num, precision)
else:
return (S.NegativeOne**num[0]*num[1]*S(2)**num[2]).evalf(precision)
else:
try:
_mpf_ = num._as_mpf_val(precision)
except (NotImplementedError, AttributeError):
_mpf_ = mpmath.mpf(num, prec=precision)._mpf_
# special cases
if _mpf_ == _mpf_zero:
pass # we want a Float
elif _mpf_ == _mpf_nan:
return S.NaN
obj = Expr.__new__(cls)
obj._mpf_ = _mpf_
obj._prec = precision
return obj
@classmethod
def _new(cls, _mpf_, _prec):
# special cases
if _mpf_ == _mpf_zero:
return S.Zero # XXX this is different from Float which gives 0.0
elif _mpf_ == _mpf_nan:
return S.NaN
obj = Expr.__new__(cls)
obj._mpf_ = mpf_norm(_mpf_, _prec)
# XXX: Should this be obj._prec = obj._mpf_[3]?
obj._prec = _prec
return obj
# mpz can't be pickled
def __getnewargs__(self):
return (mlib.to_pickable(self._mpf_),)
def __getstate__(self):
return {'_prec': self._prec}
def _hashable_content(self):
return (self._mpf_, self._prec)
def floor(self):
return Integer(int(mlib.to_int(
mlib.mpf_floor(self._mpf_, self._prec))))
def ceiling(self):
return Integer(int(mlib.to_int(
mlib.mpf_ceil(self._mpf_, self._prec))))
@property
def num(self):
return mpmath.mpf(self._mpf_)
def _as_mpf_val(self, prec):
rv = mpf_norm(self._mpf_, prec)
if rv != self._mpf_ and self._prec == prec:
debug(self._mpf_, rv)
return rv
def _as_mpf_op(self, prec):
return self._mpf_, max(prec, self._prec)
def _eval_is_finite(self):
if self._mpf_ in (_mpf_inf, _mpf_ninf):
return False
return True
def _eval_is_infinite(self):
if self._mpf_ in (_mpf_inf, _mpf_ninf):
return True
return False
def _eval_is_integer(self):
return self._mpf_ == _mpf_zero
def _eval_is_negative(self):
if self._mpf_ == _mpf_ninf:
return True
if self._mpf_ == _mpf_inf:
return False
return self.num < 0
def _eval_is_positive(self):
if self._mpf_ == _mpf_inf:
return True
if self._mpf_ == _mpf_ninf:
return False
return self.num > 0
def _eval_is_zero(self):
return self._mpf_ == _mpf_zero
def __nonzero__(self):
return self._mpf_ != _mpf_zero
__bool__ = __nonzero__
def __neg__(self):
return Float._new(mlib.mpf_neg(self._mpf_), self._prec)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number) and global_evaluate[0]:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_add(self._mpf_, rhs, prec, rnd), prec)
return Number.__add__(self, other)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number) and global_evaluate[0]:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_sub(self._mpf_, rhs, prec, rnd), prec)
return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number) and global_evaluate[0]:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mul(self._mpf_, rhs, prec, rnd), prec)
return Number.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number) and other != 0 and global_evaluate[0]:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_div(self._mpf_, rhs, prec, rnd), prec)
return Number.__div__(self, other)
__truediv__ = __div__
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if isinstance(other, Rational) and other.q != 1 and global_evaluate[0]:
# calculate mod with Rationals, *then* round the result
return Float(Rational.__mod__(Rational(self), other),
precision=self._prec)
if isinstance(other, Float) and global_evaluate[0]:
r = self/other
if r == int(r):
return Float(0, precision=max(self._prec, other._prec))
if isinstance(other, Number) and global_evaluate[0]:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mod(self._mpf_, rhs, prec, rnd), prec)
return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
if isinstance(other, Float) and global_evaluate[0]:
return other.__mod__(self)
if isinstance(other, Number) and global_evaluate[0]:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mod(rhs, self._mpf_, prec, rnd), prec)
return Number.__rmod__(self, other)
def _eval_power(self, expt):
"""
        expt is a symbolic object but not equal to 0 or 1.
        (-p)**r -> exp(r*log(-p)) -> exp(r*(log(p) + I*Pi)) ->
        -> p**r*(cos(Pi*r) + I*sin(Pi*r))
"""
if self == 0:
if expt.is_positive:
return S.Zero
if expt.is_negative:
return Float('inf')
if isinstance(expt, Number):
if isinstance(expt, Integer):
prec = self._prec
return Float._new(
mlib.mpf_pow_int(self._mpf_, expt.p, prec, rnd), prec)
elif isinstance(expt, Rational) and \
expt.p == 1 and expt.q % 2 and self.is_negative:
return Pow(S.NegativeOne, expt, evaluate=False)*(
-self)._eval_power(expt)
expt, prec = expt._as_mpf_op(self._prec)
mpfself = self._mpf_
try:
y = mpf_pow(mpfself, expt, prec, rnd)
return Float._new(y, prec)
except mlib.ComplexResult:
re, im = mlib.mpc_pow(
(mpfself, _mpf_zero), (expt, _mpf_zero), prec, rnd)
return Float._new(re, prec) + \
Float._new(im, prec)*S.ImaginaryUnit
def __abs__(self):
return Float._new(mlib.mpf_abs(self._mpf_), self._prec)
def __int__(self):
if self._mpf_ == _mpf_zero:
return 0
return int(mlib.to_int(self._mpf_)) # uses round_fast = round_down
__long__ = __int__
def __eq__(self, other):
if isinstance(other, float):
# coerce to Float at same precision
o = Float(other)
try:
ompf = o._as_mpf_val(self._prec)
except ValueError:
return False
return bool(mlib.mpf_eq(self._mpf_, ompf))
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if other.is_NumberSymbol:
if other.is_irrational:
return False
return other.__eq__(self)
if other.is_Float:
return bool(mlib.mpf_eq(self._mpf_, other._mpf_))
if other.is_Number:
# numbers should compare at the same precision;
# all _as_mpf_val routines should be sure to abide
# by the request to change the prec if necessary; if
# they don't, the equality test will fail since it compares
# the mpf tuples
ompf = other._as_mpf_val(self._prec)
return bool(mlib.mpf_eq(self._mpf_, ompf))
return False # Float != non-Number
def __ne__(self, other):
return not self == other
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if other.is_NumberSymbol:
return other.__lt__(self)
if other.is_Rational and not other.is_Integer:
self *= other.q
other = _sympify(other.p)
elif other.is_comparable:
other = other.evalf()
if other.is_Number and other is not S.NaN:
return _sympify(bool(
mlib.mpf_gt(self._mpf_, other._as_mpf_val(self._prec))))
return Expr.__gt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if other.is_NumberSymbol:
return other.__le__(self)
if other.is_Rational and not other.is_Integer:
self *= other.q
other = _sympify(other.p)
elif other.is_comparable:
other = other.evalf()
if other.is_Number and other is not S.NaN:
return _sympify(bool(
mlib.mpf_ge(self._mpf_, other._as_mpf_val(self._prec))))
return Expr.__ge__(self, other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if other.is_NumberSymbol:
return other.__gt__(self)
if other.is_Rational and not other.is_Integer:
self *= other.q
other = _sympify(other.p)
elif other.is_comparable:
other = other.evalf()
if other.is_Number and other is not S.NaN:
return _sympify(bool(
mlib.mpf_lt(self._mpf_, other._as_mpf_val(self._prec))))
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
if other.is_NumberSymbol:
return other.__ge__(self)
if other.is_Rational and not other.is_Integer:
self *= other.q
other = _sympify(other.p)
elif other.is_comparable:
other = other.evalf()
if other.is_Number and other is not S.NaN:
return _sympify(bool(
mlib.mpf_le(self._mpf_, other._as_mpf_val(self._prec))))
return Expr.__le__(self, other)
def __hash__(self):
return super(Float, self).__hash__()
def epsilon_eq(self, other, epsilon="1e-15"):
return abs(self - other) < Float(epsilon)
def _sage_(self):
import sage.all as sage
return sage.RealNumber(str(self))
def __format__(self, format_spec):
return format(decimal.Decimal(str(self)), format_spec)
# Add sympify converters
converter[float] = converter[decimal.Decimal] = Float
# this is here to work nicely in Sage
RealNumber = Float
class Rational(Number):
"""Represents integers and rational numbers (p/q) of any size.
Examples
========
>>> from sympy import Rational, nsimplify, S, pi
>>> Rational(3)
3
>>> Rational(1, 2)
1/2
Rational is unprejudiced in accepting input. If a float is passed, the
underlying value of the binary representation will be returned:
>>> Rational(.5)
1/2
>>> Rational(.2)
3602879701896397/18014398509481984
If the simpler representation of the float is desired then consider
limiting the denominator to the desired value or convert the float to
a string (which is roughly equivalent to limiting the denominator to
10**12):
>>> Rational(str(.2))
1/5
>>> Rational(.2).limit_denominator(10**12)
1/5
An arbitrarily precise Rational is obtained when a string literal is
passed:
>>> Rational("1.23")
123/100
>>> Rational('1e-2')
1/100
>>> Rational(".1")
1/10
>>> Rational('1e-2/3.2')
1/320
The conversion of other types of strings can be handled by
the sympify() function, and conversion of floats to expressions
or simple fractions can be handled with nsimplify:
>>> S('.[3]') # repeating digits in brackets
1/3
>>> S('3**2/10') # general expressions
9/10
>>> nsimplify(.3) # numbers that have a simple form
3/10
But if the input does not reduce to a literal Rational, an error will
be raised:
>>> Rational(pi)
Traceback (most recent call last):
...
TypeError: invalid input: pi
Low-level
---------
Access numerator and denominator as .p and .q:
>>> r = Rational(3, 4)
>>> r
3/4
>>> r.p
3
>>> r.q
4
Note that p and q return integers (not SymPy Integers) so some care
is needed when using them in expressions:
>>> r.p/r.q
0.75
See Also
========
sympify, sympy.simplify.simplify.nsimplify
"""
is_real = True
is_integer = False
is_rational = True
is_number = True
__slots__ = ['p', 'q']
is_Rational = True
@cacheit
def __new__(cls, p, q=None, gcd=None):
if q is None:
if isinstance(p, Rational):
return p
if isinstance(p, string_types):
if p.count('/') > 1:
raise TypeError('invalid input: %s' % p)
pq = p.rsplit('/', 1)
if len(pq) == 2:
p, q = pq
fp = fractions.Fraction(p)
fq = fractions.Fraction(q)
f = fp/fq
return Rational(f.numerator, f.denominator, 1)
p = p.replace(' ', '')
try:
p = fractions.Fraction(p)
except ValueError:
pass # error will raise below
if not isinstance(p, string_types):
try:
if isinstance(p, fractions.Fraction):
return Rational(p.numerator, p.denominator, 1)
except NameError:
pass # error will raise below
if isinstance(p, (float, Float)):
return Rational(*_as_integer_ratio(p))
if not isinstance(p, SYMPY_INTS + (Rational,)):
raise TypeError('invalid input: %s' % p)
q = q or S.One
gcd = 1
else:
p = Rational(p)
q = Rational(q)
if isinstance(q, Rational):
p *= q.q
q = q.p
if isinstance(p, Rational):
q *= p.q
p = p.p
# p and q are now integers
if q == 0:
if p == 0:
if _errdict["divide"]:
raise ValueError("Indeterminate 0/0")
else:
return S.NaN
return S.ComplexInfinity
if q < 0:
q = -q
p = -p
if not gcd:
gcd = igcd(abs(p), q)
if gcd > 1:
p //= gcd
q //= gcd
if q == 1:
return Integer(p)
if p == 1 and q == 2:
return S.Half
obj = Expr.__new__(cls)
obj.p = p
obj.q = q
return obj
def limit_denominator(self, max_denominator=1000000):
"""Closest Rational to self with denominator at most max_denominator.
>>> from sympy import Rational
>>> Rational('3.141592653589793').limit_denominator(10)
22/7
>>> Rational('3.141592653589793').limit_denominator(100)
311/99
"""
f = fractions.Fraction(self.p, self.q)
return Rational(f.limit_denominator(fractions.Fraction(int(max_denominator))))
def __getnewargs__(self):
return (self.p, self.q)
def _hashable_content(self):
return (self.p, self.q)
def _eval_is_positive(self):
return self.p > 0
def _eval_is_zero(self):
return self.p == 0
def __neg__(self):
return Rational(-self.p, self.q)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if global_evaluate[0]:
if isinstance(other, Integer):
return Rational(self.p + self.q*other.p, self.q, 1)
elif isinstance(other, Rational):
#TODO: this can probably be optimized more
return Rational(self.p*other.q + self.q*other.p, self.q*other.q)
elif isinstance(other, Float):
return other + self
else:
return Number.__add__(self, other)
return Number.__add__(self, other)
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if global_evaluate[0]:
if isinstance(other, Integer):
return Rational(self.p - self.q*other.p, self.q, 1)
elif isinstance(other, Rational):
return Rational(self.p*other.q - self.q*other.p, self.q*other.q)
elif isinstance(other, Float):
return -other + self
else:
return Number.__sub__(self, other)
return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __rsub__(self, other):
if global_evaluate[0]:
if isinstance(other, Integer):
return Rational(self.q*other.p - self.p, self.q, 1)
elif isinstance(other, Rational):
return Rational(self.q*other.p - self.p*other.q, self.q*other.q)
elif isinstance(other, Float):
return -self + other
else:
return Number.__rsub__(self, other)
return Number.__rsub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if global_evaluate[0]:
if isinstance(other, Integer):
return Rational(self.p*other.p, self.q, igcd(other.p, self.q))
elif isinstance(other, Rational):
return Rational(self.p*other.p, self.q*other.q, igcd(self.p, other.q)*igcd(self.q, other.p))
elif isinstance(other, Float):
return other*self
else:
return Number.__mul__(self, other)
return Number.__mul__(self, other)
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if global_evaluate[0]:
if isinstance(other, Integer):
if self.p and other.p == S.Zero:
return S.ComplexInfinity
else:
return Rational(self.p, self.q*other.p, igcd(self.p, other.p))
elif isinstance(other, Rational):
return Rational(self.p*other.q, self.q*other.p, igcd(self.p, other.p)*igcd(self.q, other.q))
elif isinstance(other, Float):
return self*(1/other)
else:
return Number.__div__(self, other)
return Number.__div__(self, other)
@_sympifyit('other', NotImplemented)
def __rdiv__(self, other):
if global_evaluate[0]:
if isinstance(other, Integer):
return Rational(other.p*self.q, self.p, igcd(self.p, other.p))
elif isinstance(other, Rational):
return Rational(other.p*self.q, other.q*self.p, igcd(self.p, other.p)*igcd(self.q, other.q))
elif isinstance(other, Float):
return other*(1/self)
else:
return Number.__rdiv__(self, other)
return Number.__rdiv__(self, other)
__truediv__ = __div__
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if global_evaluate[0]:
if isinstance(other, Rational):
n = (self.p*other.q) // (other.p*self.q)
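# For example (illustrative): Rational(7, 2) % Rational(3, 2) gives
# n = (7*2)//(3*2) = 2 and remainder (7*2 - 2*3*2)/(2*2) = 2/4, i.e. 1/2.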
return Rational(self.p*other.q - n*other.p*self.q, self.q*other.q)
if isinstance(other, Float):
# calculate mod with Rationals, *then* round the answer
return Float(self.__mod__(Rational(other)),
precision=other._prec)
return Number.__mod__(self, other)
return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
if isinstance(other, Rational):
return Rational.__mod__(other, self)
return Number.__rmod__(self, other)
def _eval_power(self, expt):
if isinstance(expt, Number):
if isinstance(expt, Float):
return self._eval_evalf(expt._prec)**expt
if expt.is_negative:
# (3/4)**-2 -> (4/3)**2
ne = -expt
if (ne is S.One):
return Rational(self.q, self.p)
if self.is_negative:
if expt.q != 1:
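# Worked example (illustrative): (-2/3)**Rational(-1, 2) has ne = 1/2 and
# (expt.p % expt.q)/expt.q = 1/2, so this yields -I*sqrt(3/2) = -sqrt(6)*I/2.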
return -(S.NegativeOne)**((expt.p % expt.q) /
S(expt.q))*Rational(self.q, -self.p)**ne
else:
return S.NegativeOne**ne*Rational(self.q, -self.p)**ne
else:
return Rational(self.q, self.p)**ne
if expt is S.Infinity: # -oo already caught by test for negative
if self.p > self.q:
# (3/2)**oo -> oo
return S.Infinity
if self.p < -self.q:
# (-3/2)**oo -> oo + I*oo
return S.Infinity + S.Infinity*S.ImaginaryUnit
return S.Zero
if isinstance(expt, Integer):
# (4/3)**2 -> 4**2 / 3**2
return Rational(self.p**expt.p, self.q**expt.p, 1)
if isinstance(expt, Rational):
if self.p != 1:
# (4/3)**(5/6) -> 4**(5/6)*3**(-5/6)
return Integer(self.p)**expt*Integer(self.q)**(-expt)
# as the above caught negative self.p, now self is positive
return Integer(self.q)**Rational(
expt.p*(expt.q - 1), expt.q) / \
Integer(self.q)**Integer(expt.p)
if self.is_negative and expt.is_even:
return (-self)**expt
return
def _as_mpf_val(self, prec):
return mlib.from_rational(self.p, self.q, prec, rnd)
def _mpmath_(self, prec, rnd):
return mpmath.make_mpf(mlib.from_rational(self.p, self.q, prec, rnd))
def __abs__(self):
return Rational(abs(self.p), self.q)
def __int__(self):
p, q = self.p, self.q
if p < 0:
return -int(-p//q)
return int(p//q)
__long__ = __int__
def floor(self):
return Integer(self.p // self.q)
def ceiling(self):
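# Ceiling via floor of the negation, e.g. for 7/2: -((-7) // 2) = -(-4) = 4.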
return -Integer(-self.p // self.q)
def __eq__(self, other):
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if other.is_NumberSymbol:
if other.is_irrational:
return False
return other.__eq__(self)
if other.is_Number:
if other.is_Rational:
# a Rational is always in reduced form so will never be 2/4
# so we can just check equivalence of args
return self.p == other.p and self.q == other.q
if other.is_Float:
return mlib.mpf_eq(self._as_mpf_val(other._prec), other._mpf_)
return False
def __ne__(self, other):
return not self == other
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if other.is_NumberSymbol:
return other.__lt__(self)
expr = self
if other.is_Number:
if other.is_Rational:
return _sympify(bool(self.p*other.q > self.q*other.p))
if other.is_Float:
return _sympify(bool(mlib.mpf_gt(
self._as_mpf_val(other._prec), other._mpf_)))
elif other.is_number and other.is_real:
expr, other = Integer(self.p), self.q*other
return Expr.__gt__(expr, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if other.is_NumberSymbol:
return other.__le__(self)
expr = self
if other.is_Number:
if other.is_Rational:
return _sympify(bool(self.p*other.q >= self.q*other.p))
if other.is_Float:
return _sympify(bool(mlib.mpf_ge(
self._as_mpf_val(other._prec), other._mpf_)))
elif other.is_number and other.is_real:
expr, other = Integer(self.p), self.q*other
return Expr.__ge__(expr, other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if other.is_NumberSymbol:
return other.__gt__(self)
expr = self
if other.is_Number:
if other.is_Rational:
return _sympify(bool(self.p*other.q < self.q*other.p))
if other.is_Float:
return _sympify(bool(mlib.mpf_lt(
self._as_mpf_val(other._prec), other._mpf_)))
elif other.is_number and other.is_real:
expr, other = Integer(self.p), self.q*other
return Expr.__lt__(expr, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
expr = self
if other.is_NumberSymbol:
return other.__ge__(self)
elif other.is_Number:
if other.is_Rational:
return _sympify(bool(self.p*other.q <= self.q*other.p))
if other.is_Float:
return _sympify(bool(mlib.mpf_le(
self._as_mpf_val(other._prec), other._mpf_)))
elif other.is_number and other.is_real:
expr, other = Integer(self.p), self.q*other
return Expr.__le__(expr, other)
def __hash__(self):
return super(Rational, self).__hash__()
def factors(self, limit=None, use_trial=True, use_rho=False,
use_pm1=False, verbose=False, visual=False):
"""A wrapper to factorint which return factors of self that are
smaller than limit (or cheap to compute). Special methods of
factoring are disabled by default so that only trial division is used.
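An illustrative example (assuming the default trial-division settings;
numerator primes get positive exponents, denominator primes negative ones):
>>> from sympy import Rational
>>> Rational(8, 9).factors()
{2: 3, 3: -2}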
"""
from sympy.ntheory import factorrat
return factorrat(self, limit=limit, use_trial=use_trial,
use_rho=use_rho, use_pm1=use_pm1,
verbose=verbose).copy()
@_sympifyit('other', NotImplemented)
def gcd(self, other):
if isinstance(other, Rational):
if other is S.Zero:
return other
return Rational(
Integer(igcd(self.p, other.p)),
Integer(ilcm(self.q, other.q)))
return Number.gcd(self, other)
@_sympifyit('other', NotImplemented)
def lcm(self, other):
if isinstance(other, Rational):
return Rational(
self.p*other.p//igcd(self.p, other.p),
igcd(self.q, other.q))
return Number.lcm(self, other)
def as_numer_denom(self):
return Integer(self.p), Integer(self.q)
def _sage_(self):
import sage.all as sage
return sage.Integer(self.p)/sage.Integer(self.q)
def as_content_primitive(self, radical=False, clear=True):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self.
Examples
========
>>> from sympy import S
>>> (S(-3)/2).as_content_primitive()
(3/2, -1)
See docstring of Expr.as_content_primitive for more examples.
"""
if self:
if self.is_positive:
return self, S.One
return -self, S.NegativeOne
return S.One, self
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
return self, S.One
def as_coeff_Add(self, rational=False):
"""Efficiently extract the coefficient of a summation. """
return self, S.Zero
# int -> Integer
_intcache = {}
# TODO move this tracing facility to sympy/core/trace.py ?
def _intcache_printinfo():
ints = sorted(_intcache.keys())
nhit = _intcache_hits
nmiss = _intcache_misses
if nhit == 0 and nmiss == 0:
print()
print('Integer cache statistic was not collected')
return
miss_ratio = float(nmiss) / (nhit + nmiss)
print()
print('Integer cache statistic')
print('-----------------------')
print()
print('#items: %i' % len(ints))
print()
print(' #hit #miss #total')
print()
print('%5i %5i (%7.5f %%) %5i' % (
nhit, nmiss, miss_ratio*100, nhit + nmiss)
)
print()
print(ints)
_intcache_hits = 0
_intcache_misses = 0
def int_trace(f):
import os
if os.getenv('SYMPY_TRACE_INT', 'no').lower() != 'yes':
return f
def Integer_tracer(cls, i):
global _intcache_hits, _intcache_misses
try:
_intcache_hits += 1
return _intcache[i]
except KeyError:
_intcache_hits -= 1
_intcache_misses += 1
return f(cls, i)
# also we want to hook our _intcache_printinfo into sys.atexit
import atexit
atexit.register(_intcache_printinfo)
return Integer_tracer
class Integer(Rational):
q = 1
is_integer = True
is_number = True
is_Integer = True
__slots__ = ['p']
def _as_mpf_val(self, prec):
return mlib.from_int(self.p, prec, rnd)
def _mpmath_(self, prec, rnd):
return mpmath.make_mpf(self._as_mpf_val(prec))
# TODO caching with decorator, but not to degrade performance
@int_trace
def __new__(cls, i):
if isinstance(i, string_types):
i = i.replace(' ', '')
# whereas we cannot, in general, make a Rational from an
# arbitrary expression, we can make an Integer unambiguously
# (except when a non-integer expression happens to round to
# an integer). So we proceed by taking int() of the input and
# let the int routines determine whether the expression can
# be made into an int or whether an error should be raised.
try:
ival = int(i)
except TypeError:
raise TypeError(
'Integer can only work with integer expressions.')
try:
return _intcache[ival]
except KeyError:
# We only work with well-behaved integer types. This converts, for
# example, numpy.int32 instances.
obj = Expr.__new__(cls)
obj.p = ival
_intcache[ival] = obj
return obj
def __getnewargs__(self):
return (self.p,)
# Arithmetic operations are here for efficiency
def __int__(self):
return self.p
__long__ = __int__
def floor(self):
return Integer(self.p)
def ceiling(self):
return Integer(self.p)
def __neg__(self):
return Integer(-self.p)
def __abs__(self):
if self.p >= 0:
return self
else:
return Integer(-self.p)
def __divmod__(self, other):
from .containers import Tuple
if isinstance(other, Integer) and global_evaluate[0]:
return Tuple(*(divmod(self.p, other.p)))
else:
return Number.__divmod__(self, other)
def __rdivmod__(self, other):
from .containers import Tuple
if isinstance(other, integer_types) and global_evaluate[0]:
return Tuple(*(divmod(other, self.p)))
else:
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
oname = type(other).__name__
sname = type(self).__name__
raise TypeError(msg % (oname, sname))
return Number.__divmod__(other, self)
# TODO make it decorator + bytecodehacks?
def __add__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(self.p + other)
elif isinstance(other, Integer):
return Integer(self.p + other.p)
elif isinstance(other, Rational):
return Rational(self.p*other.q + other.p, other.q, 1)
return Rational.__add__(self, other)
else:
return Add(self, other)
def __radd__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(other + self.p)
elif isinstance(other, Rational):
return Rational(other.p + self.p*other.q, other.q, 1)
return Rational.__radd__(self, other)
return Rational.__radd__(self, other)
def __sub__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(self.p - other)
elif isinstance(other, Integer):
return Integer(self.p - other.p)
elif isinstance(other, Rational):
return Rational(self.p*other.q - other.p, other.q, 1)
return Rational.__sub__(self, other)
return Rational.__sub__(self, other)
def __rsub__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(other - self.p)
elif isinstance(other, Rational):
return Rational(other.p - self.p*other.q, other.q, 1)
return Rational.__rsub__(self, other)
return Rational.__rsub__(self, other)
def __mul__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(self.p*other)
elif isinstance(other, Integer):
return Integer(self.p*other.p)
elif isinstance(other, Rational):
return Rational(self.p*other.p, other.q, igcd(self.p, other.q))
return Rational.__mul__(self, other)
return Rational.__mul__(self, other)
def __rmul__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(other*self.p)
elif isinstance(other, Rational):
return Rational(other.p*self.p, other.q, igcd(self.p, other.q))
return Rational.__rmul__(self, other)
return Rational.__rmul__(self, other)
def __mod__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(self.p % other)
elif isinstance(other, Integer):
return Integer(self.p % other.p)
return Rational.__mod__(self, other)
return Rational.__mod__(self, other)
def __rmod__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(other % self.p)
elif isinstance(other, Integer):
return Integer(other.p % self.p)
return Rational.__rmod__(self, other)
return Rational.__rmod__(self, other)
def __eq__(self, other):
if isinstance(other, integer_types):
return (self.p == other)
elif isinstance(other, Integer):
return (self.p == other.p)
return Rational.__eq__(self, other)
def __ne__(self, other):
return not self == other
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if other.is_Integer:
return _sympify(self.p > other.p)
return Rational.__gt__(self, other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if other.is_Integer:
return _sympify(self.p < other.p)
return Rational.__lt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if other.is_Integer:
return _sympify(self.p >= other.p)
return Rational.__ge__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
if other.is_Integer:
return _sympify(self.p <= other.p)
return Rational.__le__(self, other)
def __hash__(self):
return hash(self.p)
def __index__(self):
return self.p
########################################
def _eval_is_odd(self):
return bool(self.p % 2)
def _eval_power(self, expt):
"""
Tries to do some simplifications on self**expt.
Returns None if no further simplifications can be done.
When exponent is a fraction (so we have for example a square root),
we try to find a simpler representation by factoring the argument
up to factors of 2**15, e.g.
- sqrt(4) becomes 2
- sqrt(-4) becomes 2*I
- (2**(3+7)*3**(6+7))**Rational(1,7) becomes 6*18**(3/7)
Further simplification would require a special call to factorint on
the argument which is not done here for sake of speed.
"""
from sympy import perfect_power
if expt is S.Infinity:
if self.p > S.One:
return S.Infinity
# cases -1, 0, 1 are done in their respective classes
return S.Infinity + S.ImaginaryUnit*S.Infinity
if expt is S.NegativeInfinity:
return Rational(1, self)**S.Infinity
if not isinstance(expt, Number):
# simplify when expt is even
# (-2)**k --> 2**k
if self.is_negative and expt.is_even:
return (-self)**expt
if isinstance(expt, Float):
# Rational knows how to exponentiate by a Float
return super(Integer, self)._eval_power(expt)
if not isinstance(expt, Rational):
return
if expt is S.Half and self.is_negative:
# we extract I for this special case since everyone is doing so
return S.ImaginaryUnit*Pow(-self, expt)
if expt.is_negative:
# invert base and change sign on exponent
ne = -expt
if self.is_negative:
if expt.q != 1:
return -(S.NegativeOne)**((expt.p % expt.q) /
S(expt.q))*Rational(1, -self)**ne
else:
return (S.NegativeOne)**ne*Rational(1, -self)**ne
else:
return Rational(1, self.p)**ne
# see if base is a perfect root, sqrt(4) --> 2
x, xexact = integer_nthroot(abs(self.p), expt.q)
if xexact:
# if it's a perfect root we've finished
result = Integer(x**abs(expt.p))
if self.is_negative:
result *= S.NegativeOne**expt
return result
# The following is an algorithm where we collect perfect roots
# from the factors of base.
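# Worked example (illustrative): 12**(1/2) has factors {2: 2, 3: 1}; the
# full power of 2 comes out as out_int = 2, sqr_dict becomes {3: 1}, and
# the final result is 2*sqrt(3).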
# if it's not an nth root, it still might be a perfect power
b_pos = int(abs(self.p))
p = perfect_power(b_pos)
if p is not False:
dict = {p[0]: p[1]}
else:
dict = Integer(self).factors(limit=2**15)
# now process the dict of factors
if self.is_negative:
dict[-1] = 1
out_int = 1 # integer part
out_rad = 1 # extracted radicals
sqr_int = 1
sqr_gcd = 0
sqr_dict = {}
for prime, exponent in dict.items():
exponent *= expt.p
# remove multiples of expt.q: (2**12)**(1/10) -> 2*(2**2)**(1/10)
div_e, div_m = divmod(exponent, expt.q)
if div_e > 0:
out_int *= prime**div_e
if div_m > 0:
# see if the reduced exponent shares a gcd with e.q
# (2**2)**(1/10) -> 2**(1/5)
g = igcd(div_m, expt.q)
if g != 1:
out_rad *= Pow(prime, Rational(div_m//g, expt.q//g))
else:
sqr_dict[prime] = div_m
# identify gcd of remaining powers
for p, ex in sqr_dict.items():
if sqr_gcd == 0:
sqr_gcd = ex
else:
sqr_gcd = igcd(sqr_gcd, ex)
if sqr_gcd == 1:
break
for k, v in sqr_dict.items():
sqr_int *= k**(v//sqr_gcd)
if sqr_int == self and out_int == 1 and out_rad == 1:
result = None
else:
result = out_int*out_rad*Pow(sqr_int, Rational(sqr_gcd, expt.q))
return result
def _eval_is_prime(self):
from sympy.ntheory import isprime
return isprime(self)
def _eval_is_composite(self):
if self > 1:
return fuzzy_not(self.is_prime)
else:
return False
def as_numer_denom(self):
return self, S.One
def __floordiv__(self, other):
return Integer(self.p // Integer(other).p)
def __rfloordiv__(self, other):
return Integer(Integer(other).p // self.p)
# Add sympify converters
for i_type in integer_types:
converter[i_type] = Integer
class AlgebraicNumber(Expr):
"""Class for representing algebraic numbers in SymPy. """
__slots__ = ['rep', 'root', 'alias', 'minpoly']
is_AlgebraicNumber = True
is_algebraic = True
is_number = True
def __new__(cls, expr, coeffs=None, alias=None, **args):
"""Construct a new algebraic number. """
from sympy import Poly
from sympy.polys.polyclasses import ANP, DMP
from sympy.polys.numberfields import minimal_polynomial
from sympy.core.symbol import Symbol
expr = sympify(expr)
if isinstance(expr, (tuple, Tuple)):
minpoly, root = expr
if not minpoly.is_Poly:
minpoly = Poly(minpoly)
elif expr.is_AlgebraicNumber:
minpoly, root = expr.minpoly, expr.root
else:
minpoly, root = minimal_polynomial(
expr, args.get('gen'), polys=True), expr
dom = minpoly.get_domain()
if coeffs is not None:
if not isinstance(coeffs, ANP):
rep = DMP.from_sympy_list(sympify(coeffs), 0, dom)
scoeffs = Tuple(*coeffs)
else:
rep = DMP.from_list(coeffs.to_list(), 0, dom)
scoeffs = Tuple(*coeffs.to_list())
if rep.degree() >= minpoly.degree():
rep = rep.rem(minpoly.rep)
else:
rep = DMP.from_list([1, 0], 0, dom)
scoeffs = Tuple(1, 0)
if root.is_negative:
rep = -rep
scoeffs = Tuple(-1, 0)
sargs = (root, scoeffs)
if alias is not None:
if not isinstance(alias, Symbol):
alias = Symbol(alias)
sargs = sargs + (alias,)
obj = Expr.__new__(cls, *sargs)
obj.rep = rep
obj.root = root
obj.alias = alias
obj.minpoly = minpoly
return obj
def __hash__(self):
return super(AlgebraicNumber, self).__hash__()
def _eval_evalf(self, prec):
return self.as_expr()._evalf(prec)
@property
def is_aliased(self):
"""Returns ``True`` if ``alias`` was set. """
return self.alias is not None
def as_poly(self, x=None):
"""Create a Poly instance from ``self``. """
from sympy import Dummy, Poly, PurePoly
if x is not None:
return Poly.new(self.rep, x)
else:
if self.alias is not None:
return Poly.new(self.rep, self.alias)
else:
return PurePoly.new(self.rep, Dummy('x'))
def as_expr(self, x=None):
"""Create a Basic expression from ``self``. """
return self.as_poly(x or self.root).as_expr().expand()
def coeffs(self):
"""Returns all SymPy coefficients of an algebraic number. """
return [ self.rep.dom.to_sympy(c) for c in self.rep.all_coeffs() ]
def native_coeffs(self):
"""Returns all native coefficients of an algebraic number. """
return self.rep.all_coeffs()
def to_algebraic_integer(self):
"""Convert ``self`` to an algebraic integer. """
from sympy import Poly
f = self.minpoly
if f.LC() == 1:
return self
coeff = f.LC()**(f.degree() - 1)
poly = f.compose(Poly(f.gen/f.LC()))
minpoly = poly*coeff
root = f.LC()*self.root
return AlgebraicNumber((minpoly, root), self.coeffs())
def _eval_simplify(self, ratio, measure):
from sympy.polys import CRootOf, minpoly
for r in [r for r in self.minpoly.all_roots() if r.func != CRootOf]:
if minpoly(self.root - r).is_Symbol:
# use the matching root if it's simpler
if measure(r) < ratio*measure(self.root):
return AlgebraicNumber(r)
return self
class RationalConstant(Rational):
"""
Abstract base class for rationals with specific behaviors.
Derived classes must define class attributes p and q and should probably all
be singletons.
"""
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
class IntegerConstant(Integer):
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
class Zero(with_metaclass(Singleton, IntegerConstant)):
"""The number zero.
Zero is a singleton, and can be accessed by ``S.Zero``
Examples
========
>>> from sympy import S, Integer, zoo
>>> Integer(0) is S.Zero
True
>>> 1/S.Zero
zoo
References
==========
.. [1] http://en.wikipedia.org/wiki/Zero
"""
p = 0
q = 1
is_positive = False
is_negative = False
is_zero = True
is_number = True
__slots__ = []
@staticmethod
def __abs__():
return S.Zero
@staticmethod
def __neg__():
return S.Zero
def _eval_power(self, expt):
if expt.is_positive:
return self
if expt.is_negative:
return S.ComplexInfinity
if expt.is_real is False:
return S.NaN
# infinities are already handled with pos and neg
# tests above; now throw away leading numbers on Mul
# exponent
coeff, terms = expt.as_coeff_Mul()
if coeff.is_negative:
return S.ComplexInfinity**terms
if coeff is not S.One: # there is a Number to discard
return self**terms
def _eval_order(self, *symbols):
# Order(0,x) -> 0
return self
def __nonzero__(self):
return False
__bool__ = __nonzero__
def as_coeff_Mul(self, rational=False): # XXX this routine should be deleted
"""Efficiently extract the coefficient of a summation. """
return S.One, self
class One(with_metaclass(Singleton, IntegerConstant)):
"""The number one.
One is a singleton, and can be accessed by ``S.One``.
Examples
========
>>> from sympy import S, Integer
>>> Integer(1) is S.One
True
References
==========
.. [1] http://en.wikipedia.org/wiki/1_%28number%29
"""
is_number = True
p = 1
q = 1
__slots__ = []
@staticmethod
def __abs__():
return S.One
@staticmethod
def __neg__():
return S.NegativeOne
def _eval_power(self, expt):
return self
def _eval_order(self, *symbols):
return
@staticmethod
def factors(limit=None, use_trial=True, use_rho=False, use_pm1=False,
verbose=False, visual=False):
if visual:
return S.One
else:
return {}
class NegativeOne(with_metaclass(Singleton, IntegerConstant)):
"""The number negative one.
NegativeOne is a singleton, and can be accessed by ``S.NegativeOne``.
Examples
========
>>> from sympy import S, Integer
>>> Integer(-1) is S.NegativeOne
True
See Also
========
One
References
==========
.. [1] http://en.wikipedia.org/wiki/%E2%88%921_%28number%29
"""
is_number = True
p = -1
q = 1
__slots__ = []
@staticmethod
def __abs__():
return S.One
@staticmethod
def __neg__():
return S.One
def _eval_power(self, expt):
if expt.is_odd:
return S.NegativeOne
if expt.is_even:
return S.One
if isinstance(expt, Number):
if isinstance(expt, Float):
return Float(-1.0)**expt
if expt is S.NaN:
return S.NaN
if expt is S.Infinity or expt is S.NegativeInfinity:
return S.NaN
if expt is S.Half:
return S.ImaginaryUnit
if isinstance(expt, Rational):
if expt.q == 2:
return S.ImaginaryUnit**Integer(expt.p)
i, r = divmod(expt.p, expt.q)
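# For example (illustrative): (-1)**Rational(7, 3) has divmod(7, 3) == (2, 1),
# so it reduces to ((-1)**2)*(-1)**Rational(1, 3) == (-1)**Rational(1, 3).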
if i:
return self**i*self**Rational(r, expt.q)
return
class Half(with_metaclass(Singleton, RationalConstant)):
"""The rational number 1/2.
Half is a singleton, and can be accessed by ``S.Half``.
Examples
========
>>> from sympy import S, Rational
>>> Rational(1, 2) is S.Half
True
References
==========
.. [1] http://en.wikipedia.org/wiki/One_half
"""
is_number = True
p = 1
q = 2
__slots__ = []
@staticmethod
def __abs__():
return S.Half
class Infinity(with_metaclass(Singleton, Number)):
r"""Positive infinite quantity.
In real analysis the symbol `\infty` denotes an unbounded
limit: `x\to\infty` means that `x` grows without bound.
Infinity is often used not only to define a limit but as a value
in the affinely extended real number system. Points labeled `+\infty`
and `-\infty` can be added to the topological space of the real numbers,
producing the two-point compactification of the real numbers. Adding
algebraic properties to this gives us the extended real numbers.
Infinity is a singleton, and can be accessed by ``S.Infinity``,
or can be imported as ``oo``.
Examples
========
>>> from sympy import oo, exp, limit, Symbol
>>> 1 + oo
oo
>>> 42/oo
0
>>> x = Symbol('x')
>>> limit(exp(x), x, oo)
oo
See Also
========
NegativeInfinity, NaN
References
==========
.. [1] http://en.wikipedia.org/wiki/Infinity
"""
is_commutative = True
is_positive = True
is_infinite = True
is_number = True
is_prime = False
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\infty"
def _eval_subs(self, old, new):
if self == old:
return new
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number):
if other is S.NegativeInfinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf'):
return S.NaN
else:
return Float('inf')
else:
return S.Infinity
return NotImplemented
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number):
if other is S.Infinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('inf'):
return S.NaN
else:
return Float('inf')
else:
return S.Infinity
return NotImplemented
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number):
if other is S.Zero or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == 0:
return S.NaN
if other > 0:
return Float('inf')
else:
return Float('-inf')
else:
if other > 0:
return S.Infinity
else:
return S.NegativeInfinity
return NotImplemented
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number):
if other is S.Infinity or \
other is S.NegativeInfinity or \
other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf') or \
other == Float('inf'):
return S.NaN
elif other.is_nonnegative:
return Float('inf')
else:
return Float('-inf')
else:
if other >= 0:
return S.Infinity
else:
return S.NegativeInfinity
return NotImplemented
__truediv__ = __div__
def __abs__(self):
return S.Infinity
def __neg__(self):
return S.NegativeInfinity
def _eval_power(self, expt):
"""
``expt`` is a symbolic object but not equal to 0 or 1.
================ ======= ==============================
Expression Result Notes
================ ======= ==============================
``oo ** nan`` ``nan``
``oo ** -p`` ``0`` ``p`` is number, ``oo``
================ ======= ==============================
See Also
========
Pow
NaN
NegativeInfinity
"""
from sympy.functions import re
if expt.is_positive:
return S.Infinity
if expt.is_negative:
return S.Zero
if expt is S.NaN:
return S.NaN
if expt is S.ComplexInfinity:
return S.NaN
if expt.is_real is False and expt.is_number:
expt_real = re(expt)
if expt_real.is_positive:
return S.ComplexInfinity
if expt_real.is_negative:
return S.Zero
if expt_real.is_zero:
return S.NaN
return self**expt.evalf()
def _as_mpf_val(self, prec):
return mlib.finf
def _sage_(self):
import sage.all as sage
return sage.oo
def __hash__(self):
return super(Infinity, self).__hash__()
def __eq__(self, other):
return other is S.Infinity
def __ne__(self, other):
return other is not S.Infinity
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if other.is_real:
return S.false
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
if other.is_real:
if other.is_finite or other is S.NegativeInfinity:
return S.false
elif other.is_nonpositive:
return S.false
elif other.is_infinite and other.is_positive:
return S.true
return Expr.__le__(self, other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if other.is_real:
if other.is_finite or other is S.NegativeInfinity:
return S.true
elif other.is_nonpositive:
return S.true
elif other.is_infinite and other.is_positive:
return S.false
return Expr.__gt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if other.is_real:
return S.true
return Expr.__ge__(self, other)
def __mod__(self, other):
return S.NaN
__rmod__ = __mod__
def floor(self):
return self
def ceiling(self):
return self
oo = S.Infinity
class NegativeInfinity(with_metaclass(Singleton, Number)):
"""Negative infinite quantity.
NegativeInfinity is a singleton, and can be accessed
by ``S.NegativeInfinity``.
See Also
========
Infinity
"""
is_commutative = True
is_negative = True
is_infinite = True
is_number = True
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"-\infty"
def _eval_subs(self, old, new):
if self == old:
return new
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number):
if other is S.Infinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('inf'):
return Float('nan')
else:
return Float('-inf')
else:
return S.NegativeInfinity
return NotImplemented
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number):
if other is S.NegativeInfinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf'):
return Float('nan')
else:
return Float('-inf')
else:
return S.NegativeInfinity
return NotImplemented
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number):
if other is S.Zero or other is S.NaN:
return S.NaN
elif other.is_Float:
if other is S.NaN or other.is_zero:
return S.NaN
elif other.is_positive:
return Float('-inf')
else:
return Float('inf')
else:
if other.is_positive:
return S.NegativeInfinity
else:
return S.Infinity
return NotImplemented
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number):
if other is S.Infinity or \
other is S.NegativeInfinity or \
other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf') or \
other == Float('inf') or \
other is S.NaN:
return S.NaN
elif other.is_nonnegative:
return Float('-inf')
else:
return Float('inf')
else:
if other >= 0:
return S.NegativeInfinity
else:
return S.Infinity
return NotImplemented
__truediv__ = __div__
def __abs__(self):
return S.Infinity
def __neg__(self):
return S.Infinity
def _eval_power(self, expt):
"""
``expt`` is a symbolic object but not equal to 0 or 1.
================ ======= ==============================
Expression Result Notes
================ ======= ==============================
``(-oo) ** nan`` ``nan``
``(-oo) ** oo`` ``nan``
``(-oo) ** -oo`` ``nan``
``(-oo) ** e`` ``oo`` ``e`` is positive even integer
``(-oo) ** o`` ``-oo`` ``o`` is positive odd integer
================ ======= ==============================
See Also
========
Infinity
Pow
NaN
"""
if expt.is_number:
if expt is S.NaN or \
expt is S.Infinity or \
expt is S.NegativeInfinity:
return S.NaN
if isinstance(expt, Integer) and expt.is_positive:
if expt.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
return S.NegativeOne**expt*S.Infinity**expt
def _as_mpf_val(self, prec):
return mlib.fninf
def _sage_(self):
import sage.all as sage
return -(sage.oo)
def __hash__(self):
return super(NegativeInfinity, self).__hash__()
def __eq__(self, other):
return other is S.NegativeInfinity
def __ne__(self, other):
return other is not S.NegativeInfinity
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if other.is_real:
if other.is_finite or other is S.Infinity:
return S.true
elif other.is_nonnegative:
return S.true
elif other.is_infinite and other.is_negative:
return S.false
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
if other.is_real:
return S.true
return Expr.__le__(self, other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if other.is_real:
return S.false
return Expr.__gt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if other.is_real:
if other.is_finite or other is S.Infinity:
return S.false
elif other.is_nonnegative:
return S.false
elif other.is_infinite and other.is_negative:
return S.true
return Expr.__ge__(self, other)
def __mod__(self, other):
return S.NaN
__rmod__ = __mod__
def floor(self):
return self
def ceiling(self):
return self
class NaN(with_metaclass(Singleton, Number)):
"""
Not a Number.
This serves as a placeholder for numeric values that are indeterminate.
Most operations on NaN produce another NaN. Most indeterminate forms,
such as ``0/0`` or ``oo - oo``, produce NaN. Two exceptions are ``0**0``
and ``oo**0``, which both produce ``1`` (this is consistent with Python's
float).
NaN is loosely related to floating point nan, which is defined in the
IEEE 754 floating point standard, and corresponds to the Python
``float('nan')``. Differences are noted below.
NaN is mathematically not equal to anything else, even NaN itself. This
explains the initially counter-intuitive results with ``Eq`` and ``==`` in
the examples below.
NaN is not comparable so inequalities raise a TypeError. This is in
contrast with floating point nan where all inequalities are false.
NaN is a singleton, and can be accessed by ``S.NaN``, or can be imported
as ``nan``.
Examples
========
>>> from sympy import nan, S, oo, Eq
>>> nan is S.NaN
True
>>> oo - oo
nan
>>> nan + 1
nan
>>> Eq(nan, nan) # mathematical equality
False
>>> nan == nan # structural equality
True
References
==========
.. [1] http://en.wikipedia.org/wiki/NaN
"""
is_commutative = True
is_real = None
is_rational = None
is_algebraic = None
is_transcendental = None
is_integer = None
is_comparable = False
is_finite = None
is_zero = None
is_prime = None
is_positive = None
is_negative = None
is_number = True
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\mathrm{NaN}"
@_sympifyit('other', NotImplemented)
def __add__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __div__(self, other):
return self
__truediv__ = __div__
def floor(self):
return self
def ceiling(self):
return self
def _as_mpf_val(self, prec):
return _mpf_nan
def _sage_(self):
import sage.all as sage
return sage.NaN
def __hash__(self):
return super(NaN, self).__hash__()
def __eq__(self, other):
# NaN is structurally equal to another NaN
return other is S.NaN
def __ne__(self, other):
return other is not S.NaN
def _eval_Eq(self, other):
# NaN is not mathematically equal to anything, even NaN
return S.false
# Expr will _sympify and raise TypeError
__gt__ = Expr.__gt__
__ge__ = Expr.__ge__
__lt__ = Expr.__lt__
__le__ = Expr.__le__
nan = S.NaN
class ComplexInfinity(with_metaclass(Singleton, AtomicExpr)):
r"""Complex infinity.
In complex analysis the symbol `\tilde\infty`, called "complex
infinity", represents a quantity with infinite magnitude, but
undetermined complex phase.
ComplexInfinity is a singleton, and can be accessed by
``S.ComplexInfinity``, or can be imported as ``zoo``.
Examples
========
>>> from sympy import zoo, oo
>>> zoo + 42
zoo
>>> 42/zoo
0
>>> zoo + zoo
nan
>>> zoo*zoo
zoo
See Also
========
Infinity
"""
is_commutative = True
is_infinite = True
is_number = True
is_prime = False
is_complex = True
is_real = False
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\tilde{\infty}"
@staticmethod
def __abs__():
return S.Infinity
def floor(self):
return self
def ceiling(self):
return self
@staticmethod
def __neg__():
return S.ComplexInfinity
def _eval_power(self, expt):
if expt is S.ComplexInfinity:
return S.NaN
if isinstance(expt, Number):
if expt is S.Zero:
return S.NaN
else:
if expt.is_positive:
return S.ComplexInfinity
else:
return S.Zero
def _sage_(self):
import sage.all as sage
return sage.UnsignedInfinityRing.gen()
zoo = S.ComplexInfinity
class NumberSymbol(AtomicExpr):
is_commutative = True
is_finite = True
is_number = True
__slots__ = []
is_NumberSymbol = True
def __new__(cls):
return AtomicExpr.__new__(cls)
def approximation(self, number_cls):
""" Return an interval with number_cls endpoints
that contains the value of NumberSymbol.
If not implemented, then return None.
"""
def _eval_evalf(self, prec):
return Float._new(self._as_mpf_val(prec), prec)
def __eq__(self, other):
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if self is other:
return True
if other.is_Number and self.is_irrational:
return False
return False # NumberSymbol != non-(Number|self)
def __ne__(self, other):
return not self == other
def __le__(self, other):
if self is other:
return S.true
return Expr.__le__(self, other)
def __ge__(self, other):
if self is other:
return S.true
return Expr.__ge__(self, other)
def __int__(self):
# subclass with appropriate return value
raise NotImplementedError
def __long__(self):
return self.__int__()
def __hash__(self):
return super(NumberSymbol, self).__hash__()
class Exp1(with_metaclass(Singleton, NumberSymbol)):
r"""The `e` constant.
The transcendental number `e = 2.718281828\ldots` is the base of the
natural logarithm and of the exponential function, `e = \exp(1)`.
Sometimes called Euler's number or Napier's constant.
Exp1 is a singleton, and can be accessed by ``S.Exp1``,
or can be imported as ``E``.
Examples
========
>>> from sympy import exp, log, E
>>> E is exp(1)
True
>>> log(E)
1
References
==========
.. [1] http://en.wikipedia.org/wiki/E_%28mathematical_constant%29
"""
is_real = True
is_positive = True
is_negative = False # XXX Forces is_negative/is_nonnegative
is_irrational = True
is_number = True
is_algebraic = False
is_transcendental = True
__slots__ = []
def _latex(self, printer):
return r"e"
@staticmethod
def __abs__():
return S.Exp1
def __int__(self):
return 2
def _as_mpf_val(self, prec):
return mpf_e(prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (Integer(2), Integer(3))
elif issubclass(number_cls, Rational):
pass
def _eval_power(self, expt):
from sympy import exp
return exp(expt)
def _eval_rewrite_as_sin(self):
from sympy import sin
I = S.ImaginaryUnit
return sin(I + S.Pi/2) - I*sin(I)
def _eval_rewrite_as_cos(self):
from sympy import cos
I = S.ImaginaryUnit
return cos(I) + I*cos(I + S.Pi/2)
def _sage_(self):
import sage.all as sage
return sage.e
E = S.Exp1
class Pi(with_metaclass(Singleton, NumberSymbol)):
r"""The `\pi` constant.
The transcendental number `\pi = 3.141592654\ldots` represents the ratio
of a circle's circumference to its diameter, the area of the unit circle,
the half-period of trigonometric functions, and many other things
in mathematics.
Pi is a singleton, and can be accessed by ``S.Pi``, or can
be imported as ``pi``.
Examples
========
>>> from sympy import S, pi, oo, sin, exp, integrate, Symbol
>>> S.Pi
pi
>>> pi > 3
True
>>> pi.is_irrational
True
>>> x = Symbol('x')
>>> sin(x + 2*pi)
sin(x)
>>> integrate(exp(-x**2), (x, -oo, oo))
sqrt(pi)
References
==========
.. [1] http://en.wikipedia.org/wiki/Pi
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = True
is_number = True
is_algebraic = False
is_transcendental = True
__slots__ = []
def _latex(self, printer):
return r"\pi"
@staticmethod
def __abs__():
return S.Pi
def __int__(self):
return 3
def _as_mpf_val(self, prec):
return mpf_pi(prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (Integer(3), Integer(4))
elif issubclass(number_cls, Rational):
return (Rational(223, 71), Rational(22, 7))
def _sage_(self):
import sage.all as sage
return sage.pi
pi = S.Pi
class GoldenRatio(with_metaclass(Singleton, NumberSymbol)):
r"""The golden ratio, `\phi`.
`\phi = \frac{1 + \sqrt{5}}{2}` is an algebraic number. Two quantities
are in the golden ratio if their ratio is the same as the ratio of
their sum to the larger of the two quantities, i.e. their maximum.
GoldenRatio is a singleton, and can be accessed by ``S.GoldenRatio``.
Examples
========
>>> from sympy import S
>>> S.GoldenRatio > 1
True
>>> S.GoldenRatio.expand(func=True)
1/2 + sqrt(5)/2
>>> S.GoldenRatio.is_irrational
True
References
==========
.. [1] http://en.wikipedia.org/wiki/Golden_ratio
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = True
is_number = True
is_algebraic = True
is_transcendental = False
__slots__ = []
def _latex(self, printer):
return r"\phi"
def __int__(self):
return 1
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
rv = mlib.from_man_exp(phi_fixed(prec + 10), -prec - 10)
return mpf_norm(rv, prec)
def _eval_expand_func(self, **hints):
from sympy import sqrt
return S.Half + S.Half*sqrt(5)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.One, Rational(2))
elif issubclass(number_cls, Rational):
pass
def _sage_(self):
import sage.all as sage
return sage.golden_ratio
_eval_rewrite_as_sqrt = _eval_expand_func
class EulerGamma(with_metaclass(Singleton, NumberSymbol)):
r"""The Euler-Mascheroni constant.
`\gamma = 0.5772157\ldots` (also called Euler's constant) is a mathematical
constant recurring in analysis and number theory. It is defined as the
limiting difference between the harmonic series and the
natural logarithm:
.. math:: \gamma = \lim\limits_{n\to\infty}
\left(\sum\limits_{k=1}^n\frac{1}{k} - \ln n\right)
EulerGamma is a singleton, and can be accessed by ``S.EulerGamma``.
Examples
========
>>> from sympy import S
>>> S.EulerGamma.is_irrational
>>> S.EulerGamma > 0
True
>>> S.EulerGamma > 1
False
References
==========
.. [1] http://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = None
is_number = True
__slots__ = []
def _latex(self, printer):
return r"\gamma"
def __int__(self):
return 0
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
v = mlib.libhyper.euler_fixed(prec + 10)
rv = mlib.from_man_exp(v, -prec - 10)
return mpf_norm(rv, prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.Zero, S.One)
elif issubclass(number_cls, Rational):
return (S.Half, Rational(3, 5))
def _sage_(self):
import sage.all as sage
return sage.euler_gamma
class Catalan(with_metaclass(Singleton, NumberSymbol)):
r"""Catalan's constant.
`K = 0.91596559\ldots` is given by the infinite series
.. math:: K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}
Catalan is a singleton, and can be accessed by ``S.Catalan``.
Examples
========
>>> from sympy import S
>>> S.Catalan.is_irrational
>>> S.Catalan > 0
True
>>> S.Catalan > 1
False
References
==========
.. [1] http://en.wikipedia.org/wiki/Catalan%27s_constant
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = None
is_number = True
__slots__ = []
def __int__(self):
return 0
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
v = mlib.catalan_fixed(prec + 10)
rv = mlib.from_man_exp(v, -prec - 10)
return mpf_norm(rv, prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.Zero, S.One)
elif issubclass(number_cls, Rational):
return (Rational(9, 10), S.One)
def _sage_(self):
import sage.all as sage
return sage.catalan
class ImaginaryUnit(with_metaclass(Singleton, AtomicExpr)):
r"""The imaginary unit, `i = \sqrt{-1}`.
I is a singleton, and can be accessed by ``S.I``, or can be
imported as ``I``.
Examples
========
>>> from sympy import I, sqrt
>>> sqrt(-1)
I
>>> I*I
-1
>>> 1/I
-I
References
==========
.. [1] http://en.wikipedia.org/wiki/Imaginary_unit
"""
is_commutative = True
is_imaginary = True
is_finite = True
is_number = True
is_algebraic = True
is_transcendental = False
__slots__ = []
def _latex(self, printer):
return r"i"
@staticmethod
def __abs__():
return S.One
def _eval_evalf(self, prec):
return self
def _eval_conjugate(self):
return -S.ImaginaryUnit
def _eval_power(self, expt):
"""
b is I = sqrt(-1)
e is a symbolic object but not equal to 0 or 1
I**r -> (-1)**(r/2) -> exp(r/2*Pi*I) -> cos(Pi*r/2) + sin(Pi*r/2)*I, r is decimal
I**0 mod 4 -> 1
I**1 mod 4 -> I
I**2 mod 4 -> -1
I**3 mod 4 -> -I
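For instance, I**5 reduces via 5 % 4 == 1 to I, and I**6 reduces via
6 % 4 == 2 to -1.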
"""
if isinstance(expt, Number):
if isinstance(expt, Integer):
expt = expt.p % 4
if expt == 0:
return S.One
if expt == 1:
return S.ImaginaryUnit
if expt == 2:
return -S.One
return -S.ImaginaryUnit
return (S.NegativeOne)**(expt*S.Half)
return
def as_base_exp(self):
return S.NegativeOne, S.Half
def _sage_(self):
import sage.all as sage
return sage.I
@property
def _mpc_(self):
return (Float(0)._mpf_, Float(1)._mpf_)
I = S.ImaginaryUnit
def sympify_fractions(f):
return Rational(f.numerator, f.denominator)
converter[fractions.Fraction] = sympify_fractions
try:
if HAS_GMPY == 2:
import gmpy2 as gmpy
elif HAS_GMPY == 1:
import gmpy
else:
raise ImportError
def sympify_mpz(x):
return Integer(long(x))
def sympify_mpq(x):
return Rational(long(x.numerator), long(x.denominator))
converter[type(gmpy.mpz(1))] = sympify_mpz
converter[type(gmpy.mpq(1, 2))] = sympify_mpq
except ImportError:
pass
def sympify_mpmath(x):
return Expr._from_mpmath(x, x.context.prec)
converter[mpnumeric] = sympify_mpmath
def sympify_complex(a):
real, imag = list(map(sympify, (a.real, a.imag)))
return real + S.ImaginaryUnit*imag
converter[complex] = sympify_complex
_intcache[0] = S.Zero
_intcache[1] = S.One
_intcache[-1] = S.NegativeOne
from .power import Pow, integer_nthroot
from .mul import Mul
Mul.identity = One()
from .add import Add
Add.identity = Zero()
|
[] |
[] |
[
"SYMPY_TRACE_INT"
] |
[]
|
["SYMPY_TRACE_INT"]
|
python
| 1 | 0 | |
django/test/testcases.py
|
from __future__ import unicode_literals
import difflib
import errno
import json
import os
import posixpath
import socket
import sys
import threading
import unittest
import warnings
from collections import Counter
from contextlib import contextmanager
from copy import copy
from functools import wraps
from unittest.util import safe_repr
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import WSGIRequestHandler, WSGIServer
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import setting_changed, template_rendered
from django.test.utils import (
CaptureQueriesContext, ContextList, compare_xml, modify_settings,
override_settings,
)
from django.utils import six
from django.utils.decorators import classproperty
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import (
unquote, urljoin, urlparse, urlsplit, urlunsplit,
)
from django.utils.six.moves.urllib.request import url2pathname
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
"""
Puts value into a list if it's not already one.
Returns an empty list if value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e.msg)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super(_AssertNumQueriesContext, self).__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super(_AssertNumQueriesContext, self).__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed, self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s" % (
executed, self.num,
'\n'.join(
query['sql'] for query in self.captured_queries
)
)
)
class _AssertTemplateUsedContext(object):
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if len(self.rendered_templates) == 0:
message += ' No template was rendered.'
else:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names))
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
class _CursorFailure(object):
def __init__(self, cls_name, wrapped):
self.cls_name = cls_name
self.wrapped = wrapped
def __call__(self):
raise AssertionError(
"Database queries aren't allowed in SimpleTestCase. "
"Either use TestCase or TransactionTestCase to ensure proper test isolation or "
"set %s.allow_database_queries to True to silence this failure." % self.cls_name
)
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
_overridden_settings = None
_modified_settings = None
# Tests shouldn't be allowed to query the database since
# this base class doesn't enforce any isolation.
allow_database_queries = False
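# A subclass that genuinely needs the database can opt back in, e.g.
# (illustrative): class MyLegacyTests(SimpleTestCase): allow_database_queries = True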
@classmethod
def setUpClass(cls):
super(SimpleTestCase, cls).setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
if not cls.allow_database_queries:
for alias in connections:
connection = connections[alias]
connection.cursor = _CursorFailure(cls.__name__, connection.cursor)
@classmethod
def tearDownClass(cls):
if not cls.allow_database_queries:
for alias in connections:
connection = connections[alias]
connection.cursor = connection.cursor.wrapped
if hasattr(cls, '_cls_modified_context'):
cls._cls_modified_context.disable()
delattr(cls, '_cls_modified_context')
if hasattr(cls, '_cls_overridden_context'):
cls._cls_overridden_context.disable()
delattr(cls, '_cls_overridden_context')
super(SimpleTestCase, cls).tearDownClass()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
testMethod = getattr(self, self._testMethodName)
skipped = (
getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)
)
if not skipped:
try:
self._pre_setup()
except Exception:
result.addError(self, sys.exc_info())
return
super(SimpleTestCase, self).__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* Creating a test client.
* Clearing the mail test outbox.
"""
self.client = self.client_class()
mail.outbox = []
def _post_teardown(self):
"""Perform any post-test things."""
pass
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the original value when exiting the context.
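An illustrative use (LOGIN_URL stands in for any setting you want to override):
with self.settings(LOGIN_URL='/other/login/'):
    response = self.client.get('/sekrit/')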
"""
return override_settings(**kwargs)
def modify_settings(self, **kwargs):
"""
A context manager that temporarily applies changes to a list setting and
reverts to the original value when exiting the context.
"""
return modify_settings(**kwargs)
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None, msg_prefix='',
fetch_redirect_response=True):
"""Asserts that a response redirected to a specific URL, and that the
redirect URL can be loaded.
Note that assertRedirects won't work for external links since it uses
TestClient to do a request (use fetch_redirect_response=False to check
such links without fetching them).
"""
if host is not None:
warnings.warn(
"The host argument is deprecated and no longer used by assertRedirects",
RemovedInDjango20Warning, stacklevel=2
)
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(
len(response.redirect_chain) > 0,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
self.assertEqual(
response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected: Response code was %d (expected %d)"
% (response.redirect_chain[0][1], status_code)
)
url, status_code = response.redirect_chain[-1]
scheme, netloc, path, query, fragment = urlsplit(url)
self.assertEqual(
response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final Response code was %d (expected %d)"
% (response.status_code, target_status_code)
)
else:
# Not a followed redirect
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
# Prepend the request path to handle relative path redirects.
if not path.startswith('/'):
url = urljoin(response.request['PATH_INFO'], url)
path = urljoin(response.request['PATH_INFO'], path)
if fetch_redirect_response:
if netloc:
raise ValueError(
"The Django test client is unable to fetch remote URLs (got %s). "
"Use assertRedirects(..., fetch_redirect_response=False) instead." % url
)
redirect_response = response.client.get(path, QueryDict(query), secure=(scheme == 'https'))
# Get the redirection page, using the same client that was used
# to obtain the original response.
self.assertEqual(
redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s': response code was %d (expected %d)"
% (path, redirect_response.status_code, target_status_code)
)
if url != expected_url:
# For temporary backwards compatibility, try to compare with a relative url
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
relative_url = urlunsplit(('', '', e_path, e_query, e_fragment))
if url == relative_url:
warnings.warn(
"assertRedirects had to strip the scheme and domain from the "
"expected URL, as it was always added automatically to URLs "
"before Django 1.9. Please update your expected URLs by "
"removing the scheme and domain.",
RemovedInDjango20Warning, stacklevel=2)
expected_url = relative_url
self.assertEqual(
url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url)
)
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if hasattr(response, 'render') and callable(response.render) and not response.is_rendered:
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code)
)
if response.streaming:
content = b''.join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = force_text(text, encoding=response.charset)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:")
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False):
"""
        Asserts that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
        ``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of %s in response (expected %d)" % (real_count, text_repr, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr)
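    # Hedged example of assertContains; the URL, text and expected count are
    # illustrative assumptions:
    #
    #     response = self.client.get('/articles/')
    #     self.assertContains(response, 'Latest articles')
    #     self.assertContains(response, '<li class="article">', count=3, html=True)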
def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False):
"""
        Asserts that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
        ``text`` doesn't occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Asserts that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors))
)
elif field in context[form].fields:
self.fail(
msg_prefix + "The field '%s' on form '%s' in context %d contains no errors" %
(field, form, i)
)
else:
self.fail(
msg_prefix + "The form '%s' in context %d does not contain the field '%s'" %
(form, i, field)
)
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(
err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors)
)
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the response" % form)
def assertFormsetError(self, response, formset, form_index, field, errors,
msg_prefix=''):
"""
Asserts that a formset used to render the response has a specific error.
For field errors, specify the ``form_index`` and the ``field``.
For non-field errors, specify the ``form_index`` and the ``field`` as
None.
For non-form errors, specify ``form_index`` as None and the ``field``
as None.
"""
# Add punctuation to msg_prefix
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + 'Response did not use any contexts to '
'render the response')
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_formset = False
for i, context in enumerate(contexts):
if formset not in context:
continue
found_formset = True
for err in errors:
if field is not None:
if field in context[formset].forms[form_index].errors:
field_errors = context[formset].forms[form_index].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on formset '%s', "
"form %d in context %d does not contain the "
"error '%s' (actual errors: %s)" %
(field, formset, form_index, i, err, repr(field_errors))
)
elif field in context[formset].forms[form_index].fields:
self.fail(
msg_prefix + "The field '%s' on formset '%s', form %d in context %d contains no errors"
% (field, formset, form_index, i)
)
else:
self.fail(
msg_prefix + "The formset '%s', form %d in context %d does not contain the field '%s'"
% (formset, form_index, i, field)
)
elif form_index is not None:
non_field_errors = context[formset].forms[form_index].non_field_errors()
self.assertFalse(
len(non_field_errors) == 0,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain any non-field errors." % (formset, form_index, i)
)
self.assertTrue(
err in non_field_errors,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain the non-field error '%s' (actual errors: %s)"
% (formset, form_index, i, err, repr(non_field_errors))
)
else:
non_form_errors = context[formset].non_form_errors()
self.assertFalse(
len(non_form_errors) == 0,
msg_prefix + "The formset '%s' in context %d does not "
"contain any non-form errors." % (formset, i)
)
self.assertTrue(
err in non_form_errors,
msg_prefix + "The formset '%s' in context %d does not "
"contain the non-form error '%s' (actual errors: %s)"
% (formset, i, err, repr(non_form_errors))
)
if not found_formset:
self.fail(msg_prefix + "The formset '%s' was not used to render the response" % formset)
def _assert_template_used(self, response, template_name, msg_prefix):
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None and not hasattr(response, 'templates'):
raise ValueError(
"assertTemplateUsed() and assertTemplateNotUsed() are only "
"usable on responses fetched using the Django test Client."
)
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
# use this template with context manager
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates if t.name is not None]
return None, template_names, msg_prefix
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None):
"""
Asserts that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(self, context_mgr_template)
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(
template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s"
% (template_name, ', '.join(template_names))
)
if count is not None:
self.assertEqual(
template_names.count(template_name), count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)."
% (template_name, count, template_names.count(template_name))
)
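    # assertTemplateUsed works on a test-client response or as a context
    # manager around arbitrary rendering code; the template names here are
    # assumptions for illustration:
    #
    #     response = self.client.get('/home/')
    #     self.assertTemplateUsed(response, 'home.html')
    #
    #     from django.template.loader import render_to_string
    #     with self.assertTemplateUsed('emails/welcome.txt'):
    #         render_to_string('emails/welcome.txt', {})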
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix
)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template)
self.assertFalse(
template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name
)
@contextmanager
def _assert_raises_message_cm(self, expected_exception, expected_message):
with self.assertRaises(expected_exception) as cm:
yield cm
self.assertIn(expected_message, str(cm.exception))
def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs):
"""
        Asserts that expected_message is found in the message of a raised
exception.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
# callable_obj was a documented kwarg in Django 1.8 and older.
callable_obj = kwargs.pop('callable_obj', None)
if callable_obj:
warnings.warn(
'The callable_obj kwarg is deprecated. Pass the callable '
'as a positional argument instead.', RemovedInDjango20Warning
)
elif len(args):
callable_obj = args[0]
args = args[1:]
cm = self._assert_raises_message_cm(expected_exception, expected_message)
# Assertion used in context manager fashion.
if callable_obj is None:
return cm
# Assertion was passed a callable.
with cm:
callable_obj(*args, **kwargs)
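    # assertRaisesMessage can be used as a context manager or handed a callable
    # plus its arguments; the exception and message below are illustrative:
    #
    #     with self.assertRaisesMessage(ValueError, 'invalid literal'):
    #         int('not a number')
    #
    #     self.assertRaisesMessage(ValueError, 'invalid literal', int, 'not a number')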
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Asserts that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args, **dict(field_kwargs, required=False))
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [force_text(required.error_messages['required'])]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages, error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length': 2, 'max_length': 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass)
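    # A hedged sketch of assertFieldOutput against forms.EmailField; the error
    # message assumes Django's default English locale:
    #
    #     from django.forms import EmailField
    #     self.assertFieldOutput(
    #         EmailField,
    #         valid={'a@a.com': 'a@a.com'},
    #         invalid={'aaa': ['Enter a valid email address.']},
    #     )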
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Asserts that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
six.text_type(dom1).splitlines(),
six.text_type(dom2).splitlines(),
)))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Asserts that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle)
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Asserts that the JSON fragments raw and expected_data are equal.
        Usual JSON non-significant whitespace rules apply as the heavy
        lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Asserts that the JSON fragments raw and expected_data are not equal.
        Usual JSON non-significant whitespace rules apply as the heavy
        lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
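    # Example of the JSON assertions; the fragments are illustrative:
    #
    #     self.assertJSONEqual('{"a": 1, "b": [1, 2]}', {'a': 1, 'b': [1, 2]})
    #     self.assertJSONNotEqual('{"a": 1}', {'a': 2})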
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
diff = ('\n' + '\n'.join(
difflib.ndiff(
six.text_type(xml1).splitlines(),
six.text_type(xml2).splitlines(),
)
))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
# If transactions aren't available, Django will serialize the database
# contents into a fixture during setup and flush and reload them
# during teardown (as flush does not restore data from migrations).
# This can be slow; this flag allows enabling on a per-case basis.
serialized_rollback = False
    # Since tests will be wrapped in a transaction, or serialized if
    # transactions are not available, we allow queries to be run.
allow_database_queries = True
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* If the class has an 'available_apps' attribute, restricting the app
registry to these applications, then firing post_migrate -- it must
run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, installing these fixtures.
"""
super(TransactionTestCase, self)._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=self.available_apps,
enter=True,
)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False,
)
raise
@classmethod
def _databases_names(cls, include_mirrors=True):
        # If the test case has a multi_db=True flag, act on all databases,
        # optionally including mirrors. Otherwise, act only on the default DB.
if getattr(cls, 'multi_db', False):
return [
alias for alias in connections
if include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']
]
else:
return [DEFAULT_DB_ALIAS]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list())
if sql_list:
with transaction.atomic(using=db_name):
cursor = conn.cursor()
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# If we need to provide replica initial data from migrated apps,
# then do so.
if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db_name})
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""Performs any post-test things. This includes:
* Flushing the contents of the database, to leave a clean slate. If
the class has an 'available_apps' attribute, post_migrate isn't fired.
* Force-closing the connection, so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super(TransactionTestCase, self)._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all():
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
inhibit_post_migrate = (
self.available_apps is not None or
( # Inhibit the post_migrate signal when using serialized
# rollback to avoid trying to recreate the serialized data.
self.serialized_rollback and
hasattr(connections[db_name], '_test_serialized_contents')
)
)
call_command('flush', verbosity=0, interactive=False,
database=db_name, reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=inhibit_post_migrate)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None):
items = six.moves.map(transform, qs)
if not ordered:
return self.assertEqual(Counter(items), Counter(values), msg=msg)
values = list(values)
# For example qs.iterator() could be passed as qs, but it does not
# have 'ordered' attribute.
if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
raise ValueError("Trying to compare non-ordered queryset "
"against more than one ordered values")
return self.assertEqual(list(items), values, msg=msg)
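    # assertQuerysetEqual compares a queryset against expected values after
    # applying `transform` (repr by default); the model and objects below are
    # hypothetical:
    #
    #     self.assertQuerysetEqual(
    #         Article.objects.order_by('title'),
    #         ['<Article: A>', '<Article: B>'],
    #     )
    #     self.assertQuerysetEqual(qs, [obj1, obj2], transform=lambda x: x, ordered=False)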
def assertNumQueries(self, num, func=None, *args, **kwargs):
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
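    # assertNumQueries supports both styles as well -- pass a callable, or use
    # it as a context manager (the query count and models are assumptions):
    #
    #     with self.assertNumQueries(2):
    #         list(Article.objects.all())
    #         list(Author.objects.all())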
def connections_support_transactions():
"""
Returns True if all connections support transactions.
"""
return all(conn.features.supports_transactions
for conn in connections.all())
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but uses `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Helper method to open atomic blocks for multiple databases"""
atomics = {}
for db_name in cls._databases_names():
atomics[db_name] = transaction.atomic(using=db_name)
atomics[db_name].__enter__()
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened through the previous method"""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
if not connections_support_transactions():
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command('loaddata', *cls.fixtures, **{
'verbosity': 0,
'commit': False,
'database': db_name,
})
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
@classmethod
def tearDownClass(cls):
if connections_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all():
conn.close()
super(TestCase, cls).tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase"""
pass
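    # Typical override (the Author model is hypothetical): objects created in
    # setUpTestData are shared by every test method in the class and rolled
    # back by the class-level atomics in tearDownClass:
    #
    #     @classmethod
    #     def setUpTestData(cls):
    #         cls.author = Author.objects.create(name='Ada')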
def _should_reload_connections(self):
if connections_support_transactions():
return False
return super(TestCase, self)._should_reload_connections()
def _fixture_setup(self):
if not connections_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super(TestCase, self)._fixture_setup()
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_teardown()
try:
for db_name in reversed(self._databases_names()):
if self._should_check_constraints(connections[db_name]):
connections[db_name].check_constraints()
finally:
self._rollback_atomics(self.atomics)
def _should_check_constraints(self, connection):
return (
connection.features.can_defer_constraint_checks and
not connection.needs_rollback and connection.is_usable()
)
class CheckCondition(object):
"""Descriptor class for deferred condition checking"""
def __init__(self, cond_func):
self.cond_func = cond_func
def __get__(self, instance, cls=None):
return self.cond_func()
def _deferredSkip(condition, reason):
def decorator(test_func):
if not (isinstance(test_func, type) and
issubclass(test_func, unittest.TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
test_item.__unittest_skip__ = CheckCondition(condition)
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIfDBFeature(*features):
"""
Skip a test if a database has at least one of the named features.
"""
return _deferredSkip(
lambda: any(getattr(connection.features, feature, False) for feature in features),
"Database has feature(s) %s" % ", ".join(features)
)
def skipUnlessDBFeature(*features):
"""
Skip a test unless a database has all the named features.
"""
return _deferredSkip(
lambda: not all(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support feature(s): %s" % ", ".join(features)
)
def skipUnlessAnyDBFeature(*features):
"""
Skip a test unless a database has any of the named features.
"""
return _deferredSkip(
lambda: not any(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support any of the feature(s): %s" % ", ".join(features)
)
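# These decorators defer the feature check until the test actually runs, so
# they are safe to apply at import time, before database connections are
# configured. Illustrative usage (feature names follow connection.features):
#
#     @skipUnlessDBFeature('supports_transactions')
#     def test_rollback_behaviour(self):
#         ...
#
#     @skipIfDBFeature('interprets_empty_strings_as_nulls')
#     def test_empty_string_is_stored_verbatim(self):
#         ...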
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
Just a regular WSGIRequestHandler except it doesn't log to the standard
output any of the requests received, so as to not clutter the output for
the tests' results.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super(FSFilesHandler, self).__init__()
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super(FSFilesHandler, self).get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace('\\', '/').lstrip('/')
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super(FSFilesHandler, self).__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""
Thread for running a live http server while the tests are running.
"""
def __init__(self, host, possible_ports, static_handler, connections_override=None):
self.host = host
self.port = None
self.possible_ports = possible_ports
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super(LiveServerThread, self).__init__()
def run(self):
"""
Sets up the live server and databases, and then loops over handling
http requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
# Go through the list of possible ports, hoping that we can find
# one that is free to use for the WSGI server.
for index, port in enumerate(self.possible_ports):
try:
self.httpd = self._create_server(port)
except socket.error as e:
if (index + 1 < len(self.possible_ports) and
e.errno == errno.EADDRINUSE):
# This port is already in use, so we go on and try with
# the next one in the list.
continue
else:
# Either none of the given ports are free or the error
# is something else than "Address already in use". So
# we let that error bubble up to the main thread.
raise
else:
# A free port was found.
self.port = port
break
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
def _create_server(self, port):
return WSGIServer((self.host, port), QuietWSGIRequestHandler, allow_reuse_address=False)
def terminate(self):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
class LiveServerTestCase(TransactionTestCase):
"""
Does basically the same as TransactionTestCase but also launches a live
http server in a separate thread so that the tests may use another testing
framework, such as Selenium for example, instead of the built-in dummy
client.
Note that it inherits from TransactionTestCase instead of TestCase because
    the threads do not share the same transactions (unless using in-memory
    sqlite) and each thread needs to commit all of its transactions so that
    the other thread can see the changes.
"""
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return 'http://%s:%s' % (
cls.server_thread.host, cls.server_thread.port)
@classmethod
def setUpClass(cls):
super(LiveServerTestCase, cls).setUpClass()
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']):
# Explicitly enable thread-shareability for this connection
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
# Launch the live server's thread
specified_address = os.environ.get(
'DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081-8179')
# The specified ports may be of the form '8000-8010,8080,9200-9300'
# i.e. a comma-separated list of ports or ranges of ports, so we break
# it down into a detailed list of all possible ports.
possible_ports = []
try:
host, port_ranges = specified_address.split(':')
for port_range in port_ranges.split(','):
# A port range can be of either form: '8000' or '8000-8010'.
extremes = list(map(int, port_range.split('-')))
assert len(extremes) in [1, 2]
if len(extremes) == 1:
# Port range of the form '8000'
possible_ports.append(extremes[0])
else:
# Port range of the form '8000-8010'
for port in range(extremes[0], extremes[1] + 1):
possible_ports.append(port)
except Exception:
msg = 'Invalid address ("%s") for live server.' % specified_address
six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2])
cls.server_thread = cls._create_server_thread(host, possible_ports, connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
# Clean up behind ourselves, since tearDownClass won't get called in
# case of errors.
cls._tearDownClassInternal()
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, host, possible_ports, connections_override):
return LiveServerThread(
host,
possible_ports,
cls.static_handler,
connections_override=connections_override,
)
@classmethod
def _tearDownClassInternal(cls):
        # There may not be a 'server_thread' attribute if setUpClass() has
        # raised an exception for some reason.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.terminate()
cls.server_thread.join()
# Restore sqlite in-memory database connections' non-shareability
for conn in connections.all():
if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']):
conn.allow_thread_sharing = False
@classmethod
def tearDownClass(cls):
cls._tearDownClassInternal()
super(LiveServerTestCase, cls).tearDownClass()
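# A hedged sketch of a LiveServerTestCase-based test; the selenium dependency
# and the page contents are assumptions, not provided by this module:
#
#     class FrontendTests(LiveServerTestCase):
#         def test_homepage_title(self):
#             from selenium.webdriver.firefox.webdriver import WebDriver
#             driver = WebDriver()
#             try:
#                 driver.get(self.live_server_url + '/')
#                 assert 'Welcome' in driver.title
#             finally:
#                 driver.quit()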
class SerializeMixin(object):
"""
Mixin to enforce serialization of TestCases that share a common resource.
Define a common 'lockfile' for each set of TestCases to serialize. This
file must exist on the filesystem.
Place it early in the MRO in order to isolate setUpClass / tearDownClass.
"""
lockfile = None
@classmethod
def setUpClass(cls):
if cls.lockfile is None:
raise ValueError(
"{}.lockfile isn't set. Set it to a unique value "
"in the base class.".format(cls.__name__))
cls._lockfile = open(cls.lockfile)
locks.lock(cls._lockfile, locks.LOCK_EX)
super(SerializeMixin, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(SerializeMixin, cls).tearDownClass()
cls._lockfile.close()
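# Usage sketch for SerializeMixin: every TestCase subclass sharing the same
# lockfile runs its setUpClass/tearDownClass serially. Using __file__ as the
# lockfile is a common choice since it is guaranteed to exist:
#
#     class BaseFilesystemTests(SerializeMixin, TestCase):
#         lockfile = __file__
#
#     class FirstSuite(BaseFilesystemTests):
#         ...
#
#     class SecondSuite(BaseFilesystemTests):
#         ...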
|
[] |
[] |
[
"DJANGO_LIVE_TEST_SERVER_ADDRESS"
] |
[]
|
["DJANGO_LIVE_TEST_SERVER_ADDRESS"]
|
python
| 1 | 0 | |
az/src/main/java/com/github/rmee/az/AzPlugin.java
|
package com.github.rmee.az;
import com.github.rmee.az.aks.AzGetKubernetesCredentialsTask;
import com.github.rmee.cli.base.Cli;
import com.github.rmee.cli.base.CliExecExtension;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import java.io.File;
public class AzPlugin implements Plugin<Project> {
public void apply(Project project) {
File azureConfigDir = new File(project.getBuildDir(), ".azure");
AzExtension extension = project.getExtensions().create("az", AzExtension.class);
extension.setProject(project);
// TODO azure-cli image insufficient by default: https://github.com/Azure/AKS/issues/469
//extension.getCli().setImageName("microsoft/azure-cli");
extension.getCli().setImageName("remmeier/azure-cli-kubectl");
extension.getCli().setImageTag("2.0.38");
AzLoginTask login = project.getTasks().create("azLogin", AzLoginTask.class);
AzGetKubernetesCredentialsTask getCredentials =
project.getTasks().create("azGetKubernetesCredentials", AzGetKubernetesCredentialsTask.class);
getCredentials.dependsOn(login);
extension.setSubscriptionId(System.getenv("AZ_SUBSCRIPTION_ID"));
extension.setServicePrincipal(Boolean.parseBoolean(System.getenv("AZ_SERVICE_PRINCIPLE")));
extension.setUserName(System.getenv("AZ_USER"));
extension.setPassword(System.getenv("AZ_PASS"));
extension.setTenantId(System.getenv("AZ_TENANT_ID"));
extension.getAks().setKubeDir(new File(project.getRootProject().getProjectDir(), "build/.kube"));
CliExecExtension cliExec = project.getExtensions().getByType(CliExecExtension.class);
cliExec.register("az", extension.getCli());
}
}
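// Hedged usage note: the plugin reads its Azure credentials from environment
// variables when applied, so a minimal (hypothetical) CI invocation would
// export them before running the generated tasks, for example:
//
//   export AZ_SUBSCRIPTION_ID=... AZ_TENANT_ID=... AZ_USER=... AZ_PASS=...
//   ./gradlew azLogin azGetKubernetesCredentials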
|
[
"\"AZ_SUBSCRIPTION_ID\"",
"\"AZ_SERVICE_PRINCIPLE\"",
"\"AZ_USER\"",
"\"AZ_PASS\"",
"\"AZ_TENANT_ID\""
] |
[] |
[
"AZ_PASS",
"AZ_SUBSCRIPTION_ID",
"AZ_TENANT_ID",
"AZ_SERVICE_PRINCIPLE",
"AZ_USER"
] |
[]
|
["AZ_PASS", "AZ_SUBSCRIPTION_ID", "AZ_TENANT_ID", "AZ_SERVICE_PRINCIPLE", "AZ_USER"]
|
java
| 5 | 0 | |
kotsadm/pkg/airgap/airgap.go
|
package airgap
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
"github.com/replicatedhq/kots/kotsadm/pkg/airgap/types"
kotsadmconfig "github.com/replicatedhq/kots/kotsadm/pkg/config"
"github.com/replicatedhq/kots/kotsadm/pkg/downstream"
"github.com/replicatedhq/kots/kotsadm/pkg/identity"
"github.com/replicatedhq/kots/kotsadm/pkg/logger"
"github.com/replicatedhq/kots/kotsadm/pkg/preflight"
"github.com/replicatedhq/kots/kotsadm/pkg/registry"
registrytypes "github.com/replicatedhq/kots/kotsadm/pkg/registry/types"
"github.com/replicatedhq/kots/kotsadm/pkg/store"
"github.com/replicatedhq/kots/kotsadm/pkg/supportbundle"
"github.com/replicatedhq/kots/kotsadm/pkg/version"
kotsv1beta1 "github.com/replicatedhq/kots/kotskinds/apis/kots/v1beta1"
"github.com/replicatedhq/kots/pkg/archives"
"github.com/replicatedhq/kots/pkg/crypto"
"github.com/replicatedhq/kots/pkg/kotsutil"
"github.com/replicatedhq/kots/pkg/pull"
"k8s.io/client-go/kubernetes/scheme"
)
// CreateAppFromAirgap does a lot. Maybe too much. Definitely too much.
// This function assumes that there's an app in the database that doesn't have a version.
// After execution, there will be a sequence 0 of the app, and all clusters in the database
// will also have a version.
func CreateAppFromAirgap(pendingApp *types.PendingApp, airgapPath string, registryHost string, namespace string, username string, password string, isAutomated bool, skipPreflights bool) (finalError error) {
if err := store.GetStore().SetTaskStatus("airgap-install", "Processing package...", "running"); err != nil {
return errors.Wrap(err, "failed to set task status")
}
finishedCh := make(chan struct{})
defer close(finishedCh)
go func() {
for {
select {
case <-time.After(time.Second):
if err := store.GetStore().UpdateTaskStatusTimestamp("airgap-install"); err != nil {
logger.Error(err)
}
case <-finishedCh:
return
}
}
}()
defer func() {
if finalError == nil {
if err := store.GetStore().ClearTaskStatus("airgap-install"); err != nil {
logger.Error(errors.Wrap(err, "failed to clear install task status"))
}
if err := store.GetStore().SetAppInstallState(pendingApp.ID, "installed"); err != nil {
logger.Error(errors.Wrap(err, "failed to set app status to installed"))
}
} else {
if err := store.GetStore().SetTaskStatus("airgap-install", finalError.Error(), "failed"); err != nil {
logger.Error(errors.Wrap(err, "failed to set error on install task status"))
}
if err := store.GetStore().SetAppInstallState(pendingApp.ID, "airgap_upload_error"); err != nil {
logger.Error(errors.Wrap(err, "failed to set app status to error"))
}
}
}()
if err := store.GetStore().SetAppIsAirgap(pendingApp.ID, true); err != nil {
return errors.Wrap(err, "failed to set app is airgap")
}
// Extract it
if err := store.GetStore().SetTaskStatus("airgap-install", "Extracting files...", "running"); err != nil {
return errors.Wrap(err, "failed to set task status")
}
archiveDir := airgapPath
if strings.ToLower(filepath.Ext(airgapPath)) == ".airgap" {
		// on the api side, headless installs don't have the airgap file
dir, err := version.ExtractArchiveToTempDirectory(airgapPath)
if err != nil {
return errors.Wrap(err, "failed to extract archive")
}
defer os.RemoveAll(dir)
archiveDir = dir
}
// extract the release
workspace, err := ioutil.TempDir("", "kots-airgap")
if err != nil {
return errors.Wrap(err, "failed to create workspace")
}
defer os.RemoveAll(workspace)
releaseDir, err := extractAppRelease(workspace, archiveDir)
if err != nil {
return errors.Wrap(err, "failed to extract app dir")
}
tmpRoot, err := ioutil.TempDir("", "kots")
if err != nil {
return errors.Wrap(err, "failed to create temp root")
}
defer os.RemoveAll(tmpRoot)
if err := store.GetStore().SetTaskStatus("airgap-install", "Reading license data...", "running"); err != nil {
return errors.Wrap(err, "failed to set task status")
}
decode := scheme.Codecs.UniversalDeserializer().Decode
obj, _, err := decode([]byte(pendingApp.LicenseData), nil, nil)
if err != nil {
return errors.Wrap(err, "failed to read pending license data")
}
license := obj.(*kotsv1beta1.License)
licenseFile, err := ioutil.TempFile("", "kotsadm")
if err != nil {
return errors.Wrap(err, "failed to create temp file")
}
if err := ioutil.WriteFile(licenseFile.Name(), []byte(pendingApp.LicenseData), 0644); err != nil {
os.Remove(licenseFile.Name())
return errors.Wrapf(err, "failed to write license to temp file")
}
pipeReader, pipeWriter := io.Pipe()
go func() {
scanner := bufio.NewScanner(pipeReader)
for scanner.Scan() {
if err := store.GetStore().SetTaskStatus("airgap-install", scanner.Text(), "running"); err != nil {
logger.Error(err)
}
}
pipeReader.CloseWithError(scanner.Err())
}()
appNamespace := os.Getenv("POD_NAMESPACE")
if os.Getenv("KOTSADM_TARGET_NAMESPACE") != "" {
appNamespace = os.Getenv("KOTSADM_TARGET_NAMESPACE")
}
configValues, err := kotsadmconfig.ReadConfigValuesFromInClusterSecret()
if err != nil {
return errors.Wrap(err, "failed to read config values from in cluster")
}
configFile := ""
if configValues != "" {
tmpFile, err := ioutil.TempFile("", "kots")
if err != nil {
return errors.Wrap(err, "failed to create temp file for config values")
}
defer os.RemoveAll(tmpFile.Name())
if err := ioutil.WriteFile(tmpFile.Name(), []byte(configValues), 0644); err != nil {
return errors.Wrap(err, "failed to write config values to temp file")
}
configFile = tmpFile.Name()
}
identityConfigFile, err := identity.InitAppIdentityConfig(pendingApp.Slug, kotsv1beta1.Storage{}, crypto.AESCipher{})
if err != nil {
return errors.Wrap(err, "failed to init identity config")
}
defer os.Remove(identityConfigFile)
pullOptions := pull.PullOptions{
Downstreams: []string{"this-cluster"},
LocalPath: releaseDir,
Namespace: appNamespace,
LicenseFile: licenseFile.Name(),
ConfigFile: configFile,
IdentityConfigFile: identityConfigFile,
AirgapRoot: archiveDir,
Silent: true,
ExcludeKotsKinds: true,
RootDir: tmpRoot,
ExcludeAdminConsole: true,
RewriteImages: true,
ReportWriter: pipeWriter,
RewriteImageOptions: pull.RewriteImageOptions{
ImageFiles: filepath.Join(archiveDir, "images"),
Host: registryHost,
Namespace: namespace,
Username: username,
Password: password,
},
AppSlug: pendingApp.Slug,
AppSequence: 0,
}
if _, err := pull.Pull(fmt.Sprintf("replicated://%s", license.Spec.AppSlug), pullOptions); err != nil {
return errors.Wrap(err, "failed to pull")
}
if err := store.GetStore().AddAppToAllDownstreams(pendingApp.ID); err != nil {
return errors.Wrap(err, "failed to add app to all downstreams")
}
a, err := store.GetStore().GetApp(pendingApp.ID)
if err != nil {
return errors.Wrap(err, "failed to get app from pending app")
}
if password == registrytypes.PasswordMask {
// On initial install, registry info can be copied from kotsadm config,
// and password in this case will not be included in the request.
kotsadmSettings, err := registry.GetKotsadmRegistry()
if err != nil {
logger.Error(errors.Wrap(err, "failed to load kotsadm config"))
} else if kotsadmSettings.Hostname == registryHost {
password = kotsadmSettings.Password
}
}
if err := store.GetStore().UpdateRegistry(pendingApp.ID, registryHost, username, password, namespace); err != nil {
return errors.Wrap(err, "failed to update registry")
}
// yes, again in case of errors
if err := store.GetStore().SetAppIsAirgap(pendingApp.ID, true); err != nil {
return errors.Wrap(err, "failed to set app is airgap the second time")
}
newSequence, err := version.CreateFirstVersion(a.ID, tmpRoot, "Airgap Upload", skipPreflights)
if err != nil {
return errors.Wrap(err, "failed to create new version")
}
kotsKinds, err := kotsutil.LoadKotsKindsFromPath(tmpRoot)
if err != nil {
return errors.Wrap(err, "failed to load kotskinds from path")
}
err = supportbundle.CreateRenderedSpec(a.ID, a.CurrentSequence, "", true, kotsKinds)
if err != nil {
return errors.Wrap(err, "failed to create rendered support bundle spec")
}
if isAutomated && kotsKinds.Config != nil {
// bypass the config screen if no configuration is required
licenseSpec, err := kotsKinds.Marshal("kots.io", "v1beta1", "License")
if err != nil {
return errors.Wrap(err, "failed to marshal license spec")
}
configSpec, err := kotsKinds.Marshal("kots.io", "v1beta1", "Config")
if err != nil {
return errors.Wrap(err, "failed to marshal config spec")
}
configValuesSpec, err := kotsKinds.Marshal("kots.io", "v1beta1", "ConfigValues")
if err != nil {
return errors.Wrap(err, "failed to marshal configvalues spec")
}
identityConfigSpec, err := kotsKinds.Marshal("kots.io", "v1beta1", "IdentityConfig")
if err != nil {
return errors.Wrap(err, "failed to marshal identityconfig spec")
}
configOpts := kotsadmconfig.ConfigOptions{
ConfigSpec: configSpec,
ConfigValuesSpec: configValuesSpec,
LicenseSpec: licenseSpec,
IdentityConfigSpec: identityConfigSpec,
RegistryHost: registryHost,
RegistryNamespace: namespace,
RegistryUser: username,
RegistryPassword: password,
}
needsConfig, err := kotsadmconfig.NeedsConfiguration(configOpts)
if err != nil {
return errors.Wrap(err, "failed to check if app needs configuration")
}
if !needsConfig {
if skipPreflights {
if err := version.DeployVersion(pendingApp.ID, newSequence); err != nil {
return errors.Wrap(err, "failed to deploy version")
}
} else {
err := downstream.SetDownstreamVersionPendingPreflight(pendingApp.ID, newSequence)
if err != nil {
return errors.Wrap(err, "failed to set downstream version status to 'pending preflight'")
}
}
}
}
if !skipPreflights {
if err := preflight.Run(pendingApp.ID, pendingApp.Slug, newSequence, true, tmpRoot); err != nil {
return errors.Wrap(err, "failed to start preflights")
}
}
return nil
}
func extractAppRelease(workspace string, airgapDir string) (string, error) {
files, err := ioutil.ReadDir(airgapDir)
if err != nil {
return "", errors.Wrapf(err, "failed to read airgap dir")
}
destDir := filepath.Join(workspace, "extracted-app-release")
if err := os.Mkdir(destDir, 0744); err != nil {
return "", errors.Wrap(err, "failed to create tmp dir")
}
numExtracted := 0
for _, file := range files {
if file.IsDir() { // TODO: support nested dirs?
continue
}
err := archives.ExtractTGZArchiveFromFile(filepath.Join(airgapDir, file.Name()), destDir)
if err != nil {
fmt.Printf("ignoring file %q: %v\n", file.Name(), err)
continue
}
numExtracted++
}
if numExtracted == 0 {
return "", errors.New("no release found in airgap archive")
}
return destDir, nil
}
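// Illustrative (hypothetical) invocation of CreateAppFromAirgap; the pending
// app, registry coordinates and credentials are assumptions made for this
// sketch and are normally supplied by the kotsadm API handlers:
//
//	pending := &types.PendingApp{ID: "app-id", Slug: "my-app", LicenseData: licenseYAML}
//	err := CreateAppFromAirgap(pending, "/tmp/bundle.airgap",
//		"registry.internal:5000", "my-namespace", "user", "pass",
//		false /* isAutomated */, false /* skipPreflights */)
//	if err != nil {
//		log.Fatal(err)
//	}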
|
[
"\"POD_NAMESPACE\"",
"\"KOTSADM_TARGET_NAMESPACE\"",
"\"KOTSADM_TARGET_NAMESPACE\""
] |
[] |
[
"POD_NAMESPACE",
"KOTSADM_TARGET_NAMESPACE"
] |
[]
|
["POD_NAMESPACE", "KOTSADM_TARGET_NAMESPACE"]
|
go
| 2 | 0 | |
benchmark-common/src/main/java/com/ss/benchmark/httpclient/common/BasePerformanceTest.java
|
package com.ss.benchmark.httpclient.common;
import com.codahale.metrics.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.*;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
import java.time.Instant;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.*;
import java.util.function.Supplier;
/**
 * Base class for performing some rudimentary performance tests of
 * HTTP client libraries. Orchestrated by TestNG.
 * <p>
 * Each client library needs to implement {@link HttpClientEngine}, which
 * exposes both synchronous and asynchronous modes.
 * </p>
* The test names have the following convention:
* <dl>
* <dt>testBlockingSyncXyx</dt>
* <dd>Test the client's synchronous mode in blocking scenarios</dd>
* <dt>testBlockingAsyncXyx</dt>
* <dd>Test the client's asynchronous mode in blocking scenarios</dd>
* <dt>testNonBlockingAsyncXyx</dt>
* <dd>Test the client's asynchronous mode in non-blocking scenarios</dd>
 * </dl>
* @author sharath.srinivasa
*/
@Test(groups = "performance")
public abstract class BasePerformanceTest {
protected static final String HELLO_URL = "/hello";
protected static final String MOCK_SHORT_URL = "/short";
protected static final String MOCK_LONG_URL = "/long";
protected static final String SERVER_HOST = System.getProperty("bm.host", "localhost");
protected static final int SERVER_PORT = Integer.parseInt(System.getProperty("bm.port", "8080"));
protected static final int DROPWIZARD_REPORTER_SECONDS =
Integer.parseInt(System.getProperty("bm.dropwizard.seconds", "30"));
public static class BlockingVars {
protected static final int EXECUTIONS = 10_000;
protected static final int WORKERS = 40;
}
public static class NonBlockingVars {
static final int EXECUTIONS = 1_000;
}
private static final Logger LOGGER = LoggerFactory.getLogger(BasePerformanceTest.class);
protected final MetricRegistry metricRegistry = new MetricRegistry();
protected final ScheduledReporter reporter = ConsoleReporter.forRegistry(metricRegistry).convertDurationsTo(TimeUnit.MILLISECONDS).build();
private ScheduledReporter csvReporter;
// These blockingLatches are for the blocking cases.
private ConcurrentHashMap<String, CountDownLatch> blockingLatches = new ConcurrentHashMap<>();
private Set<CountDownLatch> nonBlockingLatches = new HashSet<>();
/**
* HTTP client under test.
*/
private HttpClientEngine client;
/**
* HTTP client libraries implement this to be tested.
*/
protected abstract HttpClientEngine getClient();
@BeforeTest
public void beforeTest() {
// output metrics on a schedule
if (DROPWIZARD_REPORTER_SECONDS > 0) {
reporter.start(DROPWIZARD_REPORTER_SECONDS, TimeUnit.SECONDS);
}
File csvParentDir = new File(
java.util.Optional.ofNullable(System.getenv("BM.METRICS.DIR"))
.orElse("metrics-csv")
);
if (csvParentDir.isFile()) {
throw new RuntimeException("Expected " + csvParentDir.getAbsolutePath() + " to be a directory.");
}
File csvDir = new File(csvParentDir, Instant.now().toString());
if (!csvDir.mkdirs()) {
throw new RuntimeException("Could not create the directory: " + csvDir.getAbsolutePath());
}
csvReporter = CsvReporter.forRegistry(metricRegistry).convertDurationsTo(TimeUnit.MILLISECONDS).build(csvDir);
csvReporter.start(365, TimeUnit.DAYS); // the goal is to just get the end numbers.
client = getClient();
client.createClient(SERVER_HOST, SERVER_PORT);
}
@AfterTest
public void afterTest() throws IOException {
reporter.report();
reporter.stop();
reporter.close();
csvReporter.report();
csvReporter.stop();
csvReporter.close();
client.close();
}
@BeforeMethod
public void beforeMethod() {
}
@AfterMethod
public void afterMethod() {
// Yes, this sucks, but I haven't thought of a low-cost refactor.
Exceptions.rethrowChecked(() -> {
CountDownLatch blockingLatch = blockingLatches.remove(Thread.currentThread().getName());
if (blockingLatch != null)
blockingLatch.await();
for (CountDownLatch latch : nonBlockingLatches) {
latch.await();
}
return null;
});
LOGGER.debug("Completed");
}
@Test(priority = 0)
public void testWarmupCache(Method m) {
String method = m.getName();
LOGGER.debug("Start " + method);
for (int i = 0; i < HttpClientEngine.MAX_CONNECTION_POOL_SIZE; i++) {
syncGET(
MOCK_SHORT_URL,
Payloads.SHORT,
metricRegistry.timer(MetricRegistry.name(this.getClass(), method, "timing")),
metricRegistry.counter(MetricRegistry.name(this.getClass(), method, "errorRate")));
}
}
@Test(priority = 1, invocationCount = BlockingVars.EXECUTIONS, threadPoolSize = BlockingVars.WORKERS, groups = {"blocking"})
public void testBlockingSyncShortGET(Method m) {
String method = m.getName();
LOGGER.debug("Start " + method);
syncGET(
MOCK_SHORT_URL,
Payloads.SHORT,
metricRegistry.timer(MetricRegistry.name(this.getClass(), method, "timing")),
metricRegistry.counter(MetricRegistry.name(this.getClass(), method, "errorRate")));
}
@Test(priority = 1, invocationCount = BlockingVars.EXECUTIONS, threadPoolSize = BlockingVars.WORKERS, groups = {"blocking"})
public void testBlockingSyncShortShortPOST(Method m) {
String method = m.getName();
LOGGER.debug("Start " + method);
syncPOST(MOCK_SHORT_URL,
Payloads.SHORT,
Payloads.SHORT,
metricRegistry.timer(MetricRegistry.name(this.getClass(), method, "timing")),
metricRegistry.counter(MetricRegistry.name(this.getClass(), method, "errorRate")));
}
@Test(priority = 1, invocationCount = BlockingVars.EXECUTIONS, threadPoolSize = BlockingVars.WORKERS, groups = {"blocking", "sync"})
public void testBlockingSyncShortLongPOST(Method m) {
String method = m.getName();
LOGGER.debug("Start " + method);
syncPOST(MOCK_LONG_URL,
Payloads.SHORT,
Payloads.LONG,
metricRegistry.timer(MetricRegistry.name(this.getClass(), method, "timing")),
metricRegistry.counter(MetricRegistry.name(this.getClass(), method, "errorRate")));
}
@Test(priority = 1, invocationCount = BlockingVars.EXECUTIONS, threadPoolSize = BlockingVars.WORKERS, groups = {"blocking", "sync"})
public void testBlockingSyncLongLongPOST(Method m) {
String method = m.getName();
LOGGER.debug("Start " + method);
syncPOST(MOCK_LONG_URL,
Payloads.LONG,
Payloads.LONG,
metricRegistry.timer(MetricRegistry.name(this.getClass(), method, "timing")),
metricRegistry.counter(MetricRegistry.name(this.getClass(), method, "errorRate")));
}
    @Test(priority = 1, invocationCount = BlockingVars.EXECUTIONS, threadPoolSize = BlockingVars.WORKERS, groups = {"blocking", "async"})
public void testBlockingAsyncShortGET(Method m) {
String method = m.getName();
LOGGER.debug("Start " + method);
blockingAsyncGET(
MOCK_SHORT_URL,
Payloads.SHORT,
metricRegistry.timer(MetricRegistry.name(this.getClass(), method, "timing")),
metricRegistry.counter(MetricRegistry.name(this.getClass(), method, "errorRate")));
}
@Test(priority = 1, invocationCount = BlockingVars.EXECUTIONS, threadPoolSize = BlockingVars.WORKERS, groups = {"blocking", "async"})
public void testBlockingAsyncShortShortPOST(Method m) {
String method = m.getName();
LOGGER.debug("Start " + method);
blockingAsyncPOST(MOCK_SHORT_URL,
Payloads.SHORT,
Payloads.SHORT,
metricRegistry.timer(MetricRegistry.name(this.getClass(), method, "timing")),
metricRegistry.counter(MetricRegistry.name(this.getClass(), method, "errorRate")));
}
@Test(priority = 1, invocationCount = BlockingVars.EXECUTIONS, threadPoolSize = BlockingVars.WORKERS, groups = {"blocking", "async"})
public void testBlockingAsyncShortLongPOST(Method m) {
String method = m.getName();
LOGGER.debug("Start " + method);
blockingAsyncPOST(MOCK_LONG_URL,
Payloads.SHORT,
Payloads.LONG,
metricRegistry.timer(MetricRegistry.name(this.getClass(), method, "timing")),
metricRegistry.counter(MetricRegistry.name(this.getClass(), method, "errorRate")));
}
@Test(priority = 1, invocationCount = BlockingVars.EXECUTIONS, threadPoolSize = BlockingVars.WORKERS, groups = {"blocking", "async"})
public void testBlockingAsyncLongLongPOST(Method m) {
String method = m.getName();
LOGGER.debug("Start " + method);
blockingAsyncPOST(MOCK_LONG_URL,
Payloads.LONG,
Payloads.LONG,
metricRegistry.timer(MetricRegistry.name(this.getClass(), method, "timing")),
metricRegistry.counter(MetricRegistry.name(this.getClass(), method, "errorRate")));
}
@Test(priority = 2, dataProvider = "nonblocking-executions", groups = {"nonblocking", "async"})
public void testNonBlockingAsyncShortGET(Method m, String executionSizeName, Integer executions) {
String method = parameterizedName(m, executionSizeName);
LOGGER.debug("Start " + method);
nonBlockingAsyncGET(
executions,
MOCK_SHORT_URL,
Payloads.SHORT,
metricRegistry.timer(MetricRegistry.name(this.getClass(), method, "timing")),
metricRegistry.counter(MetricRegistry.name(this.getClass(), method, "errorRate")));
}
@Test(priority = 2, dataProvider = "nonblocking-executions", groups = {"nonblocking", "async"})
public void testNonBlockingAsyncShortShortPOST(Method m, String executionSizeName, Integer executions) {
String method = parameterizedName(m, executionSizeName);
LOGGER.debug("Start " + method);
nonBlockingAsyncPOST(
executions,
MOCK_SHORT_URL,
Payloads.SHORT,
Payloads.SHORT,
metricRegistry.timer(MetricRegistry.name(this.getClass(), method, "timing")),
metricRegistry.counter(MetricRegistry.name(this.getClass(), method, "errorRate")));
}
@Test(priority = 2, dataProvider = "nonblocking-executions", groups = {"nonblocking", "async"})
public void testNonBlockingAsyncLongLongPOST(Method m, String executionSizeName, Integer executions) {
String method = parameterizedName(m, executionSizeName);
LOGGER.debug("Start " + method);
nonBlockingAsyncPOST(
executions,
MOCK_LONG_URL,
Payloads.LONG,
Payloads.LONG,
metricRegistry.timer(MetricRegistry.name(this.getClass(), method, "timing")),
metricRegistry.counter(MetricRegistry.name(this.getClass(), method, "errorRate")));
}
private void nonBlockingAsyncGET(
int executions,
String url,
String expectedResponsePayload,
Timer timer,
Counter errors
) {
CountDownLatch latch = new CountDownLatch(executions);
nonBlockingLatches.add(latch);
for (int i = 0; i < executions; i++) {
asyncGET(url, expectedResponsePayload, latch, timer, errors);
}
}
private void nonBlockingAsyncPOST(
int executions,
String url,
String payload,
String expectedResponsePayload,
Timer timer,
Counter errors
) {
CountDownLatch latch = new CountDownLatch(executions);
nonBlockingLatches.add(latch);
for (int i = 0; i < executions; i++) {
asyncPOST(url, payload, expectedResponsePayload, latch, timer, errors);
}
}
private void blockingAsyncGET(
String url,
String expectedResponsePayload,
Timer timer,
Counter errors
) {
CountDownLatch latch = new CountDownLatch(1);
blockingLatches.putIfAbsent(Thread.currentThread().getName(), latch);
asyncGET(url, expectedResponsePayload, latch, timer, errors);
}
private void blockingAsyncPOST(
String url,
String payload,
String expectedResponsePayload,
Timer timer,
Counter errors
) {
CountDownLatch latch = new CountDownLatch(1);
blockingLatches.putIfAbsent(Thread.currentThread().getName(), latch);
asyncPOST(url, payload, expectedResponsePayload, latch, timer, errors);
}
private void asyncGET(String url, String expectedResponsePayload, CountDownLatch latch, Timer timer, Counter errors) {
doAsync(
() -> client.nonblockingGET(url),
expectedResponsePayload,
latch,
timer,
errors
);
}
private void asyncPOST(String url, String payload, String expect, CountDownLatch latch, Timer timer, Counter errors) {
doAsync(
() -> client.nonblockingPOST(url, payload),
expect,
latch,
timer,
errors
);
}
private void syncGET(String url, String expectedResponsePayload, Timer timer, Counter errors) {
doSync(
() -> client.blockingGET(url),
expectedResponsePayload,
timer,
errors
);
}
private void syncPOST(String url, String payload, String expectedResponsePayload, Timer timer, Counter errors) {
doSync(
() -> client.blockingPOST(url, payload),
expectedResponsePayload,
timer,
errors
);
}
// I felt like the code below was tricky enough to not duplicate it between the (a)syncXYZ cases; however,
// if you feel this is adversely affecting performance, we can go back to duplicating it..
private void doAsync(
Supplier<CompletableFuture<String>> op,
String expectedResponsePayload,
CountDownLatch latch,
Timer timer,
Counter errors
) {
Timer.Context ctx = timer.time();
try {
CompletableFuture<String> cf = op.get();
cf.handle((result, ex) -> {
if (ex != null || !expectedResponsePayload.equals(result)) {
errors.inc();
} else {
ctx.stop(); // the goal is to not count error cases in the timing metrics
}
latch.countDown();
return result;
});
} catch (Exception e) {
errors.inc();
latch.countDown(); // count down even on a synchronous failure so awaiting threads are released
}
}
private void doSync(Supplier<String> op, String expectedResponsePayload, Timer timer, Counter errors) {
Timer.Context ctx = timer.time();
String response = null;
try {
response = op.get();
ctx.stop();
} catch (Exception e) {
LOGGER.error(e.getMessage());
} finally {
// if an exception was thrown, response is still null, so this comparison also records an error
if (!expectedResponsePayload.equals(response)) {
errors.inc();
}
}
}
@DataProvider(name = "nonblocking-executions")
public static Object[][] dataProviderMethod() {
return new Object[][] {
{ "Parameterized", NonBlockingVars.EXECUTIONS },
{ "Pool_Size" , HttpClientEngine.MAX_CONNECTION_POOL_SIZE }
};
}
private String parameterizedName(Method m, String executionSizeName) {
return m.getName() + "-" + executionSizeName;
}
}
|
[
"\"BM.METRICS.DIR\""
] |
[] |
[
"BM.METRICS.DIR"
] |
[]
|
["BM.METRICS.DIR"]
|
java
| 1 | 0 | |
Data/Juliet-Java/Juliet-Java-v103/000/137/914/CWE400_Resource_Exhaustion__Environment_for_loop_51a.java
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE400_Resource_Exhaustion__Environment_for_loop_51a.java
Label Definition File: CWE400_Resource_Exhaustion.label.xml
Template File: sources-sinks-51a.tmpl.java
*/
/*
* @description
* CWE: 400 Resource Exhaustion
* BadSource: Environment Read count from an environment variable
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* Sinks: for_loop
* GoodSink: Validate count before using it as the loop variant in a for loop
* BadSink : Use count as the loop variant in a for loop
* Flow Variant: 51 Data flow: data passed as an argument from one function to another in different classes in the same package
*
* */
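/* Illustrative sketch (not part of the generated test case): the GoodSink described above
 * validates count before using it as the loop bound. Under the usual Juliet conventions, the
 * companion 51b sink might look roughly like the following; the upper bound of 20 and the
 * IO.writeLine body are assumptions for illustration only:
 *
 *     if (count > 0 && count <= 20)
 *     {
 *         for (int i = 0; i < count; i++)
 *         {
 *             IO.writeLine("hello");
 *         }
 *     }
 */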
import java.util.logging.Level;
public class CWE400_Resource_Exhaustion__Environment_for_loop_51a extends AbstractTestCase
{
public void bad() throws Throwable
{
int count;
count = Integer.MIN_VALUE; /* Initialize count */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read count from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
count = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing count from string", exceptNumberFormat);
}
}
}
(new CWE400_Resource_Exhaustion__Environment_for_loop_51b()).badSink(count );
}
public void good() throws Throwable
{
goodG2B();
goodB2G();
}
/* goodG2B() - use goodsource and badsink */
private void goodG2B() throws Throwable
{
int count;
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
count = 2;
(new CWE400_Resource_Exhaustion__Environment_for_loop_51b()).goodG2BSink(count );
}
/* goodB2G() - use badsource and goodsink */
private void goodB2G() throws Throwable
{
int count;
count = Integer.MIN_VALUE; /* Initialize count */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read count from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
count = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing count from string", exceptNumberFormat);
}
}
}
(new CWE400_Resource_Exhaustion__Environment_for_loop_51b()).goodB2GSink(count );
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
|
[
"\"ADD\"",
"\"ADD\""
] |
[] |
[
"ADD"
] |
[]
|
["ADD"]
|
java
| 1 | 0 | |
vendor/github.com/markbates/pop/soda/cmd/root.go
|
package cmd
import (
"fmt"
"os"
"path/filepath"
"github.com/markbates/going/defaults"
"github.com/markbates/pop"
"github.com/spf13/cobra"
)
var cfgFile string
var env string
var version bool
var RootCmd = &cobra.Command{
Short: "A tasty treat for all your database needs",
PersistentPreRun: func(c *cobra.Command, args []string) {
fmt.Printf("v%s\n\n", Version)
env = defaults.String(os.Getenv("GO_ENV"), env)
setConfigLocation()
},
Run: func(cmd *cobra.Command, args []string) {
if !version {
cmd.Help()
}
},
}
func Execute() {
if err := RootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(-1)
}
}
func init() {
RootCmd.Flags().BoolVarP(&version, "version", "v", false, "Show version information")
RootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "The configuration file you would like to use.")
RootCmd.PersistentFlags().StringVarP(&env, "env", "e", "development", "The environment you want to run migrations against. Will use $GO_ENV if set.")
RootCmd.PersistentFlags().BoolVarP(&pop.Debug, "debug", "d", false, "Use debug/verbose mode")
}
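// Illustrative usage (assumes a subcommand such as "migrate" is registered elsewhere):
//
//	GO_ENV=test soda migrate -c ./database.yml
//
// PersistentPreRun resolves the environment with defaults.String(os.Getenv("GO_ENV"), env),
// so an exported GO_ENV takes precedence over the value passed via the -e flag.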
func setConfigLocation() {
if cfgFile != "" {
abs, err := filepath.Abs(cfgFile)
if err != nil {
return
}
dir, file := filepath.Split(abs)
pop.AddLookupPaths(dir)
pop.ConfigName = file
}
pop.LoadConfigFile()
}
func getConn() *pop.Connection {
conn := pop.Connections[env]
if conn == nil {
fmt.Printf("There is no connection named %s defined!\n", env)
os.Exit(1)
}
return conn
}
|
[
"\"GO_ENV\""
] |
[] |
[
"GO_ENV"
] |
[]
|
["GO_ENV"]
|
go
| 1 | 0 | |
interop/client/main.go
|
package main
import (
"crypto/tls"
"errors"
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
"strings"
"time"
"golang.org/x/sync/errgroup"
"github.com/Psiphon-Labs/quic-go"
"github.com/Psiphon-Labs/quic-go/http3"
"github.com/Psiphon-Labs/quic-go/internal/handshake"
"github.com/Psiphon-Labs/quic-go/internal/protocol"
"github.com/Psiphon-Labs/quic-go/interop/http09"
"github.com/Psiphon-Labs/quic-go/interop/utils"
"github.com/Psiphon-Labs/quic-go/qlog"
)
var errUnsupported = errors.New("unsupported test case")
var tlsConf *tls.Config
func main() {
logFile, err := os.Create("/logs/log.txt")
if err != nil {
fmt.Printf("Could not create log file: %s\n", err.Error())
os.Exit(1)
}
defer logFile.Close()
log.SetOutput(logFile)
keyLog, err := utils.GetSSLKeyLog()
if err != nil {
fmt.Printf("Could not create key log: %s\n", err.Error())
os.Exit(1)
}
if keyLog != nil {
defer keyLog.Close()
}
tlsConf = &tls.Config{
InsecureSkipVerify: true,
KeyLogWriter: keyLog,
}
testcase := os.Getenv("TESTCASE")
if err := runTestcase(testcase); err != nil {
if err == errUnsupported {
fmt.Printf("unsupported test case: %s\n", testcase)
os.Exit(127)
}
fmt.Printf("Downloading files failed: %s\n", err.Error())
os.Exit(1)
}
}
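// Illustrative invocation (binary name and URL are placeholders; in the interop runner these
// come from the container environment):
//
//	TESTCASE=handshake ./client https://server4:443/file1
//
// An unknown TESTCASE value makes the client exit with status 127, as handled above.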
func runTestcase(testcase string) error {
flag.Parse()
urls := flag.Args()
getLogWriter, err := utils.GetQLOGWriter()
if err != nil {
return err
}
quicConf := &quic.Config{
Tracer: qlog.NewTracer(getLogWriter),
Versions: []quic.VersionNumber{quic.VersionDraft29},
}
if testcase == "http3" {
r := &http3.RoundTripper{
TLSClientConfig: tlsConf,
QuicConfig: quicConf,
}
defer r.Close()
return downloadFiles(r, urls, false)
}
r := &http09.RoundTripper{
TLSClientConfig: tlsConf,
QuicConfig: quicConf,
}
defer r.Close()
switch testcase {
case "handshake", "transfer", "retry":
case "keyupdate":
handshake.KeyUpdateInterval = 100
case "chacha20":
tlsConf.CipherSuites = []uint16{tls.TLS_CHACHA20_POLY1305_SHA256}
case "multiconnect":
return runMultiConnectTest(r, urls)
case "versionnegotiation":
return runVersionNegotiationTest(r, urls)
case "resumption":
return runResumptionTest(r, urls, false)
case "zerortt":
return runResumptionTest(r, urls, true)
default:
return errUnsupported
}
return downloadFiles(r, urls, false)
}
func runVersionNegotiationTest(r *http09.RoundTripper, urls []string) error {
if len(urls) != 1 {
return errors.New("expected at least 2 URLs")
}
protocol.SupportedVersions = []protocol.VersionNumber{0x1a2a3a4a}
err := downloadFile(r, urls[0], false)
if err == nil {
return errors.New("expected version negotiation to fail")
}
if !strings.Contains(err.Error(), "No compatible QUIC version found") {
return fmt.Errorf("expect version negotiation error, got: %s", err.Error())
}
return nil
}
func runMultiConnectTest(r *http09.RoundTripper, urls []string) error {
for _, url := range urls {
if err := downloadFile(r, url, false); err != nil {
return err
}
if err := r.Close(); err != nil {
return err
}
}
return nil
}
type sessionCache struct {
tls.ClientSessionCache
put chan<- struct{}
}
func newSessionCache(c tls.ClientSessionCache) (tls.ClientSessionCache, <-chan struct{}) {
put := make(chan struct{}, 100)
return &sessionCache{ClientSessionCache: c, put: put}, put
}
func (c *sessionCache) Put(key string, cs *tls.ClientSessionState) {
c.ClientSessionCache.Put(key, cs)
c.put <- struct{}{}
}
func runResumptionTest(r *http09.RoundTripper, urls []string, use0RTT bool) error {
if len(urls) < 2 {
return errors.New("expected at least 2 URLs")
}
var put <-chan struct{}
tlsConf.ClientSessionCache, put = newSessionCache(tls.NewLRUClientSessionCache(1))
// do the first transfer
if err := downloadFiles(r, urls[:1], false); err != nil {
return err
}
// wait for the session ticket to arrive
select {
case <-time.NewTimer(10 * time.Second).C:
return errors.New("expected to receive a session ticket within 10 seconds")
case <-put:
}
if err := r.Close(); err != nil {
return err
}
// reestablish the connection, using the session ticket that the server (hopefully) provided
defer r.Close()
return downloadFiles(r, urls[1:], use0RTT)
}
func downloadFiles(cl http.RoundTripper, urls []string, use0RTT bool) error {
var g errgroup.Group
for _, u := range urls {
url := u
g.Go(func() error {
return downloadFile(cl, url, use0RTT)
})
}
return g.Wait()
}
func downloadFile(cl http.RoundTripper, url string, use0RTT bool) error {
method := http.MethodGet
if use0RTT {
method = http09.MethodGet0RTT
}
req, err := http.NewRequest(method, url, nil)
if err != nil {
return err
}
rsp, err := cl.RoundTrip(req)
if err != nil {
return err
}
defer rsp.Body.Close()
file, err := os.Create("/downloads" + req.URL.Path)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(file, rsp.Body)
return err
}
|
[
"\"TESTCASE\""
] |
[] |
[
"TESTCASE"
] |
[]
|
["TESTCASE"]
|
go
| 1 | 0 | |
examples/app/run/runAnApp/main.go
|
package main
import (
"fmt"
"os"
"go.m3o.com"
"go.m3o.com/app"
)
func main() {
client := m3o.New(os.Getenv("M3O_API_TOKEN"))
rsp, err := client.App.Run(&app.RunRequest{
Name: "helloworld",
Repo: "github.com/asim/helloworld",
Branch: "master",
Port: 8080,
Region: "europe-west1",
})
fmt.Println(rsp, err)
}
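// Illustrative invocation (assumes a valid token):
//
//	M3O_API_TOKEN=xxxxxxxx go run main.go
//
// The client is constructed from the M3O_API_TOKEN environment variable read above.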
|
[
"\"M3O_API_TOKEN\""
] |
[] |
[
"M3O_API_TOKEN"
] |
[]
|
["M3O_API_TOKEN"]
|
go
| 1 | 0 | |
main/asgi.py
|
"""
ASGI config for the project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
# from channels.auth import AuthMiddlewareStack
# from channels.routing import ProtocolTypeRouter, URLRouter
from channels.routing import ProtocolTypeRouter # type: ignore
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'main.settings')
application = ProtocolTypeRouter({
"http": get_asgi_application(),
# add websocket protocol here
})
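# Illustrative usage (assumes an ASGI server such as uvicorn or daphne is installed):
#
#   uvicorn main.asgi:application
#
# DJANGO_SETTINGS_MODULE only needs to be exported when a settings module other than
# 'main.settings' is required, since the setdefault call above provides that fallback.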
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/gcp/operators/test_compute_system_helper.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import os
from tests.contrib.utils.logging_command_executor import LoggingCommandExecutor
from tests.gcp.utils.gcp_authenticator import GCP_COMPUTE_KEY, GcpAuthenticator
GCE_INSTANCE = os.environ.get('GCE_INSTANCE', 'testinstance')
GCP_PROJECT_ID = os.environ.get('GCP_PROJECT_ID', 'example-project')
GCE_INSTANCE_GROUP_MANAGER_NAME = os.environ.get('GCE_INSTANCE_GROUP_MANAGER_NAME',
'instance-group-test')
GCE_ZONE = os.environ.get('GCE_ZONE', 'europe-west1-b')
GCE_TEMPLATE_NAME = os.environ.get('GCE_TEMPLATE_NAME',
'instance-template-test')
GCE_NEW_TEMPLATE_NAME = os.environ.get('GCE_NEW_TEMPLATE_NAME',
'instance-template-test-new')
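# Illustrative override of the defaults above (values are placeholders):
#
#   GCP_PROJECT_ID=my-project GCE_ZONE=us-central1-a \
#       python tests/gcp/operators/test_compute_system_helper.py --action create-instance
#
# Any variable left unset falls back to the os.environ.get default defined above.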
class GCPComputeTestHelper(LoggingCommandExecutor):
def delete_instance(self):
self.execute_cmd([
'gcloud', 'beta', 'compute', '--project', GCP_PROJECT_ID,
'--quiet', '--verbosity=none',
'instances', 'delete', GCE_INSTANCE, '--zone', GCE_ZONE,
])
def create_instance(self):
self.execute_cmd([
'gcloud', 'beta', 'compute', '--project', GCP_PROJECT_ID, '--quiet',
'instances', 'create', GCE_INSTANCE,
'--zone', GCE_ZONE
])
def delete_instance_group_and_template(self, silent=False):
self.execute_cmd([
'gcloud', 'beta', 'compute', '--project', GCP_PROJECT_ID,
'--quiet', '--verbosity=none',
'instance-groups', 'managed', 'delete', GCE_INSTANCE_GROUP_MANAGER_NAME,
'--zone', GCE_ZONE
], silent=silent)
self.execute_cmd([
'gcloud', 'beta', 'compute', '--project', GCP_PROJECT_ID,
'--quiet', '--verbosity=none',
'instance-templates', 'delete', GCE_NEW_TEMPLATE_NAME
], silent=silent)
self.execute_cmd([
'gcloud', 'beta', 'compute',
'--project', GCP_PROJECT_ID,
'--quiet', '--verbosity=none',
'instance-templates', 'delete', GCE_TEMPLATE_NAME
], silent=silent)
def create_instance_group_and_template(self):
self.execute_cmd([
'gcloud', 'beta', 'compute', '--project', GCP_PROJECT_ID, '--quiet',
'instance-templates', 'create', GCE_TEMPLATE_NAME
])
self.execute_cmd([
'gcloud', 'beta', 'compute', '--project', GCP_PROJECT_ID, '--quiet',
'instance-groups', 'managed', 'create', GCE_INSTANCE_GROUP_MANAGER_NAME,
'--template', GCE_TEMPLATE_NAME,
'--zone', GCE_ZONE, '--size=1'
])
self.execute_cmd([
'gcloud', 'beta', 'compute', '--project', GCP_PROJECT_ID, '--quiet',
'instance-groups', 'managed', 'wait-until-stable',
GCE_INSTANCE_GROUP_MANAGER_NAME,
'--zone', GCE_ZONE
])
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Create or delete GCE instances/instance groups for system tests.')
parser.add_argument('--action', dest='action', required=True,
choices=('create-instance', 'delete-instance',
'create-instance-group', 'delete-instance-group',
'before-tests', 'after-tests'))
action = parser.parse_args().action
helper = GCPComputeTestHelper()
gcp_authenticator = GcpAuthenticator(GCP_COMPUTE_KEY)
helper.log.info('Starting action: {}'.format(action))
gcp_authenticator.gcp_store_authentication()
try:
gcp_authenticator.gcp_authenticate()
if action == 'before-tests':
pass
elif action == 'after-tests':
pass
elif action == 'create-instance':
helper.create_instance()
elif action == 'delete-instance':
helper.delete_instance()
elif action == 'create-instance-group':
helper.create_instance_group_and_template()
elif action == 'delete-instance-group':
helper.delete_instance_group_and_template()
else:
raise Exception("Unknown action: {}".format(action))
finally:
gcp_authenticator.gcp_restore_authentication()
helper.log.info('Finishing action: {}'.format(action))
|
[] |
[] |
[
"GCE_INSTANCE_GROUP_MANAGER_NAME",
"GCE_INSTANCE",
"GCE_ZONE",
"GCE_TEMPLATE_NAME",
"GCP_PROJECT_ID",
"GCE_NEW_TEMPLATE_NAME"
] |
[]
|
["GCE_INSTANCE_GROUP_MANAGER_NAME", "GCE_INSTANCE", "GCE_ZONE", "GCE_TEMPLATE_NAME", "GCP_PROJECT_ID", "GCE_NEW_TEMPLATE_NAME"]
|
python
| 6 | 0 | |
deepfence_backend/websocket_api/scope_websocket_client/main.go
|
package main
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"strings"
"sync"
"time"
"github.com/gomodule/redigo/redis"
"github.com/gorilla/websocket"
"github.com/olivere/elastic/v7"
)
type WebsocketClient struct {
topologyOptionsScope TopologyOptions
topologyOptionsDf TopologyOptions
filterRedisKey string
nodeType string
wsURL string
//
// Maintain current topology (and dump it to redis), so that when a new websocket connection is created from api,
// first reply will include all nodes in current state, and subsequent responses will give only diffs
//
topologyScope map[string]ScopeTopology
topologyDf map[string]DeepfenceTopology
redisPool *redis.Pool
redisDbNum int
dfIdToScopeIdMap map[string]string
nodeStatus NodeStatus
topologyStats TopologyStats
}
type TopologyStats struct {
NodeCount int
EdgeCount int
sync.RWMutex
}
type NodeStatus struct {
VulnerabilityScanStatus map[string]string
VulnerabilityScanStatusTime map[string]string
SecretScanStatus map[string]string
SecretScanStatusTime map[string]string
sync.RWMutex
}
func (wsCli *WebsocketClient) Init(nodeType string) {
wsCli.redisPool, wsCli.redisDbNum = newRedisPool()
wsCli.nodeType = nodeType
wsCli.topologyOptionsScope = TopologyOptions{NodeType: nodeType, Params: TopologyParams{Format: TopologyFormatScope}}
wsCli.topologyOptionsScope.TopologyOptionsValidate()
wsCli.topologyOptionsDf = TopologyOptions{NodeType: nodeType, Params: TopologyParams{Format: TopologyFormatDeepfence}}
wsCli.topologyOptionsDf.TopologyOptionsValidate()
wsCli.filterRedisKey = TopologyFilterPrefix + strings.ToUpper(nodeType)
wsURL := ScopeWebSocketUrl[wsCli.nodeType]
wsCli.wsURL = wsURL.String()
wsCli.topologyScope = make(map[string]ScopeTopology)
wsCli.topologyDf = make(map[string]DeepfenceTopology)
wsCli.dfIdToScopeIdMap = make(map[string]string)
wsCli.nodeStatus = NodeStatus{VulnerabilityScanStatus: make(map[string]string), SecretScanStatus: make(map[string]string)}
wsCli.topologyStats = TopologyStats{NodeCount: 0, EdgeCount: 0}
}
func (wsCli *WebsocketClient) parseMessage(message []byte) {
var scopeTopologyDiff ScopeTopologyDiff
err := json.Unmarshal(message, &scopeTopologyDiff)
if err != nil {
log.Println("err:", err)
return
}
if scopeTopologyDiff.Reset {
wsCli.topologyScope = make(map[string]ScopeTopology)
}
scopeTopologyDiff.Options = wsCli.topologyOptionsScope
// Add / update / delete node details using topology diff
addIds := make([]string, len(scopeTopologyDiff.Add))
updateIds := make([]string, len(scopeTopologyDiff.Update))
removeIds := make([]string, len(scopeTopologyDiff.Remove))
for i, scopeID := range scopeTopologyDiff.Remove {
_, ok := wsCli.topologyScope[scopeID]
if ok {
delete(wsCli.topologyScope, scopeID)
}
removeIds[i] = wsCli.getDfIdFromScopeId(scopeID)
}
scopeTopologyDiff.Add = wsCli.scopeTopologyFixes(scopeTopologyDiff.Add)
scopeTopologyDiff.Update = wsCli.scopeTopologyFixes(scopeTopologyDiff.Update)
for i, nodeDetail := range scopeTopologyDiff.Add {
wsCli.topologyScope[nodeDetail.ID] = nodeDetail
addIds[i] = wsCli.getDfIdFromScopeId(nodeDetail.ID)
}
for i, nodeDetail := range scopeTopologyDiff.Update {
wsCli.topologyScope[nodeDetail.ID] = nodeDetail
updateIds[i] = wsCli.getDfIdFromScopeId(nodeDetail.ID)
}
//
// Set current scope data
//
redisConn := wsCli.redisPool.Get()
defer redisConn.Close()
topologyScopeJson, _ := JsonEncode(wsCli.topologyScope)
_, err = redisConn.Do("SETEX", wsCli.topologyOptionsScope.Channel, RedisExpiryTime, string(topologyScopeJson))
if err != nil {
log.Printf("Error: SETEX %s: %v\n", wsCli.topologyOptionsScope.Channel, err)
}
//
// Scope format to Deepfence format
//
wsCli.topologyDf = make(map[string]DeepfenceTopology)
wsCli.dfIdToScopeIdMap = make(map[string]string)
//
// Call node specific formatter
//
switch wsCli.nodeType {
case NodeTypeHost:
wsCli.formatTopologyHostData()
case NodeTypeContainer:
wsCli.formatTopologyContainerData()
case NodeTypeContainerImage:
wsCli.formatTopologyContainerImageData()
case NodeTypeContainerByName:
wsCli.formatTopologyContainerByNameData()
case NodeTypeProcess:
wsCli.formatTopologyProcessData()
case NodeTypeProcessByName:
wsCli.formatTopologyProcessByNameData()
case NodeTypePod:
wsCli.formatTopologyPodData()
case NodeTypeKubeService:
wsCli.formatTopologyKubeServiceData()
case NodeTypeKubeController:
wsCli.formatTopologyKubeControllerData()
case NodeTypeSwarmService:
wsCli.formatTopologySwarmServiceData()
}
//
// Set current df format data
//
topologyDfJson, _ := JsonEncode(wsCli.topologyDf)
_, err = redisConn.Do("SETEX", wsCli.topologyOptionsDf.Channel, RedisExpiryTime, string(topologyDfJson))
if err != nil {
log.Println(fmt.Sprintf("Error: SETEX %s:", wsCli.topologyOptionsDf.Channel), err)
}
//
// Df node id to scope id
//
dfIdToScopeIdKey := dfIdToScopeIdRedisKeyPrefix + strings.ToUpper(wsCli.nodeType)
if len(wsCli.dfIdToScopeIdMap) > 0 {
redisArgs := redis.Args{}
redisArgs = redisArgs.Add(dfIdToScopeIdKey)
for dfId, scopeId := range wsCli.dfIdToScopeIdMap {
redisArgs = redisArgs.Add(dfId)
redisArgs = redisArgs.Add(scopeId)
}
_, err = redisConn.Do("HMSET", redisArgs...)
if err != nil {
log.Println("Error: HMSET "+dfIdToScopeIdKey, err)
}
} else {
_, err = redisConn.Do("HMSET", dfIdToScopeIdKey, "", "")
if err != nil {
log.Println("Error: HMSET "+dfIdToScopeIdKey, err)
}
}
_, err = redisConn.Do("EXPIRE", dfIdToScopeIdKey, RedisExpiryTime)
if err != nil {
log.Println("Error: EXPIRE "+dfIdToScopeIdKey, err)
}
return
}
func (wsCli *WebsocketClient) publishFilteredTopology(deleteDfIds []string, deleteScopeIds []string, optionsDf TopologyOptions, optionsScope TopologyOptions, dfTopologyDiff DeepfenceTopologyDiff, scopeTopologyDiff ScopeTopologyDiff) {
//
// Deepfence format
//
redisConn := wsCli.redisPool.Get()
defer redisConn.Close()
topologyDf := DeepCopyDfTopology(wsCli.topologyDf)
for _, delId := range deleteDfIds {
delete(topologyDf, delId)
}
// Set filtered data in redis
topologyDfJsonTmp, _ := JsonEncode(topologyDf)
_, err := redisConn.Do("SETEX", optionsDf.Channel, RedisExpiryTime, string(topologyDfJsonTmp))
if err != nil {
log.Printf("Error: SETEX %s: %v\n", optionsDf.Channel, err)
}
// topology diff
dfTopologyDiff.deleteIdsFromDfTopologyDiff(deleteDfIds)
dfTopologyDiff.Options = optionsDf
dfTopologyDiffTmpJson, _ := JsonEncode(dfTopologyDiff)
// Publish diff in redis pubsub
_, err = redisConn.Do("PUBLISH", fmt.Sprintf("%s_%d", optionsDf.Channel, wsCli.redisDbNum), string(dfTopologyDiffTmpJson))
if err != nil {
log.Printf("Error: PUBLISH %s: %v\n", fmt.Sprintf("%s_%d", optionsDf.Channel, wsCli.redisDbNum), err)
}
//
// Scope format
//
topologyScope := DeepCopyScopeTopology(wsCli.topologyScope)
for _, delId := range deleteScopeIds {
delete(topologyScope, delId)
}
// Set filtered data in redis
topologyScopeJsonTmp, _ := JsonEncode(topologyScope)
_, err = redisConn.Do("SETEX", optionsScope.Channel, RedisExpiryTime, string(topologyScopeJsonTmp))
if err != nil {
log.Printf("Error: SETEX %s: %v\n", optionsScope.Channel, err)
}
// topology diff
scopeTopologyDiff.deleteIdsFromScopeTopologyDiff(deleteScopeIds)
scopeTopologyDiff.Options = optionsScope
scopeTopologyDiffJson, _ := JsonEncode(scopeTopologyDiff)
// Publish diff in redis pubsub
_, err = redisConn.Do("PUBLISH", fmt.Sprintf("%s_%d", optionsScope.Channel, wsCli.redisDbNum), string(scopeTopologyDiffJson))
if err != nil {
log.Printf("Error: PUBLISH %s: %v\n", fmt.Sprintf("%s_%d", optionsScope.Channel, wsCli.redisDbNum), err)
}
}
func (wsCli *WebsocketClient) updateScanStatusData(esClient *elastic.Client) error {
var err error
var ok bool
mSearch := elastic.NewMultiSearchService(esClient)
nodeIdAggs := elastic.NewTermsAggregation().Field("node_id.keyword").Size(esAggsSize)
statusAggs := elastic.NewTermsAggregation().Field("action.keyword").Size(50)
recentTimestampAggs := elastic.NewMaxAggregation().Field("@timestamp")
statusAggs.SubAggregation("scan_recent_timestamp", recentTimestampAggs)
nodeIdAggs.SubAggregation("action", statusAggs)
esQuery := elastic.NewSearchRequest().Index(cveScanLogsEsIndex).Query(elastic.NewMatchAllQuery()).Size(0).Aggregation("node_id", nodeIdAggs)
mSearch.Add(esQuery)
nodeIdAggs = elastic.NewTermsAggregation().Field("node_id.keyword").Size(esAggsSize)
statusAggs = elastic.NewTermsAggregation().Field("scan_status.keyword").Size(50)
recentTimestampAggs = elastic.NewMaxAggregation().Field("@timestamp")
statusAggs.SubAggregation("secret_scan_timestamp", recentTimestampAggs)
nodeIdAggs.SubAggregation("secret_scan_status", statusAggs)
esQuery = elastic.NewSearchRequest().Index(secretScanLogsEsIndex).Query(elastic.NewMatchAllQuery()).Size(0).Aggregation("node_id", nodeIdAggs)
mSearch.Add(esQuery)
mSearchResult, err := mSearch.Do(context.Background())
if err != nil {
return err
}
nodeIdVulnerabilityStatusMap := make(map[string]string)
nodeIdVulnerabilityStatusTimeMap := make(map[string]string)
cveResp := mSearchResult.Responses[0]
nodeIdAggsBkt, ok := cveResp.Aggregations.Terms("node_id")
if !ok {
return nil
}
for _, nodeIdAggs := range nodeIdAggsBkt.Buckets {
latestScanTime := 0.0
var latestStatus, latestScanTimeStr string
scanStatusBkt, ok := nodeIdAggs.Aggregations.Terms("action")
if !ok {
continue
}
for _, scanStatusAggs := range scanStatusBkt.Buckets {
recentTimestampBkt, ok := scanStatusAggs.Aggregations.Max("scan_recent_timestamp")
if !ok || recentTimestampBkt == nil || recentTimestampBkt.Value == nil {
continue
}
if *recentTimestampBkt.Value > latestScanTime {
latestScanTime = *recentTimestampBkt.Value
latestStatus = scanStatusAggs.Key.(string)
valueAsStr, ok := recentTimestampBkt.Aggregations["value_as_string"]
if ok {
val, err := valueAsStr.MarshalJSON()
if err == nil {
latestScanTimeStr = strings.ReplaceAll(string(val), "\"", "")
}
}
}
}
latestStatus, ok = statusMap[latestStatus]
if !ok {
latestStatus = scanStatusNeverScanned
}
nodeIdVulnerabilityStatusMap[nodeIdAggs.Key.(string)] = latestStatus
nodeIdVulnerabilityStatusTimeMap[nodeIdAggs.Key.(string)] = latestScanTimeStr
}
wsCli.nodeStatus.Lock()
wsCli.nodeStatus.VulnerabilityScanStatus = nodeIdVulnerabilityStatusMap
wsCli.nodeStatus.VulnerabilityScanStatusTime = nodeIdVulnerabilityStatusTimeMap
wsCli.nodeStatus.Unlock()
nodeIdSecretStatusMap := make(map[string]string)
nodeIdSecretStatusTimeMap := make(map[string]string)
secretResp := mSearchResult.Responses[1]
nodeIdAggsBkt, ok = secretResp.Aggregations.Terms("node_id")
if !ok {
return nil
}
for _, nodeIdAggs := range nodeIdAggsBkt.Buckets {
if nodeIdAggs.Key.(string) == "" {
continue
}
latestScanTime := 0.0
var latestStatus, latestScanTimeStr string
scanStatusBkt, ok := nodeIdAggs.Aggregations.Terms("secret_scan_status")
if !ok {
continue
}
for _, scanStatusAggs := range scanStatusBkt.Buckets {
recentTimestampBkt, ok := scanStatusAggs.Aggregations.Max("secret_scan_timestamp")
if !ok || recentTimestampBkt == nil || recentTimestampBkt.Value == nil {
continue
}
if *recentTimestampBkt.Value > latestScanTime {
latestScanTime = *recentTimestampBkt.Value
latestStatus = scanStatusAggs.Key.(string)
valueAsStr, ok := recentTimestampBkt.Aggregations["value_as_string"]
if ok {
latestScanTimeStr = strings.ReplaceAll(string(valueAsStr), "\"", "")
}
}
}
latestStatus, ok = statusMap[latestStatus]
if !ok {
latestStatus = scanStatusNeverScanned
}
nodeIdSecretStatusMap[strings.Split(nodeIdAggs.Key.(string), ";")[0]] = latestStatus
nodeIdSecretStatusTimeMap[strings.Split(nodeIdAggs.Key.(string), ";")[0]] = latestScanTimeStr
}
wsCli.nodeStatus.Lock()
wsCli.nodeStatus.SecretScanStatus = nodeIdSecretStatusMap
wsCli.nodeStatus.SecretScanStatusTime = nodeIdSecretStatusTimeMap
wsCli.nodeStatus.Unlock()
return nil
}
func (wsCli *WebsocketClient) updateNodeCount() error {
client := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{}}}
req, err := http.NewRequest(http.MethodGet, "http://deepfence-topology:8004/topology-api/topology", nil)
if err != nil {
return err
}
req.Header.Add("Content-Type", "application/json")
resp, err := client.Do(req)
if err != nil {
return err
}
if resp.StatusCode == http.StatusOK {
bodyBytes, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return err
}
var topologyStats []TopologyStatistics
err = json.Unmarshal(bodyBytes, &topologyStats)
if err != nil {
return err
}
for _, topologyStat := range topologyStats {
topologyId := strings.ReplaceAll(topologyStat.URL, "/topology-api/topology/", "")
nodeType, ok := TopologyIdNodeTypeMap[topologyId]
if ok {
if nodeType == wsCli.nodeType {
wsCli.topologyStats.Lock()
wsCli.topologyStats.NodeCount = topologyStat.Stats.NodeCount
wsCli.topologyStats.EdgeCount = topologyStat.Stats.EdgeCount
wsCli.topologyStats.Unlock()
break
}
}
for _, subTopologyStat := range topologyStat.SubTopologies {
topologyId = strings.ReplaceAll(subTopologyStat.URL, "/topology-api/topology/", "")
nodeType, ok = TopologyIdNodeTypeMap[topologyId]
if ok {
if nodeType == wsCli.nodeType {
wsCli.topologyStats.Lock()
wsCli.topologyStats.NodeCount = subTopologyStat.Stats.NodeCount
wsCli.topologyStats.EdgeCount = subTopologyStat.Stats.EdgeCount
wsCli.topologyStats.Unlock()
break
}
}
}
}
}
return nil
}
func (wsCli *WebsocketClient) getEsData() error {
esHost := os.Getenv("ELASTICSEARCH_HOST")
if esHost == "" {
esHost = "deepfence-es"
}
esPort := os.Getenv("ELASTICSEARCH_PORT")
if esPort == "" {
esPort = "9200"
}
esClient, err := elastic.NewClient(
elastic.SetHealthcheck(false),
elastic.SetSniff(false),
elastic.SetURL("http://"+esHost+":"+esPort),
)
if err != nil {
return err
}
// Update status first time before ticker starts
err = wsCli.updateScanStatusData(esClient)
if err != nil {
return err
}
// Update every 10 seconds
ticker := time.NewTicker(10 * time.Second)
for {
select {
case <-ticker.C:
err = wsCli.updateScanStatusData(esClient)
if err != nil {
log.Println("Error updating scan details from elasticsearch: ", err)
}
}
}
}
func (wsCli *WebsocketClient) ConnectToScopeWebSocket() {
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
go func() {
if wsCli.nodeType == NodeTypeHost || wsCli.nodeType == NodeTypeContainer || wsCli.nodeType == NodeTypeContainerImage || wsCli.nodeType == NodeTypePod {
for {
err := wsCli.getEsData()
if err != nil {
log.Println(err)
}
time.Sleep(15 * time.Second)
}
}
}()
go func() {
err := wsCli.updateNodeCount()
if err != nil {
log.Println(err)
}
ticker := time.NewTicker(5 * time.Minute)
for {
select {
case <-ticker.C:
err := wsCli.updateNodeCount()
if err != nil {
log.Println(err)
}
}
}
}()
log.Printf("connecting to %s", wsCli.wsURL)
header := http.Header{}
wsConn, _, err := websocket.DefaultDialer.Dial(wsCli.wsURL, header)
if err != nil {
log.Println("Error: dial:", err)
GracefulExit()
}
defer wsConn.Close()
done := make(chan struct{})
go func() {
defer close(done)
for {
err = wsConn.SetReadDeadline(time.Now().UTC().Add(45 * time.Second))
if err != nil {
log.Println("Error: SetReadDeadline", err)
GracefulExit()
}
_, message, err := wsConn.ReadMessage()
if err != nil {
log.Println("Error: wsConn.ReadMessage:", err)
GracefulExit()
}
wsCli.parseMessage(message)
}
}()
for {
select {
case <-done:
return
case <-interrupt:
log.Println("interrupt")
// Cleanly close the connection by sending a close message and then
// waiting (with timeout) for the server to close the connection.
err := wsConn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
if err != nil {
log.Println("write close:", err)
return
}
select {
case <-done:
case <-time.After(time.Second):
}
return
}
}
}
|
[
"\"ELASTICSEARCH_HOST\"",
"\"ELASTICSEARCH_PORT\""
] |
[] |
[
"ELASTICSEARCH_PORT",
"ELASTICSEARCH_HOST"
] |
[]
|
["ELASTICSEARCH_PORT", "ELASTICSEARCH_HOST"]
|
go
| 2 | 0 | |
soracom/generated/cmd/event_handlers_list_for_subscriber.go
|
// Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"net/url"
"os"
"github.com/spf13/cobra"
)
// EventHandlersListForSubscriberCmdImsi holds value of 'imsi' option
var EventHandlersListForSubscriberCmdImsi string
func init() {
EventHandlersListForSubscriberCmd.Flags().StringVar(&EventHandlersListForSubscriberCmdImsi, "imsi", "", TRAPI("imsi"))
EventHandlersCmd.AddCommand(EventHandlersListForSubscriberCmd)
}
// EventHandlersListForSubscriberCmd defines 'list-for-subscriber' subcommand
var EventHandlersListForSubscriberCmd = &cobra.Command{
Use: "list-for-subscriber",
Short: TRAPI("/event_handlers/subscribers/{imsi}:get:summary"),
Long: TRAPI(`/event_handlers/subscribers/{imsi}:get:description`),
RunE: func(cmd *cobra.Command, args []string) error {
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectEventHandlersListForSubscriberCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectEventHandlersListForSubscriberCmdParams(ac *apiClient) (*apiParams, error) {
var parsedBody interface{}
var err error
err = checkIfRequiredStringParameterIsSupplied("imsi", "imsi", "path", parsedBody, EventHandlersListForSubscriberCmdImsi)
if err != nil {
return nil, err
}
return &apiParams{
method: "GET",
path: buildPathForEventHandlersListForSubscriberCmd("/event_handlers/subscribers/{imsi}"),
query: buildQueryForEventHandlersListForSubscriberCmd(),
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForEventHandlersListForSubscriberCmd(path string) string {
escapedImsi := url.PathEscape(EventHandlersListForSubscriberCmdImsi)
path = strReplace(path, "{"+"imsi"+"}", escapedImsi, -1)
return path
}
func buildQueryForEventHandlersListForSubscriberCmd() url.Values {
result := url.Values{}
return result
}
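// Illustrative invocation (the parent command path is assumed from EventHandlersCmd and is not
// defined in this file; the IMSI is a placeholder):
//
//	SORACOM_VERBOSE=1 soracom event-handlers list-for-subscriber --imsi 001010123456789
//
// Any non-empty SORACOM_VERBOSE value switches the API client into verbose mode, as shown above.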
|
[
"\"SORACOM_VERBOSE\""
] |
[] |
[
"SORACOM_VERBOSE"
] |
[]
|
["SORACOM_VERBOSE"]
|
go
| 1 | 0 | |
daemon/daemon.go
|
// Package daemon exposes the functions that occur on the host server
// that the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon
import (
"fmt"
"io/ioutil"
"net"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/Sirupsen/logrus"
containerd "github.com/docker/containerd/api/grpc/types"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/daemon/events"
"github.com/docker/docker/daemon/exec"
"github.com/docker/docker/daemon/logger"
// register graph drivers
_ "github.com/docker/docker/daemon/graphdriver/register"
"github.com/docker/docker/daemon/initlayer"
"github.com/docker/docker/daemon/stats"
dmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/libcontainerd"
"github.com/docker/docker/migrate/v1"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/registrar"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/plugin"
refstore "github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
volumedrivers "github.com/docker/docker/volume/drivers"
"github.com/docker/docker/volume/local"
"github.com/docker/docker/volume/store"
"github.com/docker/libnetwork"
"github.com/docker/libnetwork/cluster"
nwconfig "github.com/docker/libnetwork/config"
"github.com/docker/libtrust"
"github.com/pkg/errors"
)
var (
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "docker-runc"
errSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.")
)
// Daemon holds information about the Docker daemon.
type Daemon struct {
ID string
repository string
containers container.Store
execCommands *exec.Store
referenceStore refstore.Store
downloadManager *xfer.LayerDownloadManager
uploadManager *xfer.LayerUploadManager
distributionMetadataStore dmetadata.Store
trustKey libtrust.PrivateKey
idIndex *truncindex.TruncIndex
configStore *config.Config
statsCollector *stats.Collector
defaultLogConfig containertypes.LogConfig
RegistryService registry.Service
EventsService *events.Events
netController libnetwork.NetworkController
volumes *store.VolumeStore
discoveryWatcher discovery.Reloader
root string
seccompEnabled bool
apparmorEnabled bool
shutdown bool
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
layerStore layer.Store
imageStore image.Store
PluginStore *plugin.Store // todo: remove
pluginManager *plugin.Manager
nameIndex *registrar.Registrar
linkIndex *linkIndex
containerd libcontainerd.Client
containerdRemote libcontainerd.Remote
defaultIsolation containertypes.Isolation // Default isolation mode on Windows
clusterProvider cluster.Provider
cluster Cluster
metricsPluginListener net.Listener
machineMemory uint64
seccompProfile []byte
seccompProfilePath string
diskUsageRunning int32
pruneRunning int32
}
// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
if daemon.configStore != nil && daemon.configStore.Experimental {
return true
}
return false
}
func (daemon *Daemon) restore() error {
var (
currentDriver = daemon.GraphDriverName()
containers = make(map[string]*container.Container)
)
logrus.Info("Loading containers: start.")
dir, err := ioutil.ReadDir(daemon.repository)
if err != nil {
return err
}
for _, v := range dir {
id := v.Name()
container, err := daemon.load(id)
if err != nil {
logrus.Errorf("Failed to load container %v: %v", id, err)
continue
}
// Ignore the container if it does not support the current driver being used by the graph
if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
if err != nil {
logrus.Errorf("Failed to load container mount %v: %v", id, err)
continue
}
container.RWLayer = rwlayer
logrus.Debugf("Loaded container %v", container.ID)
containers[container.ID] = container
} else {
logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
}
}
removeContainers := make(map[string]*container.Container)
restartContainers := make(map[*container.Container]chan struct{})
activeSandboxes := make(map[string]interface{})
for id, c := range containers {
if err := daemon.registerName(c); err != nil {
logrus.Errorf("Failed to register container %s: %s", c.ID, err)
delete(containers, id)
continue
}
daemon.Register(c)
// verify that all volumes are valid and have been migrated from the pre-1.7 layout
if err := daemon.verifyVolumesInfo(c); err != nil {
// don't skip the container due to error
logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err)
}
// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
// We should rewrite it to use the daemon defaults.
// Fixes https://github.com/docker/docker/issues/22536
if c.HostConfig.LogConfig.Type == "" {
if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
continue
}
}
}
var wg sync.WaitGroup
var mapLock sync.Mutex
for _, c := range containers {
wg.Add(1)
go func(c *container.Container) {
defer wg.Done()
daemon.backportMountSpec(c)
if err := c.ToDiskLocking(); err != nil {
logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk")
}
daemon.setStateCounter(c)
if c.IsRunning() || c.IsPaused() {
c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil {
logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
return
}
// we call Mount and then Unmount to get BaseFs of the container
if err := daemon.Mount(c); err != nil {
// The mount is unlikely to fail. However, in case mount fails
// the container should be allowed to restore here. Some functionalities
// (like docker exec -u user) might be missing, but the container can still be
// stopped/restarted/removed.
// See #29365 for related information.
// The error is only logged here.
logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err)
} else {
if err := daemon.Unmount(c); err != nil {
logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err)
}
}
c.ResetRestartManager(false)
if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
options, err := daemon.buildSandboxOptions(c)
if err != nil {
logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err)
}
mapLock.Lock()
activeSandboxes[c.NetworkSettings.SandboxID] = options
mapLock.Unlock()
}
}
// fixme: only if not running
// get list of containers we need to restart
if !c.IsRunning() && !c.IsPaused() {
// Do not autostart containers which
// have endpoints in a swarm scope
// network yet since the cluster is
// not initialized yet. We will start
// it after the cluster is
// initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
mapLock.Lock()
restartContainers[c] = make(chan struct{})
mapLock.Unlock()
} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
mapLock.Lock()
removeContainers[c.ID] = c
mapLock.Unlock()
}
}
if c.RemovalInProgress {
// We probably crashed in the middle of a removal, reset
// the flag.
//
// We DO NOT remove the container here as we do not
// know if the user had requested for either the
// associated volumes, network links or both to also
// be removed. So we put the container in the "dead"
// state and leave further processing up to them.
logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
c.ResetRemovalInProgress()
c.SetDead()
c.ToDisk()
}
}(c)
}
wg.Wait()
daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
if err != nil {
return fmt.Errorf("Error initializing network controller: %v", err)
}
// Now that all the containers are registered, register the links
for _, c := range containers {
if err := daemon.registerLinks(c, c.HostConfig); err != nil {
logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
}
}
group := sync.WaitGroup{}
for c, notifier := range restartContainers {
group.Add(1)
go func(c *container.Container, chNotify chan struct{}) {
defer group.Done()
logrus.Debugf("Starting container %s", c.ID)
// ignore errors here as this is a best effort to wait for children to be
// running before we try to start the container
children := daemon.children(c)
timeout := time.After(5 * time.Second)
for _, child := range children {
if notifier, exists := restartContainers[child]; exists {
select {
case <-notifier:
case <-timeout:
}
}
}
// Make sure networks are available before starting
daemon.waitForNetworks(c)
if err := daemon.containerStart(c, "", "", true); err != nil {
logrus.Errorf("Failed to start container %s: %s", c.ID, err)
}
close(chNotify)
}(c, notifier)
}
group.Wait()
removeGroup := sync.WaitGroup{}
for id := range removeContainers {
removeGroup.Add(1)
go func(cid string) {
if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
logrus.Errorf("Failed to remove container %s: %s", cid, err)
}
removeGroup.Done()
}(id)
}
removeGroup.Wait()
// any containers that were started above would already have had this done,
// however we need to now prepare the mountpoints for the rest of the containers as well.
// This shouldn't cause any issue running on the containers that already had this run.
// This must be run after any containers with a restart policy so that containerized plugins
// can have a chance to be running before we try to initialize them.
for _, c := range containers {
// if the container has restart policy, do not
// prepare the mountpoints since it has been done on restarting.
// This is to speed up the daemon start when a restart container
// has a volume and the volume driver is not available.
if _, ok := restartContainers[c]; ok {
continue
} else if _, ok := removeContainers[c.ID]; ok {
// container is automatically removed, skip it.
continue
}
group.Add(1)
go func(c *container.Container) {
defer group.Done()
if err := daemon.prepareMountPoints(c); err != nil {
logrus.Error(err)
}
}(c)
}
group.Wait()
logrus.Info("Loading containers: done.")
return nil
}
// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers() {
group := sync.WaitGroup{}
for _, c := range daemon.List() {
if !c.IsRunning() && !c.IsPaused() {
// Autostart all the containers which have a
// swarm endpoint now that the cluster is
// initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint {
group.Add(1)
go func(c *container.Container) {
defer group.Done()
if err := daemon.containerStart(c, "", "", true); err != nil {
logrus.Error(err)
}
}(c)
}
}
}
group.Wait()
}
// waitForNetworks is used during daemon initialization when starting up containers
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
if daemon.discoveryWatcher == nil {
return
}
// If the container has a network that requires discovery, make sure the discovery service is available before starting
for netName := range c.NetworkSettings.Networks {
// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
// Most likely this is because the K/V store used for discovery is in a container and needs to be started
if _, err := daemon.netController.NetworkByName(netName); err != nil {
if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
continue
}
// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
// FIXME: why is this slow???
logrus.Debugf("Container %s waiting for network to be ready", c.Name)
select {
case <-daemon.discoveryWatcher.ReadyCh():
case <-time.After(60 * time.Second):
}
return
}
}
}
func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.children(c)
}
// parents returns the parent containers of the given container, keyed by name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.parents(c)
}
func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
fullName := path.Join(parent.Name, alias)
if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
if err == registrar.ErrNameReserved {
logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
return nil
}
return err
}
daemon.linkIndex.link(parent, child, fullName)
return nil
}
// DaemonJoinsCluster informs the daemon has joined the cluster and provides
// the handler to query the cluster component
func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) {
daemon.setClusterProvider(clusterProvider)
}
// DaemonLeavesCluster informs the daemon has left the cluster
func (daemon *Daemon) DaemonLeavesCluster() {
// Daemon is in charge of removing the attachable networks with
// connected containers when the node leaves the swarm
daemon.clearAttachableNetworks()
// We no longer need the cluster provider, stop it now so that
// the network agent will stop listening to cluster events.
daemon.setClusterProvider(nil)
// Wait for the networking cluster agent to stop
daemon.netController.AgentStopWait()
// Daemon is in charge of removing the ingress network when the
// node leaves the swarm. Wait for job to be done or timeout.
// This is called also on graceful daemon shutdown. We need to
// wait, because the ingress release has to happen before the
// network controller is stopped.
if done, err := daemon.ReleaseIngress(); err == nil {
select {
case <-done:
case <-time.After(5 * time.Second):
logrus.Warnf("timeout while waiting for ingress network removal")
}
} else {
logrus.Warnf("failed to initiate ingress network removal: %v", err)
}
}
// setClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {
daemon.clusterProvider = clusterProvider
daemon.netController.SetClusterProvider(clusterProvider)
}
// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with the swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
if daemon.configStore == nil {
return nil
}
return daemon.configStore.IsSwarmCompatible()
}
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *config.Config, registryService registry.Service, containerdRemote libcontainerd.Remote, pluginStore *plugin.Store) (daemon *Daemon, err error) {
setDefaultMtu(config)
// Ensure that we have a correct root key limit for launching containers.
if err := ModifyRootKeyLimit(); err != nil {
logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
}
// Ensure we have compatible and valid configuration options
if err := verifyDaemonSettings(config); err != nil {
return nil, err
}
// Do we have a disabled network?
config.DisableBridge = isBridgeNetworkDisabled(config)
// Verify the platform is supported as a daemon
if !platformSupported {
return nil, errSystemNotSupported
}
// Validate platform-specific requirements
if err := checkSystem(); err != nil {
return nil, err
}
uidMaps, gidMaps, err := setupRemappedRoot(config)
if err != nil {
return nil, err
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
if err := setupDaemonProcess(config); err != nil {
return nil, err
}
// set up the tmpDir to use a canonical path
tmp, err := prepareTempDir(config.Root, rootUID, rootGID)
if err != nil {
return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
}
realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
if err != nil {
return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
}
os.Setenv("TMPDIR", realTmp)
d := &Daemon{configStore: config}
// Ensure the daemon is properly shutdown if there is a failure during
// initialization
defer func() {
if err != nil {
if err := d.Shutdown(); err != nil {
logrus.Error(err)
}
}
}()
// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
// on Windows to dump Go routine stacks
stackDumpDir := config.Root
if execRoot := config.GetExecRoot(); execRoot != "" {
stackDumpDir = execRoot
}
d.setupDumpStackTrap(stackDumpDir)
if err := d.setupSeccompProfile(); err != nil {
return nil, err
}
// Set the default isolation mode (only applicable on Windows)
if err := d.setDefaultIsolation(); err != nil {
return nil, fmt.Errorf("error setting default isolation mode: %v", err)
}
logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)
if err := configureMaxThreads(config); err != nil {
logrus.Warnf("Failed to configure golang's threads limit: %v", err)
}
if err := ensureDefaultAppArmorProfile(); err != nil {
logrus.Errorf(err.Error())
}
daemonRepo := filepath.Join(config.Root, "containers")
if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return nil, err
}
if runtime.GOOS == "windows" {
if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0); err != nil && !os.IsExist(err) {
return nil, err
}
}
driverName := os.Getenv("DOCKER_DRIVER")
if driverName == "" {
driverName = config.GraphDriver
}
d.RegistryService = registryService
d.PluginStore = pluginStore
logger.RegisterPluginGetter(d.PluginStore)
metricsSockPath, err := d.listenMetricsSock()
if err != nil {
return nil, err
}
registerMetricsPluginCallback(d.PluginStore, metricsSockPath)
// Plugin system initialization should happen before restore. Do not change order.
d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
Root: filepath.Join(config.Root, "plugins"),
ExecRoot: getPluginExecRoot(config.Root),
Store: d.PluginStore,
Executor: containerdRemote,
RegistryService: registryService,
LiveRestoreEnabled: config.LiveRestoreEnabled,
LogPluginEvent: d.LogPluginEvent, // todo: make private
AuthzMiddleware: config.AuthzMiddleware,
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create plugin manager")
}
d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
StorePath: config.Root,
MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
GraphDriver: driverName,
GraphDriverOptions: config.GraphOptions,
UIDMaps: uidMaps,
GIDMaps: gidMaps,
PluginGetter: d.PluginStore,
ExperimentalEnabled: config.Experimental,
})
if err != nil {
return nil, err
}
graphDriver := d.layerStore.DriverName()
imageRoot := filepath.Join(config.Root, "image", graphDriver)
// Configure and validate the kernel's security support
if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
return nil, err
}
logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)
ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
if err != nil {
return nil, err
}
d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
if err != nil {
return nil, err
}
// Configure the volumes driver
volStore, err := d.configureVolumes(rootUID, rootGID)
if err != nil {
return nil, err
}
trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
if err != nil {
return nil, err
}
trustDir := filepath.Join(config.Root, "trust")
if err := system.MkdirAll(trustDir, 0700); err != nil {
return nil, err
}
distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
if err != nil {
return nil, err
}
eventsService := events.New()
referenceStore, err := refstore.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
if err != nil {
return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
}
migrationStart := time.Now()
if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
}
logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
// Discovery is only enabled when the daemon is launched with an address to advertise. When
// initialized, the daemon is registered and we can store the discovery backend as it's read-only
if err := d.initDiscovery(config); err != nil {
return nil, err
}
sysInfo := sysinfo.New(false)
// Check if the Devices cgroup is mounted; it is a hard requirement for container security
// on Linux.
if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
return nil, errors.New("Devices cgroup isn't mounted")
}
d.ID = trustKey.PublicKey().KeyID()
d.repository = daemonRepo
d.containers = container.NewMemoryStore()
d.execCommands = exec.NewStore()
d.referenceStore = referenceStore
d.distributionMetadataStore = distributionMetadataStore
d.trustKey = trustKey
d.idIndex = truncindex.NewTruncIndex([]string{})
d.statsCollector = d.newStatsCollector(1 * time.Second)
d.defaultLogConfig = containertypes.LogConfig{
Type: config.LogConfig.Type,
Config: config.LogConfig.Config,
}
d.EventsService = eventsService
d.volumes = volStore
d.root = config.Root
d.uidMaps = uidMaps
d.gidMaps = gidMaps
d.seccompEnabled = sysInfo.Seccomp
d.apparmorEnabled = sysInfo.AppArmor
d.nameIndex = registrar.NewRegistrar()
d.linkIndex = newLinkIndex()
d.containerdRemote = containerdRemote
go d.execCommandGC()
d.containerd, err = containerdRemote.Client(d)
if err != nil {
return nil, err
}
if err := d.restore(); err != nil {
return nil, err
}
// FIXME: this method never returns an error
info, _ := d.SystemInfo()
engineInfo.WithValues(
dockerversion.Version,
dockerversion.GitCommit,
info.Architecture,
info.Driver,
info.KernelVersion,
info.OperatingSystem,
info.OSType,
info.ID,
).Set(1)
engineCpus.Set(float64(info.NCPU))
engineMemory.Set(float64(info.MemTotal))
return d, nil
}
func (daemon *Daemon) shutdownContainer(c *container.Container) error {
stopTimeout := c.StopTimeout()
// TODO(windows): Handle docker restart with paused containers
if c.IsPaused() {
// To terminate a process in a freezer cgroup, we should send
// SIGTERM to the process and then unfreeze it; the process will
// then be forced to terminate immediately.
logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
sig, ok := signal.SignalMap["TERM"]
if !ok {
return errors.New("System does not support SIGTERM")
}
if err := daemon.kill(c, int(sig)); err != nil {
return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
}
if err := daemon.containerUnpause(c); err != nil {
return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
}
if _, err := c.WaitStop(time.Duration(stopTimeout) * time.Second); err != nil {
logrus.Debugf("container %s failed to exit in %d second of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout)
sig, ok := signal.SignalMap["KILL"]
if !ok {
return errors.New("System does not support SIGKILL")
}
if err := daemon.kill(c, int(sig)); err != nil {
logrus.Errorf("Failed to SIGKILL container %s", c.ID)
}
c.WaitStop(-1 * time.Second)
return err
}
}
// If the container failed to exit within stopTimeout seconds of SIGTERM, force-stop it
if err := daemon.containerStop(c, stopTimeout); err != nil {
return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
}
c.WaitStop(-1 * time.Second)
return nil
}
// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers,
// and is limited by daemon's ShutdownTimeout.
func (daemon *Daemon) ShutdownTimeout() int {
// By default we use daemon's ShutdownTimeout.
shutdownTimeout := daemon.configStore.ShutdownTimeout
graceTimeout := 5
if daemon.containers != nil {
for _, c := range daemon.containers.List() {
if shutdownTimeout >= 0 {
stopTimeout := c.StopTimeout()
if stopTimeout < 0 {
shutdownTimeout = -1
} else {
if stopTimeout+graceTimeout > shutdownTimeout {
shutdownTimeout = stopTimeout + graceTimeout
}
}
}
}
}
return shutdownTimeout
}
// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
daemon.shutdown = true
// Keep mounts and networking running on daemon shutdown if
// we are to keep containers running and restore them.
if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
// check if there are any running containers, if none we should do some cleanup
if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
// metrics plugins still need some cleanup
daemon.cleanupMetricsPlugins()
return nil
}
}
if daemon.containers != nil {
logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout)
daemon.containers.ApplyAll(func(c *container.Container) {
if !c.IsRunning() {
return
}
logrus.Debugf("stopping %s", c.ID)
if err := daemon.shutdownContainer(c); err != nil {
logrus.Errorf("Stop container error: %v", err)
return
}
if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
daemon.cleanupMountsByID(mountid)
}
logrus.Debugf("container stopped %s", c.ID)
})
}
if daemon.volumes != nil {
if err := daemon.volumes.Shutdown(); err != nil {
logrus.Errorf("Error shutting down volume store: %v", err)
}
}
if daemon.layerStore != nil {
if err := daemon.layerStore.Cleanup(); err != nil {
logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
}
}
// If we are part of a cluster, clean up cluster's stuff
if daemon.clusterProvider != nil {
logrus.Debugf("start clean shutdown of cluster resources...")
daemon.DaemonLeavesCluster()
}
daemon.cleanupMetricsPlugins()
// Shutdown plugins after containers and layerstore. Don't change the order.
daemon.pluginShutdown()
// trigger libnetwork Stop only if it's initialized
if daemon.netController != nil {
daemon.netController.Stop()
}
if err := daemon.cleanupMounts(); err != nil {
return err
}
return nil
}
// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
dir, err := container.RWLayer.Mount(container.GetMountLabel())
if err != nil {
return err
}
logrus.Debugf("container mounted via layerStore: %v", dir)
if container.BaseFS != dir {
// The mount path reported by the graph driver should always be trusted on Windows, since the
// volume path for a given mounted layer may change over time. This should only be an error
// on non-Windows operating systems.
if container.BaseFS != "" && runtime.GOOS != "windows" {
daemon.Unmount(container)
return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
}
}
container.BaseFS = dir // TODO: combine these fields
return nil
}
// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
if err := container.RWLayer.Unmount(); err != nil {
logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
return err
}
return nil
}
// Subnets returns the IPv4 and IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
var v4Subnets []net.IPNet
var v6Subnets []net.IPNet
managedNetworks := daemon.netController.Networks()
for _, managedNetwork := range managedNetworks {
v4infos, v6infos := managedNetwork.Info().IpamInfo()
for _, info := range v4infos {
if info.IPAMData.Pool != nil {
v4Subnets = append(v4Subnets, *info.IPAMData.Pool)
}
}
for _, info := range v6infos {
if info.IPAMData.Pool != nil {
v6Subnets = append(v6Subnets, *info.IPAMData.Pool)
}
}
}
return v4Subnets, v6Subnets
}
// GraphDriverName returns the name of the graph driver used by the layer.Store
func (daemon *Daemon) GraphDriverName() string {
return daemon.layerStore.DriverName()
}
// GetUIDGIDMaps returns the current daemon's user namespace settings
// for the full uid and gid maps which will be applied to containers
// started in this instance.
func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
return daemon.uidMaps, daemon.gidMaps
}
// GetRemappedUIDGID returns the current daemon's uid and gid values
// if user namespaces are in use for this daemon instance. If not
// this function will return "real" root values of 0, 0.
func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
return uid, gid
}
// prepareTempDir prepares and returns the default directory to use
// for temporary files.
// If it doesn't exist, it is created. If it exists, its content is removed.
func prepareTempDir(rootDir string, rootUID, rootGID int) (string, error) {
var tmpDir string
if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
tmpDir = filepath.Join(rootDir, "tmp")
newName := tmpDir + "-old"
if err := os.Rename(tmpDir, newName); err == nil {
go func() {
if err := os.RemoveAll(newName); err != nil {
logrus.Warnf("failed to delete old tmp directory: %s", newName)
}
}()
} else {
logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err)
if err := os.RemoveAll(tmpDir); err != nil {
logrus.Warnf("failed to delete old tmp directory: %s", tmpDir)
}
}
}
// We don't remove the content of tmpdir if it's not the default,
// it may hold things that do not belong to us.
return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID)
}
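// setupInitLayer sets up the init layer at initPath, owned by the daemon's remapped root UID and GID.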
func (daemon *Daemon) setupInitLayer(initPath string) error {
rootUID, rootGID := daemon.GetRemappedUIDGID()
return initlayer.Setup(initPath, rootUID, rootGID)
}
func setDefaultMtu(conf *config.Config) {
// Do nothing if the config already specifies a non-zero (non-default) MTU.
if conf.Mtu != 0 {
return
}
conf.Mtu = config.DefaultNetworkMtu
}
func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) {
volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID)
if err != nil {
return nil, err
}
volumedrivers.RegisterPluginGetter(daemon.PluginStore)
if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
return nil, errors.New("local volume driver could not be registered")
}
return store.New(daemon.configStore.Root)
}
// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
return daemon.shutdown
}
// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(conf *config.Config) error {
advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise)
if err != nil {
if err == discovery.ErrDiscoveryDisabled {
return nil
}
return err
}
conf.ClusterAdvertise = advertise
discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts)
if err != nil {
return fmt.Errorf("discovery initialization failed (%v)", err)
}
daemon.discoveryWatcher = discoveryWatcher
return nil
}
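// isBridgeNetworkDisabled reports whether the configured bridge interface is the
// sentinel value that disables the default bridge network.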
func isBridgeNetworkDisabled(conf *config.Config) bool {
return conf.BridgeConfig.Iface == config.DisableNetworkBridge
}
func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
options := []nwconfig.Option{}
if dconfig == nil {
return options, nil
}
options = append(options, nwconfig.OptionExperimental(dconfig.Experimental))
options = append(options, nwconfig.OptionDataDir(dconfig.Root))
options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))
dd := runconfig.DefaultDaemonNetworkMode()
dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
options = append(options, nwconfig.OptionDefaultNetwork(dn))
if strings.TrimSpace(dconfig.ClusterStore) != "" {
kv := strings.Split(dconfig.ClusterStore, "://")
if len(kv) != 2 {
return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
}
options = append(options, nwconfig.OptionKVProvider(kv[0]))
options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
}
if len(dconfig.ClusterOpts) > 0 {
options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
}
if daemon.discoveryWatcher != nil {
options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
}
if dconfig.ClusterAdvertise != "" {
options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
}
options = append(options, nwconfig.OptionLabels(dconfig.Labels))
options = append(options, driverOptions(dconfig)...)
if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
}
if pg != nil {
options = append(options, nwconfig.OptionPluginGetter(pg))
}
return options, nil
}
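// copyBlkioEntry converts containerd blkio stat entries into the equivalent API types.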
func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
out := make([]types.BlkioStatEntry, len(entries))
for i, re := range entries {
out[i] = types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: re.Op,
Value: re.Value,
}
}
return out
}
// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
return daemon.cluster
}
// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
daemon.cluster = cluster
}
func (daemon *Daemon) pluginShutdown() {
manager := daemon.pluginManager
// Check for a valid manager object. In error conditions, daemon init can fail
// and Shutdown can be called before the plugin manager is initialized.
if manager != nil {
manager.Shutdown()
}
}
// PluginManager returns current pluginManager associated with the daemon
func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
return daemon.pluginManager
}
// PluginGetter returns current pluginStore associated with the daemon
func (daemon *Daemon) PluginGetter() *plugin.Store {
return daemon.PluginStore
}
// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *config.Config) error {
// get the canonical path to the Docker root directory
var realRoot string
if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
realRoot = config.Root
} else {
realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
if err != nil {
return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
}
}
uidMaps, gidMaps, err := setupRemappedRoot(config)
if err != nil {
return err
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return err
}
if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
return err
}
return nil
}
|
[
"\"DOCKER_DRIVER\"",
"\"DOCKER_TMPDIR\""
] |
[] |
[
"DOCKER_DRIVER",
"DOCKER_TMPDIR"
] |
[]
|
["DOCKER_DRIVER", "DOCKER_TMPDIR"]
|
go
| 2 | 0 | |
var/spack/repos/builtin/packages/gnuplot/package.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class Gnuplot(AutotoolsPackage):
"""Gnuplot is a portable command-line driven graphing utility for Linux,
OS/2, MS Windows, OSX, VMS, and many other platforms. The source
code is copyrighted but freely distributed (i.e., you don't have
to pay for it). It was originally created to allow scientists and
students to visualize mathematical functions and data
interactively, but has grown to support many non-interactive uses
such as web scripting. It is also used as a plotting engine by
third-party applications like Octave. Gnuplot has been supported
and under active development since 1986.
"""
homepage = "http://www.gnuplot.info"
url = "http://downloads.sourceforge.net/project/gnuplot/gnuplot/5.0.6/gnuplot-5.0.6.tar.gz"
# There is a conflict in term.h between gnuplot and ncurses, which is a
# dependency of readline. Fix it with a small patch
patch('term_include.patch')
version('5.2.7', sha256='97fe503ff3b2e356fe2ae32203fc7fd2cf9cef1f46b60fe46dc501a228b9f4ed')
version('5.2.5', sha256='039db2cce62ddcfd31a6696fe576f4224b3bc3f919e66191dfe2cdb058475caa')
version('5.2.2', sha256='a416d22f02bdf3873ef82c5eb7f8e94146795811ef808e12b035ada88ef7b1a1')
version('5.2.0', sha256='7dfe6425a1a6b9349b1fb42dae46b2e52833b13e807a78a613024d6a99541e43')
version('5.0.7', sha256='0ad760ff013b4a9cf29853fa9b50c50030a33cd8fb86220a23abb466655136fc')
version('5.0.6', sha256='5bbe4713e555c2e103b7d4ffd45fca69551fff09cf5c3f9cb17428aaacc9b460')
version('5.0.5', sha256='25f3e0bf192e01115c580f278c3725d7a569eb848786e12b455a3fda70312053')
version('5.0.1', sha256='7cbc557e71df581ea520123fb439dea5f073adcc9010a2885dc80d4ed28b3c47')
variant('wx', default=False,
description='Activates wxWidgets terminal')
variant('gd', default=True,
description='Activates gd based terminal')
variant('cairo', default=True,
description='Activates cairo based terminal')
variant('X', default=False,
description='Build with X11')
variant('libcerf', default=True,
description='Build with libcerf support')
variant('pbm', default=False,
description='Enable PBM (Portable Bit Map) and other older bitmap terminals') # NOQA: ignore=E501
variant('qt', default=False,
description='Build with QT')
# required dependencies
depends_on('readline')
depends_on('pkgconfig', type='build')
depends_on('libxpm')
depends_on('libiconv')
# optional dependencies:
depends_on('libcerf', when='+libcerf')
depends_on('libgd', when='+gd')
depends_on('[email protected]:', when='+cairo')
depends_on('wxwidgets', when='+wx')
depends_on('[email protected]:', when='+wx')
depends_on('[email protected]:', when='+cairo')
depends_on('libx11', when='+X')
depends_on('[email protected]:+opengl', when='+qt')
depends_on('qt+framework', when='+qt platform=darwin')
def configure_args(self):
# see https://github.com/Homebrew/homebrew-core/blob/master/Formula/gnuplot.rb
# and https://github.com/macports/macports-ports/blob/master/math/gnuplot/Portfile
spec = self.spec
options = [
'--disable-dependency-tracking',
'--disable-silent-rules',
# Per upstream: "--with-tutorial is horribly out of date."
'--without-tutorial',
'--with-readline=%s' % spec['readline'].prefix
]
if '+pbm' in spec:
options.append('--with-bitmap-terminals')
else:
options.append('--without-bitmap-terminals')
if '+X' in spec:
# It seems there's an open bug for wxWidgets support
# See : http://sourceforge.net/p/gnuplot/bugs/1694/
os.environ['TERMLIBS'] = '-lX11'
options.append('--with-x')
else:
options.append('--without-x')
if '+qt' in spec:
options.append('--with-qt=qt5')
# QT needs C++11 compiler:
os.environ['CXXFLAGS'] = '{0}'.format(self.compiler.cxx11_flag)
if spec.satisfies('platform=darwin'):
qt_path = spec['qt'].prefix
# see
# http://gnuplot.10905.n7.nabble.com/Building-with-Qt-depends-on-pkg-config-Qt-5-term-doesn-t-work-on-OS-X-td18063.html
os.environ['QT_LIBS'] = (
'-F{0}/lib ' +
'-framework QtCore ' +
'-framework QtGui ' +
'-framework QtWidgets ' +
'-framework QtNetwork ' +
'-framework QtSvg ' +
'-framework QtPrintSupport').format(qt_path)
os.environ['QT_CFLAGS'] = (
'-F{0}/lib ' +
'-I{0}/lib/QtCore.framework/Headers ' +
'-I{0}/lib/QtGui.framework/Headers ' +
'-I{0}/lib/QtWidgets.framework/Headers ' +
'-I{0}/lib/QtNetwork.framework/Headers ' +
'-I{0}/lib/QtSvg.framework/Headers').format(qt_path)
else:
options.append('--with-qt=no')
if '+wx' in spec:
options.append('--with-wx=%s' % spec['wx'].prefix)
else:
options.append('--disable-wxwidgets')
if '+gd' in spec:
options.append('--with-gd=%s' % spec['libgd'].prefix)
else:
options.append('--without-gd')
if '+cairo' in spec:
options.append('--with-cairo')
else:
options.append('--without-cairo')
if '+libcerf' in spec:
options.append('--with-libcerf')
else:
options.append('--without-libcerf')
# TODO: Enable pdflib-based pdf terminal
# '--with-pdf=%s' % spec['pdflib-lite'].prefix (or pdflib)
options.append('--without-pdf')
# TODO: Enable lua-based terminals
options.append('--without-lua')
# TODO: --with-latex
options.append('--without-latex')
# TODO: --with-aquaterm depends_on('aquaterm')
options.append('--without-aquaterm')
return options
|
[] |
[] |
[
"QT_CFLAGS",
"TERMLIBS",
"CXXFLAGS",
"QT_LIBS"
] |
[]
|
["QT_CFLAGS", "TERMLIBS", "CXXFLAGS", "QT_LIBS"]
|
python
| 4 | 0 | |
main.go
|
package main
import (
"crypto/x509"
"encoding/pem"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/fsouza/go-dockerclient"
log "github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/crypto/ssh/agent"
"gopkg.in/alecthomas/kingpin.v2"
)
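// Config holds the parsed command line options for the tunnel.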
type Config struct {
Hostname *string
Username *string
DockerSock *string
KeyPath *string
Ports *[]string
LocalAddress *string
ImageName *string
ContainerID *string
SSHKeyPath *string
SSHPort *string
ForwardEPMD *bool
Debug *bool
}
// configure parses the passed command line options and populates the Config
// struct.
func configure() *Config {
var opts Config
username := os.Getenv("USER")
homeDir := os.Getenv("HOME")
keyPath := filepath.Join(homeDir, ".ssh", "id_rsa")
opts.Hostname = kingpin.Flag("hostname", "The remote hostname to connect to").Required().Short('h').String()
opts.Ports = kingpin.Flag("port", "The local:remote port to connect to. e.g. 8080:80").Default("8080:80").Short('p').Strings()
opts.LocalAddress = kingpin.Flag("local-address", "The local IP address to listen on").Default("127.0.0.1").Short('a').String()
opts.Username = kingpin.Flag("username", "The ssh username on the remote host").Default(username).Short('l').String()
opts.DockerSock = kingpin.Flag("docker-sock", "The Docker socket address on the remote host").Default("unix:///var/run/docker.sock").Short('s').String()
opts.ImageName = kingpin.Flag("image-name", "The Docker image to match on for this application").Short('n').String()
opts.ContainerID = kingpin.Flag("container-id", "The Docker container ID to match for this application").Short('c').String()
opts.SSHKeyPath = kingpin.Flag("ssh-key", "Path to the ssh private key to use").Default(keyPath).Short('i').String()
opts.SSHPort = kingpin.Flag("ssh-port", "Port to connect to ssh on the remote host").Default("22").Short('P').String()
opts.ForwardEPMD = kingpin.Flag("forward-epmd", "Shall we also forward the EPMD port?").Default("true").Short('e').Bool()
opts.Debug = kingpin.Flag("debug", "Turn on debug logging").Default("false").Short('d').Bool()
kingpin.Parse()
// We must be given exactly one of these: either image-name or container-id, not both.
if (len(*opts.ContainerID) < 1 && len(*opts.ImageName) < 1) || (len(*opts.ContainerID) > 0 && len(*opts.ImageName) > 0) {
kingpin.FatalUsage("You must supply either image-name or container-id")
}
return &opts
}
// decrypt decodes SSH keys using the supplied passphrase
func decrypt(key []byte, passphrase []byte) []byte {
block, rest := pem.Decode(key)
if len(rest) > 0 {
log.Fatalf("Extra data included in key")
}
if x509.IsEncryptedPEMBlock(block) {
der, err := x509.DecryptPEMBlock(block, passphrase)
if err != nil {
log.Fatalf("Decrypt failed: %v", err)
}
return pem.EncodeToMemory(&pem.Block{Type: block.Type, Bytes: der})
}
return key
}
// sshDialer is a wrapper to let the Docker client library make calls
// over the SSH tunnel to the remote Unix socket.
type sshDialer struct {
Client *ssh.Client
}
func (d *sshDialer) Dial(ignored, socketPath string) (net.Conn, error) {
var proto string
if strings.HasPrefix(socketPath, "tcp") {
proto = "tcp"
} else {
proto = "unix"
}
c, err := d.Client.Dial(proto, socketPath)
if err != nil {
return nil, fmt.Errorf("Dial error: %s", err)
}
return c, nil
}
// findContainerByImageName takes a Docker container image name and looks
// for a match on the remote host. It returns the last matching container.
func findContainerByImageName(client *docker.Client, name string) (*docker.APIContainers, error) {
containers, err := client.ListContainers(docker.ListContainersOptions{All: false})
if err != nil {
return nil, fmt.Errorf("Unable to find container: %s", err)
}
var lastContainer docker.APIContainers
for _, cntnr := range containers {
if strings.Contains(cntnr.Image, name) {
lastContainer = cntnr
log.Info("Found matching container:")
log.Infof(" - id: %s", cntnr.ID[0:12])
log.Infof(" - image: %s", cntnr.Image)
log.Infof(" - name: %s", cntnr.Names[0])
log.Infof(" - up: %s", time.Now().UTC().Sub(time.Unix(cntnr.Created, 0)))
}
}
if len(lastContainer.ID) > 0 {
return &lastContainer, nil
}
return nil, fmt.Errorf("Unable to match container image: %s", name)
}
// findContainerByID takes either a long or short container ID and
// matches the remote container by that.
func findContainerByID(client *docker.Client, id string) (*docker.APIContainers, error) {
containers, err := client.ListContainers(docker.ListContainersOptions{All: false})
if err != nil {
return nil, fmt.Errorf("Unable to find container: %s", err)
}
for _, cntnr := range containers {
if cntnr.ID[0:12] == id[0:12] {
return &cntnr, nil
}
}
return nil, fmt.Errorf("Unable to match container ID: %s", id)
}
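// findIPForContainer inspects the container and returns its IP address on the Docker network.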
func findIPForContainer(client *docker.Client, cntnr *docker.APIContainers) (string, error) {
container, err := client.InspectContainer(cntnr.ID)
if err != nil {
return "", fmt.Errorf("Unable to inspect container: %s", err)
}
ip := container.NetworkSettings.IPAddress
log.Infof("Container IP address: %s", ip)
return ip, nil
}
// proxyPort start up a listener and begins proxying all TCP requests
// to the specified address and port.
func proxyPort(client *ssh.Client, localAddress string, ip string, port string) {
ports := strings.Split(port, ":")
localPort, err1 := strconv.Atoi(ports[0])
remotePort, err2 := strconv.Atoi(ports[1])
if err1 != nil || err2 != nil {
log.Fatal("Ports must be of the form <local>:<remote> where both are integers")
}
localAddr := &net.TCPAddr{IP: net.ParseIP(localAddress), Port: localPort}
remoteAddr := &net.TCPAddr{IP: net.ParseIP(ip), Port: remotePort}
proxy, err := NewTCPProxy(localAddr, remoteAddr, client)
if err != nil {
// OK to log Fatal here; we want to exit if either goroutine fails
log.Fatalf("Unable to start TCP proxy for %d -> %d: %s", localPort, remotePort, err)
}
proxy.Run()
}
// readPassphrase uses the SSH terminal input function to take the
// passphrase for the key from the user on the command line.
func readPassphrase(keyPath string) []byte {
fmt.Printf("passphrase (%s): ", keyPath)
passphrase, err := terminal.ReadPassword(int(syscall.Stdin))
if err != nil {
log.Fatalf("Unable to read passphrase: %s", err)
}
fmt.Println()
return passphrase
}
func banner() {
banner := `
_ _ _ _
| | (_) | | | |
___| |__ _ _ __ ___ _ __ ___ | |_| |_ ___ _ __
/ __| '_ \| | '_ \/ __| '_ \ / _ \| __| __/ _ \ '__|
\__ \ | | | | |_) \__ \ |_) | (_) | |_| || __/ |
|___/_| |_|_| .__/|___/ .__/ \___/ \__|\__\___|_|
| | | |
|_| |_|
`
fmt.Println(banner)
}
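// configureDockerClient creates a Docker client whose connections are dialed through the SSH tunnel.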
func configureDockerClient(config *Config, client *ssh.Client) *docker.Client {
dialer := &sshDialer{
Client: client,
}
// Configure a new Docker client
dockerCli, err := docker.NewClient(*config.DockerSock)
if err != nil {
log.Fatalf("Unable to create new Docker client: %s", err)
}
// Override Dialer to use our SSH-proxied socket
dockerCli.Dialer = dialer
return dockerCli
}
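// connectSSH establishes the SSH connection, preferring a running ssh-agent and
// falling back to key-based authentication.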
func connectSSH(config *Config) *ssh.Client {
client, err := connectWithAgent(config)
if client == nil && err == nil {
client, err := connectWithKey(config)
if err != nil {
log.Fatalf("Failed ssh agent and RSA key auth: ", err)
}
return client
}
if err != nil {
log.Fatalf("Looks like ssh agent is available, but got error:", err)
}
return client
}
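// connectWithAgent attempts SSH authentication via a running ssh-agent.
// It returns (nil, nil) when no agent socket is available.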
func connectWithAgent(config *Config) (*ssh.Client, error) {
socket := os.Getenv("SSH_AUTH_SOCK")
if len(socket) < 1 {
return nil, nil
}
conn, err := net.Dial("unix", socket)
if err != nil {
return nil, fmt.Errorf("Unable to talk to ssh agent: %s", err)
}
agentClient := agent.NewClient(conn)
sshConfig := &ssh.ClientConfig{
User: *config.Username,
Auth: []ssh.AuthMethod{
// Use a callback rather than PublicKeys
// so we only consult the agent once the remote server
// wants it.
ssh.PublicKeysCallback(agentClient.Signers),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
hostAddr := *config.Hostname+":"+*config.SSHPort
// Connect to the remote server and perform the SSH handshake.
client, err := ssh.Dial("tcp", hostAddr, sshConfig)
if err != nil {
return nil, fmt.Errorf("unable to connect: %v", err)
}
return client, nil
}
// connectWithKey connects to a remote SSH server using the supplied
// key and passphrase.
func connectWithKey(config *Config) (*ssh.Client, error) {
passphrase := readPassphrase(*config.SSHKeyPath)
key, err := ioutil.ReadFile(*config.SSHKeyPath)
if err != nil {
return nil, fmt.Errorf("unable to read private key: %v", err)
}
key = decrypt(key, passphrase)
// Create the Signer for this private key.
signer, err := ssh.ParsePrivateKey(key)
if err != nil {
return nil, fmt.Errorf("unable to parse private key: %v", err)
}
sshConfig := &ssh.ClientConfig{
User: *config.Username,
Auth: []ssh.AuthMethod{
// Use the PublicKeys method for remote authentication.
ssh.PublicKeys(signer),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
hostAddr := *config.Hostname+":"+*config.SSHPort
// Connect to the remote server and perform the SSH handshake.
client, err := ssh.Dial("tcp", hostAddr, sshConfig)
if err != nil {
return nil, fmt.Errorf("unable to connect: %v", err)
}
return client, nil
}
func main() {
config := configure()
banner()
if *config.Debug {
log.SetLevel(log.DebugLevel)
log.Debug("Turning on debug logging")
}
client := connectSSH(config)
defer client.Close()
// Configure the Docker client
dockerCli := configureDockerClient(config, client)
// Lookup a container by image name
var cntnr *docker.APIContainers
var err error
if len(*config.ImageName) > 0 {
cntnr, err = findContainerByImageName(dockerCli, *config.ImageName)
} else {
cntnr, err = findContainerByID(dockerCli, *config.ContainerID)
}
if err != nil {
log.Fatal(err.Error())
}
log.Infof("Using container: %s", cntnr.ID[0:12])
// Get the internal IP on the Docker network for this container
ip, err := findIPForContainer(dockerCli, cntnr)
if err != nil {
log.Fatal(err.Error())
}
log.Info("Forwarding ports:")
for _, port := range *config.Ports {
log.Infof(" - %s", port)
go proxyPort(client, *config.LocalAddress, ip, port)
}
if *config.ForwardEPMD {
log.Info("Forwarding EPMD on 4369")
go proxyPort(client, *config.LocalAddress, ip, "4369:4369")
}
select {}
}
|
[
"\"USER\"",
"\"HOME\"",
"\"SSH_AUTH_SOCK\""
] |
[] |
[
"USER",
"HOME",
"SSH_AUTH_SOCK"
] |
[]
|
["USER", "HOME", "SSH_AUTH_SOCK"]
|
go
| 3 | 0 | |
pkg/acp/ambassador_test.go
|
package acp_test
import (
"context"
"testing"
"time"
"github.com/datawire/ambassador/pkg/acp"
"github.com/datawire/dlib/dtime"
)
type awMetadata struct {
t *testing.T
ft *dtime.FakeTime
aw *acp.AmbassadorWatcher
}
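// check asserts the fake clock reading and the watcher's alive/ready state for a numbered test step.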
func (m *awMetadata) check(seq int, clock int, alive bool, ready bool) {
if m.ft.TimeSinceBoot() != time.Duration(clock)*time.Second {
m.t.Errorf("%d: FakeTime.TimeSinceBoot should be %ds, not %v", seq, clock, m.ft.TimeSinceBoot())
}
if m.aw.IsAlive() != alive {
m.t.Errorf("%d: DiagdWatcher.IsAlive %t, wanted %t", seq, m.aw.IsAlive(), alive)
}
if m.aw.IsReady() != ready {
m.t.Errorf("%d: DiagdWatcher.IsReady %t, wanted %t", seq, m.aw.IsReady(), ready)
}
}
func (m *awMetadata) stepSec(step int) {
m.ft.StepSec(step)
}
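// newAWMetadata wires a fake clock, a happy Envoy readiness stub, and fresh diagd/Envoy
// watchers into an AmbassadorWatcher under test.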
func newAWMetadata(t *testing.T) *awMetadata {
ft := dtime.NewFakeTime()
f := &fakeReady{mode: Happy}
dw := acp.NewDiagdWatcher()
dw.SetFetchTime(ft.Now)
if dw == nil {
t.Error("New DiagdWatcher is nil?")
}
ew := acp.NewEnvoyWatcher()
ew.SetReadyCheck(f.readyCheck)
if ew == nil {
t.Error("New EnvoyWatcher is nil?")
}
aw := acp.NewAmbassadorWatcher(ew, dw)
aw.SetFetchTime(ft.Now)
return &awMetadata{t: t, ft: ft, aw: aw}
}
func TestAmbassadorHappyPath(t *testing.T) {
m := newAWMetadata(t)
m.check(0, 0, true, false)
// Advance the clock 10s.
m.stepSec(10)
m.check(1, 10, true, false)
// Send a snapshot.
m.stepSec(10)
m.aw.NoteSnapshotSent()
m.check(2, 20, true, false)
// Mark the snapshot processed.
m.stepSec(10)
m.aw.NoteSnapshotProcessed()
m.check(3, 30, true, false)
// Fetch readiness.
m.stepSec(10)
m.aw.FetchEnvoyReady(context.Background())
m.check(4, 40, true, true)
// Make sure it stays happy.
m.stepSec(10)
m.check(5, 50, true, true)
}
func TestAmbassadorUnrealisticallyHappy(t *testing.T) {
m := newAWMetadata(t)
m.check(0, 0, true, false)
// Advance the clock 10s.
m.stepSec(10)
m.check(1, 10, true, false)
// Send a snapshot, mark it processed, and have Envoy come up all
// in the same instant. This is _highly unlikely_ but WTF, let's
// try it. We expect to see alive and ready here.
m.stepSec(10)
m.aw.NoteSnapshotSent()
m.aw.NoteSnapshotProcessed()
m.aw.FetchEnvoyReady(context.Background())
m.check(2, 20, true, true)
// Make sure it stays happy.
m.stepSec(10)
m.check(3, 30, true, true)
}
func TestAmbassadorNoSnapshots(t *testing.T) {
m := newAWMetadata(t)
m.check(0, 0, true, false)
// Advance the clock halfway through the diagd boot grace period.
// We should see alive but not ready.
m.stepSec(300)
m.check(1, 300, true, false)
// Advance nearly to the end of diagd boot grace period.
// We should see alive but not ready.
m.stepSec(299)
m.check(2, 599, true, false)
// Advance to the end of diagd boot grace period.
// We should see neither alive nor ready.
m.stepSec(1)
m.check(3, 600, false, false)
// Nothing should change after another minute.
m.stepSec(60)
m.check(4, 660, false, false)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
setup.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2021 Devon (Gorialis) R
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import pathlib
import re
import subprocess
from setuptools import setup
ROOT = pathlib.Path(__file__).parent
with open(ROOT / 'jishaku' / 'meta.py', 'r', encoding='utf-8') as f:
VERSION_MATCH = re.search(r'VersionInfo\(major=(\d+), minor=(\d+), micro=(\d+), .+\)', f.read(), re.MULTILINE)
if not VERSION_MATCH:
raise RuntimeError('version is not set or could not be located')
VERSION = '.'.join([VERSION_MATCH.group(1), VERSION_MATCH.group(2), VERSION_MATCH.group(3)])
EXTRA_REQUIRES = {}
for feature in (ROOT / 'requirements').glob('*.txt'):
with open(feature, 'r', encoding='utf-8') as f:
EXTRA_REQUIRES[feature.with_suffix('').name] = f.read().splitlines()
REQUIREMENTS = EXTRA_REQUIRES.pop('_')
if not VERSION:
raise RuntimeError('version is not set')
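# Append the git commit count, short hash, and branch name to the version when this
# build does not correspond to a final release tag.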
try:
PROCESS = subprocess.Popen(
['git', 'rev-list', '--count', 'HEAD'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
COMMIT_COUNT, ERR = PROCESS.communicate()
if COMMIT_COUNT:
PROCESS = subprocess.Popen(
['git', 'rev-parse', '--short', 'HEAD'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
COMMIT_HASH, ERR = PROCESS.communicate()
if COMMIT_HASH:
match = re.match(r'(\d).(\d).(\d)(a|b|rc)?', os.getenv('tag_name') or "")
if (match and match[4]) or not match:
VERSION += ('' if match else 'a') + COMMIT_COUNT.decode('utf-8').strip() + '+g' + COMMIT_HASH.decode('utf-8').strip()
# Also attempt to retrieve a branch, when applicable
PROCESS = subprocess.Popen(
['git', 'symbolic-ref', '-q', '--short', 'HEAD'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
COMMIT_BRANCH, ERR = PROCESS.communicate()
if COMMIT_BRANCH:
VERSION += "." + re.sub('[^a-zA-Z0-9.]', '.', COMMIT_BRANCH.decode('utf-8').strip())
except FileNotFoundError:
pass
with open(ROOT / 'README.md', 'r', encoding='utf-8') as f:
README = f.read()
setup(
name='jishaku',
author='Devon (Gorialis) R',
url='https://github.com/Gorialis/jishaku',
license='MIT',
description='A discord.py extension including useful tools for bot development and debugging.',
long_description=README,
long_description_content_type='text/markdown',
project_urls={
'Documentation': 'https://jishaku.readthedocs.io/en/latest/',
'Code': 'https://github.com/Gorialis/jishaku',
'Issue tracker': 'https://github.com/Gorialis/jishaku/issues'
},
version=VERSION,
packages=['jishaku', 'jishaku.features', 'jishaku.repl', 'jishaku.shim'],
include_package_data=True,
install_requires=REQUIREMENTS,
python_requires='>=3.8.0',
extras_require=EXTRA_REQUIRES,
download_url='https://github.com/Gorialis/jishaku/archive/{}.tar.gz'.format(VERSION),
keywords='jishaku discord.py discord cog repl extension',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: AsyncIO',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Topic :: Software Development :: Debuggers',
'Topic :: Software Development :: Testing',
'Topic :: Utilities'
]
)
|
[] |
[] |
[
"tag_name"
] |
[]
|
["tag_name"]
|
python
| 1 | 0 | |
derp/derp_server.go
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package derp
// TODO(crawshaw): with predefined serverKey in clients and HMAC on packets we could skip TLS
import (
"bufio"
"context"
"crypto/ed25519"
crand "crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/json"
"errors"
"expvar"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"math/big"
"math/rand"
"os"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"go4.org/mem"
"golang.org/x/crypto/nacl/box"
"golang.org/x/sync/errgroup"
"tailscale.com/disco"
"tailscale.com/metrics"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/version"
)
var debug, _ = strconv.ParseBool(os.Getenv("DERP_DEBUG_LOGS"))
// verboseDropKeys is the set of destination public keys that should
// verbosely log whenever DERP drops a packet.
var verboseDropKeys = map[key.Public]bool{}
func init() {
keys := os.Getenv("TS_DEBUG_VERBOSE_DROPS")
if keys == "" {
return
}
for _, keyStr := range strings.Split(keys, ",") {
k, err := key.NewPublicFromHexMem(mem.S(keyStr))
if err != nil {
log.Printf("ignoring invalid debug key %q: %v", keyStr, err)
} else {
verboseDropKeys[k] = true
}
}
}
func init() {
rand.Seed(time.Now().UnixNano())
}
const (
perClientSendQueueDepth = 32 // packets buffered for sending
writeTimeout = 2 * time.Second
)
const host64bit = (^uint(0) >> 32) & 1 // 1 on 64-bit, 0 on 32-bit
// pad32bit is 4 on 32-bit machines and 0 on 64-bit.
// It exists so the Server struct's atomic fields can be aligned to 8
// byte boundaries. (As tested by GOARCH=386 go test, etc)
const pad32bit = 4 - host64bit*4 // 0 on 64-bit, 4 on 32-bit
// Server is a DERP server.
type Server struct {
// WriteTimeout, if non-zero, specifies how long to wait
// before failing when writing to a client.
WriteTimeout time.Duration
privateKey key.Private
publicKey key.Public
logf logger.Logf
memSys0 uint64 // runtime.MemStats.Sys at start (or early-ish)
meshKey string
limitedLogf logger.Logf
metaCert []byte // the encoded x509 cert to send after LetsEncrypt cert+intermediate
// Counters:
_ [pad32bit]byte
packetsSent, bytesSent expvar.Int
packetsRecv, bytesRecv expvar.Int
packetsRecvByKind metrics.LabelMap
packetsRecvDisco *expvar.Int
packetsRecvOther *expvar.Int
_ [pad32bit]byte
packetsDropped expvar.Int
packetsDroppedReason metrics.LabelMap
packetsDroppedUnknown *expvar.Int // unknown dst pubkey
packetsDroppedFwdUnknown *expvar.Int // unknown dst pubkey on forward
packetsDroppedGone *expvar.Int // dst conn shutting down
packetsDroppedQueueHead *expvar.Int // queue full, drop head packet
packetsDroppedQueueTail *expvar.Int // queue full, drop tail packet
packetsDroppedWrite *expvar.Int // error writing to dst conn
_ [pad32bit]byte
packetsForwardedOut expvar.Int
packetsForwardedIn expvar.Int
peerGoneFrames expvar.Int // number of peer gone frames sent
accepts expvar.Int
curClients expvar.Int
curHomeClients expvar.Int // ones with preferred
clientsReplaced expvar.Int
unknownFrames expvar.Int
homeMovesIn expvar.Int // established clients announce home server moves in
homeMovesOut expvar.Int // established clients announce home server moves out
multiForwarderCreated expvar.Int
multiForwarderDeleted expvar.Int
removePktForwardOther expvar.Int
avgQueueDuration *uint64 // In milliseconds; accessed atomically
mu sync.Mutex
closed bool
netConns map[Conn]chan struct{} // chan is closed when conn closes
clients map[key.Public]*sclient
clientsEver map[key.Public]bool // never deleted from, for stats; fine for now
watchers map[*sclient]bool // mesh peer -> true
// clientsMesh tracks all clients in the cluster, both locally
// and to mesh peers. If the value is nil, that means the
// peer is only local (and thus in the clients Map, but not
// remote). If the value is non-nil, it's remote (+ maybe also
// local).
clientsMesh map[key.Public]PacketForwarder
// sentTo tracks which peers have sent to which other peers,
// and at which connection number. This isn't on sclient
// because it includes intra-region forwarded packets as the
// src.
sentTo map[key.Public]map[key.Public]int64 // src => dst => dst's latest sclient.connNum
}
// PacketForwarder is something that can forward packets.
//
// It's mostly an interface for circular dependency reasons; the
// typical implementation is derphttp.Client. The other implementation
// is a multiForwarder, which this package creates as needed if a
// public key gets more than one PacketForwarder registered for it.
type PacketForwarder interface {
ForwardPacket(src, dst key.Public, payload []byte) error
}
// Conn is the subset of the underlying net.Conn the DERP Server needs.
// It is a defined type so that non-net connections can be used.
type Conn interface {
io.Closer
// The *Deadline methods follow the semantics of net.Conn.
SetDeadline(time.Time) error
SetReadDeadline(time.Time) error
SetWriteDeadline(time.Time) error
}
// NewServer returns a new DERP server. It doesn't listen on its own.
// Connections are given to it via Server.Accept.
func NewServer(privateKey key.Private, logf logger.Logf) *Server {
var ms runtime.MemStats
runtime.ReadMemStats(&ms)
s := &Server{
privateKey: privateKey,
publicKey: privateKey.Public(),
logf: logf,
limitedLogf: logger.RateLimitedFn(logf, 30*time.Second, 5, 100),
packetsRecvByKind: metrics.LabelMap{Label: "kind"},
packetsDroppedReason: metrics.LabelMap{Label: "reason"},
clients: map[key.Public]*sclient{},
clientsEver: map[key.Public]bool{},
clientsMesh: map[key.Public]PacketForwarder{},
netConns: map[Conn]chan struct{}{},
memSys0: ms.Sys,
watchers: map[*sclient]bool{},
sentTo: map[key.Public]map[key.Public]int64{},
avgQueueDuration: new(uint64),
}
s.initMetacert()
s.packetsRecvDisco = s.packetsRecvByKind.Get("disco")
s.packetsRecvOther = s.packetsRecvByKind.Get("other")
s.packetsDroppedUnknown = s.packetsDroppedReason.Get("unknown_dest")
s.packetsDroppedFwdUnknown = s.packetsDroppedReason.Get("unknown_dest_on_fwd")
s.packetsDroppedGone = s.packetsDroppedReason.Get("gone")
s.packetsDroppedQueueHead = s.packetsDroppedReason.Get("queue_head")
s.packetsDroppedQueueTail = s.packetsDroppedReason.Get("queue_tail")
s.packetsDroppedWrite = s.packetsDroppedReason.Get("write_error")
return s
}
// SetMeshKey sets the pre-shared key that regional DERP servers use to mesh
// amongst themselves.
//
// It must be called before serving begins.
func (s *Server) SetMeshKey(v string) {
s.meshKey = v
}
// HasMeshKey reports whether the server is configured with a mesh key.
func (s *Server) HasMeshKey() bool { return s.meshKey != "" }
// MeshKey returns the configured mesh key, if any.
func (s *Server) MeshKey() string { return s.meshKey }
// PrivateKey returns the server's private key.
func (s *Server) PrivateKey() key.Private { return s.privateKey }
// PublicKey returns the server's public key.
func (s *Server) PublicKey() key.Public { return s.publicKey }
// Close closes the server and waits for the connections to disconnect.
func (s *Server) Close() error {
s.mu.Lock()
wasClosed := s.closed
s.closed = true
s.mu.Unlock()
if wasClosed {
return nil
}
var closedChs []chan struct{}
s.mu.Lock()
for nc, closed := range s.netConns {
nc.Close()
closedChs = append(closedChs, closed)
}
s.mu.Unlock()
for _, closed := range closedChs {
<-closed
}
return nil
}
func (s *Server) isClosed() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.closed
}
// Accept adds a new connection to the server and serves it.
//
// The provided bufio ReadWriter must be already connected to nc.
// Accept blocks until the Server is closed or the connection closes
// on its own.
//
// Accept closes nc.
func (s *Server) Accept(nc Conn, brw *bufio.ReadWriter, remoteAddr string) {
closed := make(chan struct{})
s.mu.Lock()
s.accepts.Add(1) // while holding s.mu for connNum read on next line
connNum := s.accepts.Value() // expvar sadly doesn't return new value on Add(1)
s.netConns[nc] = closed
s.mu.Unlock()
defer func() {
nc.Close()
close(closed)
s.mu.Lock()
delete(s.netConns, nc)
s.mu.Unlock()
}()
if err := s.accept(nc, brw, remoteAddr, connNum); err != nil && !s.isClosed() {
s.logf("derp: %s: %v", remoteAddr, err)
}
}
// initMetacert initializes s.metaCert with a self-signed x509 cert
// encoding this server's public key and protocol version. cmd/derper
// then sends this after the Let's Encrypt leaf + intermediate certs
// after the ServerHello (encrypted in TLS 1.3, not that it matters
// much).
//
// Then the client can save a round trip getting that and can start
// speaking DERP right away. (We don't use ALPN because that's sent in
// the clear and we're being paranoid to not look too weird to any
// middleboxes, given that DERP is an ultimate fallback path). But
// since the post-ServerHello certs are encrypted we can have the
// client also use them as a signal to be able to start speaking DERP
// right away, starting with its identity proof, encrypted to the
// server's public key.
//
// This RTT optimization fails where there's a corp-mandated
// TLS proxy with corp-mandated root certs on employee machines where
// the TLS proxy cleans up unnecessary certs. In that case we just fall
// back to the extra RTT.
func (s *Server) initMetacert() {
pub, priv, err := ed25519.GenerateKey(crand.Reader)
if err != nil {
log.Fatal(err)
}
tmpl := &x509.Certificate{
SerialNumber: big.NewInt(ProtocolVersion),
Subject: pkix.Name{
CommonName: fmt.Sprintf("derpkey%x", s.publicKey[:]),
},
// Windows requires NotAfter and NotBefore set:
NotAfter: time.Now().Add(30 * 24 * time.Hour),
NotBefore: time.Now().Add(-30 * 24 * time.Hour),
}
cert, err := x509.CreateCertificate(crand.Reader, tmpl, tmpl, pub, priv)
if err != nil {
log.Fatalf("CreateCertificate: %v", err)
}
s.metaCert = cert
}
// MetaCert returns the server metadata cert that can be sent by the
// TLS server to let the client skip a round trip during start-up.
func (s *Server) MetaCert() []byte { return s.metaCert }
// registerClient notes that client c is now authenticated and ready for packets.
// If c's public key was already connected with a different connection, the prior one is closed.
func (s *Server) registerClient(c *sclient) {
s.mu.Lock()
defer s.mu.Unlock()
old := s.clients[c.key]
if old == nil {
c.logf("adding connection")
} else {
s.clientsReplaced.Add(1)
c.logf("adding connection, replacing %s", old.remoteAddr)
go old.nc.Close()
}
s.clients[c.key] = c
s.clientsEver[c.key] = true
if _, ok := s.clientsMesh[c.key]; !ok {
s.clientsMesh[c.key] = nil // just for varz of total users in cluster
}
s.curClients.Add(1)
s.broadcastPeerStateChangeLocked(c.key, true)
}
// broadcastPeerStateChangeLocked enqueues a message to all watchers
// (other DERP nodes in the region, or trusted clients) that peer's
// presence changed.
//
// s.mu must be held.
func (s *Server) broadcastPeerStateChangeLocked(peer key.Public, present bool) {
for w := range s.watchers {
w.peerStateChange = append(w.peerStateChange, peerConnState{peer: peer, present: present})
go w.requestMeshUpdate()
}
}
// unregisterClient removes a client from the server.
func (s *Server) unregisterClient(c *sclient) {
s.mu.Lock()
defer s.mu.Unlock()
cur := s.clients[c.key]
if cur == c {
c.logf("removing connection")
delete(s.clients, c.key)
if v, ok := s.clientsMesh[c.key]; ok && v == nil {
delete(s.clientsMesh, c.key)
s.notePeerGoneFromRegionLocked(c.key)
}
s.broadcastPeerStateChangeLocked(c.key, false)
}
if c.canMesh {
delete(s.watchers, c)
}
s.curClients.Add(-1)
if c.preferred {
s.curHomeClients.Add(-1)
}
}
// notePeerGoneFromRegionLocked sends peerGone frames to parties that
// key has sent to previously (whether those sends were from a local
// client or forwarded). It must only be called after the key has
// been removed from clientsMesh.
func (s *Server) notePeerGoneFromRegionLocked(key key.Public) {
if _, ok := s.clientsMesh[key]; ok {
panic("usage")
}
// Find still-connected peers and either notify that we've gone away
// so they can drop their route entries to us (issue 150)
// or move them over to the active client (in case a replaced client
// connection is being unregistered).
for pubKey, connNum := range s.sentTo[key] {
if peer, ok := s.clients[pubKey]; ok && peer.connNum == connNum {
go peer.requestPeerGoneWrite(key)
}
}
delete(s.sentTo, key)
}
func (s *Server) addWatcher(c *sclient) {
if !c.canMesh {
panic("invariant: addWatcher called without permissions")
}
if c.key == s.publicKey {
// We're connecting to ourself. Do nothing.
return
}
s.mu.Lock()
defer s.mu.Unlock()
// Queue messages for each already-connected client.
for peer := range s.clients {
c.peerStateChange = append(c.peerStateChange, peerConnState{peer: peer, present: true})
}
// And enroll the watcher in future updates (of both
// connections & disconnections).
s.watchers[c] = true
go c.requestMeshUpdate()
}
func (s *Server) accept(nc Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error {
br, bw := brw.Reader, brw.Writer
nc.SetDeadline(time.Now().Add(10 * time.Second))
if err := s.sendServerKey(bw); err != nil {
return fmt.Errorf("send server key: %v", err)
}
nc.SetDeadline(time.Now().Add(10 * time.Second))
clientKey, clientInfo, err := s.recvClientKey(br)
if err != nil {
return fmt.Errorf("receive client key: %v", err)
}
if err := s.verifyClient(clientKey, clientInfo); err != nil {
return fmt.Errorf("client %x rejected: %v", clientKey, err)
}
// At this point we trust the client so we don't time out.
nc.SetDeadline(time.Time{})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c := &sclient{
connNum: connNum,
s: s,
key: clientKey,
nc: nc,
br: br,
bw: bw,
logf: logger.WithPrefix(s.logf, fmt.Sprintf("derp client %v/%x: ", remoteAddr, clientKey)),
done: ctx.Done(),
remoteAddr: remoteAddr,
connectedAt: time.Now(),
sendQueue: make(chan pkt, perClientSendQueueDepth),
peerGone: make(chan key.Public),
canMesh: clientInfo.MeshKey != "" && clientInfo.MeshKey == s.meshKey,
}
if c.canMesh {
c.meshUpdate = make(chan struct{})
}
if clientInfo != nil {
c.info = *clientInfo
}
s.registerClient(c)
defer s.unregisterClient(c)
err = s.sendServerInfo(bw, clientKey)
if err != nil {
return fmt.Errorf("send server info: %v", err)
}
return c.run(ctx)
}
// run serves the client until there's an error.
// If the client hangs up or the server is closed, run returns nil, otherwise run returns an error.
func (c *sclient) run(ctx context.Context) error {
// Launch sender, but don't return from run until sender goroutine is done.
var grp errgroup.Group
sendCtx, cancelSender := context.WithCancel(ctx)
grp.Go(func() error { return c.sendLoop(sendCtx) })
defer func() {
cancelSender()
if err := grp.Wait(); err != nil && !c.s.isClosed() {
c.logf("sender failed: %v", err)
}
}()
for {
ft, fl, err := readFrameHeader(c.br)
if err != nil {
if errors.Is(err, io.EOF) {
c.logf("read EOF")
return nil
}
if c.s.isClosed() {
c.logf("closing; server closed")
return nil
}
return fmt.Errorf("client %x: readFrameHeader: %w", c.key, err)
}
switch ft {
case frameNotePreferred:
err = c.handleFrameNotePreferred(ft, fl)
case frameSendPacket:
err = c.handleFrameSendPacket(ft, fl)
case frameForwardPacket:
err = c.handleFrameForwardPacket(ft, fl)
case frameWatchConns:
err = c.handleFrameWatchConns(ft, fl)
case frameClosePeer:
err = c.handleFrameClosePeer(ft, fl)
default:
err = c.handleUnknownFrame(ft, fl)
}
if err != nil {
return err
}
}
}
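// handleUnknownFrame discards the payload of a frame type this server does not recognize.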
func (c *sclient) handleUnknownFrame(ft frameType, fl uint32) error {
_, err := io.CopyN(ioutil.Discard, c.br, int64(fl))
return err
}
func (c *sclient) handleFrameNotePreferred(ft frameType, fl uint32) error {
if fl != 1 {
return fmt.Errorf("frameNotePreferred wrong size")
}
v, err := c.br.ReadByte()
if err != nil {
return fmt.Errorf("frameNotePreferred ReadByte: %v", err)
}
c.setPreferred(v != 0)
return nil
}
func (c *sclient) handleFrameWatchConns(ft frameType, fl uint32) error {
if fl != 0 {
return fmt.Errorf("handleFrameWatchConns wrong size")
}
if !c.canMesh {
return fmt.Errorf("insufficient permissions")
}
c.s.addWatcher(c)
return nil
}
func (c *sclient) handleFrameClosePeer(ft frameType, fl uint32) error {
if fl != keyLen {
return fmt.Errorf("handleFrameClosePeer wrong size")
}
if !c.canMesh {
return fmt.Errorf("insufficient permissions")
}
var targetKey key.Public
if _, err := io.ReadFull(c.br, targetKey[:]); err != nil {
return err
}
s := c.s
s.mu.Lock()
defer s.mu.Unlock()
if target, ok := s.clients[targetKey]; ok {
c.logf("frameClosePeer closing peer %x", targetKey)
go target.nc.Close()
} else {
c.logf("frameClosePeer failed to find peer %x", targetKey)
}
return nil
}
// handleFrameForwardPacket reads a "forward packet" frame from the client
// (which must be a trusted client, a peer in our mesh).
func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error {
if !c.canMesh {
return fmt.Errorf("insufficient permissions")
}
s := c.s
srcKey, dstKey, contents, err := s.recvForwardPacket(c.br, fl)
if err != nil {
return fmt.Errorf("client %x: recvForwardPacket: %v", c.key, err)
}
s.packetsForwardedIn.Add(1)
s.mu.Lock()
dst := s.clients[dstKey]
if dst != nil {
s.notePeerSendLocked(srcKey, dst)
}
s.mu.Unlock()
if dst == nil {
s.packetsDropped.Add(1)
s.packetsDroppedFwdUnknown.Add(1)
if debug {
c.logf("dropping forwarded packet for unknown %x", dstKey)
}
return nil
}
return c.sendPkt(dst, pkt{
bs: contents,
enqueuedAt: time.Now(),
src: srcKey,
})
}
// notePeerSendLocked records that src sent to dst. We keep track of
// that so when src disconnects, we can tell dst (if it's still
// around) that src is gone (a peerGone frame).
func (s *Server) notePeerSendLocked(src key.Public, dst *sclient) {
m, ok := s.sentTo[src]
if !ok {
m = map[key.Public]int64{}
s.sentTo[src] = m
}
m[dst.key] = dst.connNum
}
// handleFrameSendPacket reads a "send packet" frame from the client.
func (c *sclient) handleFrameSendPacket(ft frameType, fl uint32) error {
s := c.s
dstKey, contents, err := s.recvPacket(c.br, fl)
if err != nil {
return fmt.Errorf("client %x: recvPacket: %v", c.key, err)
}
var fwd PacketForwarder
s.mu.Lock()
dst := s.clients[dstKey]
if dst == nil {
fwd = s.clientsMesh[dstKey]
} else {
s.notePeerSendLocked(c.key, dst)
}
s.mu.Unlock()
if dst == nil {
if fwd != nil {
s.packetsForwardedOut.Add(1)
if err := fwd.ForwardPacket(c.key, dstKey, contents); err != nil {
// TODO:
return nil
}
return nil
}
s.packetsDropped.Add(1)
s.packetsDroppedUnknown.Add(1)
if debug {
c.logf("dropping packet for unknown %x", dstKey)
}
return nil
}
p := pkt{
bs: contents,
enqueuedAt: time.Now(),
src: c.key,
}
return c.sendPkt(dst, p)
}
func (c *sclient) sendPkt(dst *sclient, p pkt) error {
s := c.s
dstKey := dst.key
// Attempt to queue for sending up to 3 times. On each attempt, if
// the queue is full, try to drop from queue head to prioritize
// fresher packets.
for attempt := 0; attempt < 3; attempt++ {
select {
case <-dst.done:
s.packetsDropped.Add(1)
s.packetsDroppedGone.Add(1)
if debug {
c.logf("dropping packet for shutdown client %x", dstKey)
}
return nil
default:
}
select {
case dst.sendQueue <- p:
return nil
default:
}
select {
case pkt := <-dst.sendQueue:
s.packetsDropped.Add(1)
s.packetsDroppedQueueHead.Add(1)
if verboseDropKeys[dstKey] {
// Generate a full string including src and dst, so
// the limiter kicks in once per src.
msg := fmt.Sprintf("tail drop %s -> %s", p.src.ShortString(), dstKey.ShortString())
c.s.limitedLogf(msg)
}
c.recordQueueTime(pkt.enqueuedAt)
if debug {
c.logf("dropping packet from client %x queue head", dstKey)
}
default:
}
}
// Failed to make room for packet. This can happen in a heavily
// contended queue with racing writers. Give up and tail-drop in
// this case to keep reader unblocked.
s.packetsDropped.Add(1)
s.packetsDroppedQueueTail.Add(1)
if verboseDropKeys[dstKey] {
// Generate a full string including src and dst, so
// the limiter kicks in once per src.
msg := fmt.Sprintf("head drop %s -> %s", p.src.ShortString(), dstKey.ShortString())
c.s.limitedLogf(msg)
}
if debug {
c.logf("dropping packet from client %x queue tail", dstKey)
}
return nil
}
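// Illustrative sketch (not part of the upstream server): the same bounded-queue
// strategy sendPkt uses, reduced to a plain int channel so the drop policy is
// easier to see. The names tryEnqueue and q are hypothetical and exist only for
// this example; the real code additionally checks the receiver's done channel.
func tryEnqueue(q chan int, v int) (sent bool) {
	for attempt := 0; attempt < 3; attempt++ {
		select {
		case q <- v:
			// Room available: enqueue and stop.
			return true
		default:
		}
		select {
		case <-q:
			// Queue full: drop the oldest element and retry.
		default:
		}
	}
	// Still no room after 3 attempts: give up and tail-drop v.
	return false
}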
// requestPeerGoneWrite sends a request to write a "peer gone" frame
// that the provided peer has disconnected. It blocks until either the
// write request is scheduled, or the client has closed.
func (c *sclient) requestPeerGoneWrite(peer key.Public) {
select {
case c.peerGone <- peer:
case <-c.done:
}
}
func (c *sclient) requestMeshUpdate() {
if !c.canMesh {
panic("unexpected requestMeshUpdate")
}
select {
case c.meshUpdate <- struct{}{}:
case <-c.done:
}
}
func (s *Server) verifyClient(clientKey key.Public, info *clientInfo) error {
// TODO(crawshaw): implement policy constraints on who can use the DERP server
// TODO(bradfitz): ... and at what rate.
return nil
}
func (s *Server) sendServerKey(bw *bufio.Writer) error {
buf := make([]byte, 0, len(magic)+len(s.publicKey))
buf = append(buf, magic...)
buf = append(buf, s.publicKey[:]...)
return writeFrame(bw, frameServerKey, buf)
}
type serverInfo struct {
Version int `json:"version,omitempty"`
}
func (s *Server) sendServerInfo(bw *bufio.Writer, clientKey key.Public) error {
var nonce [24]byte
if _, err := crand.Read(nonce[:]); err != nil {
return err
}
msg, err := json.Marshal(serverInfo{Version: ProtocolVersion})
if err != nil {
return err
}
msgbox := box.Seal(nil, msg, &nonce, clientKey.B32(), s.privateKey.B32())
if err := writeFrameHeader(bw, frameServerInfo, nonceLen+uint32(len(msgbox))); err != nil {
return err
}
if _, err := bw.Write(nonce[:]); err != nil {
return err
}
if _, err := bw.Write(msgbox); err != nil {
return err
}
return bw.Flush()
}
// recvClientKey reads the frameClientInfo frame from the client (its
// proof of identity) upon its initial connection. It should be
// considered especially untrusted at this point.
func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.Public, info *clientInfo, err error) {
fl, err := readFrameTypeHeader(br, frameClientInfo)
if err != nil {
return zpub, nil, err
}
const minLen = keyLen + nonceLen
if fl < minLen {
return zpub, nil, errors.New("short client info")
}
// We don't trust the client at all yet, so limit its input size to limit
// things like JSON resource exhaustion (http://github.com/golang/go/issues/31789).
if fl > 256<<10 {
return zpub, nil, errors.New("long client info")
}
if _, err := io.ReadFull(br, clientKey[:]); err != nil {
return zpub, nil, err
}
var nonce [24]byte
if _, err := io.ReadFull(br, nonce[:]); err != nil {
return zpub, nil, fmt.Errorf("nonce: %v", err)
}
msgLen := int(fl - minLen)
msgbox := make([]byte, msgLen)
if _, err := io.ReadFull(br, msgbox); err != nil {
return zpub, nil, fmt.Errorf("msgbox: %v", err)
}
msg, ok := box.Open(nil, msgbox, &nonce, (*[32]byte)(&clientKey), s.privateKey.B32())
if !ok {
return zpub, nil, fmt.Errorf("msgbox: cannot open len=%d with client key %x", msgLen, clientKey[:])
}
info = new(clientInfo)
if err := json.Unmarshal(msg, info); err != nil {
return zpub, nil, fmt.Errorf("msg: %v", err)
}
return clientKey, info, nil
}
func (s *Server) recvPacket(br *bufio.Reader, frameLen uint32) (dstKey key.Public, contents []byte, err error) {
if frameLen < keyLen {
return zpub, nil, errors.New("short send packet frame")
}
if err := readPublicKey(br, &dstKey); err != nil {
return zpub, nil, err
}
packetLen := frameLen - keyLen
if packetLen > MaxPacketSize {
return zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, MaxPacketSize)
}
contents = make([]byte, packetLen)
if _, err := io.ReadFull(br, contents); err != nil {
return zpub, nil, err
}
s.packetsRecv.Add(1)
s.bytesRecv.Add(int64(len(contents)))
if disco.LooksLikeDiscoWrapper(contents) {
s.packetsRecvDisco.Add(1)
} else {
s.packetsRecvOther.Add(1)
}
return dstKey, contents, nil
}
// zpub is the key.Public zero value.
var zpub key.Public
func (s *Server) recvForwardPacket(br *bufio.Reader, frameLen uint32) (srcKey, dstKey key.Public, contents []byte, err error) {
if frameLen < keyLen*2 {
return zpub, zpub, nil, errors.New("short send packet frame")
}
if _, err := io.ReadFull(br, srcKey[:]); err != nil {
return zpub, zpub, nil, err
}
if _, err := io.ReadFull(br, dstKey[:]); err != nil {
return zpub, zpub, nil, err
}
packetLen := frameLen - keyLen*2
if packetLen > MaxPacketSize {
return zpub, zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, MaxPacketSize)
}
contents = make([]byte, packetLen)
if _, err := io.ReadFull(br, contents); err != nil {
return zpub, zpub, nil, err
}
// TODO: was s.packetsRecv.Add(1)
// TODO: was s.bytesRecv.Add(int64(len(contents)))
return srcKey, dstKey, contents, nil
}
// sclient is a client connection to the server.
//
// (The "s" prefix is to more explicitly distinguish it from Client in derp_client.go)
type sclient struct {
// Static after construction.
connNum int64 // process-wide unique counter, incremented each Accept
s *Server
nc Conn
key key.Public
info clientInfo
logf logger.Logf
done <-chan struct{} // closed when connection closes
remoteAddr string // usually ip:port from net.Conn.RemoteAddr().String()
sendQueue chan pkt // packets queued to this client; never closed
peerGone chan key.Public // write request that a previous sender has disconnected (not used by mesh peers)
meshUpdate chan struct{} // write request to write peerStateChange
canMesh bool // clientInfo had correct mesh token for inter-region routing
// Owned by run, not thread-safe.
br *bufio.Reader
connectedAt time.Time
preferred bool
// Owned by sender, not thread-safe.
bw *bufio.Writer
// Guarded by s.mu
//
// peerStateChange is used by mesh peers (a set of regional
// DERP servers) and contains records that need to be sent to
// the client for them to update their map of who's connected
// to this node.
peerStateChange []peerConnState
}
// peerConnState represents whether a peer is connected to the server
// or not.
type peerConnState struct {
peer key.Public
present bool
}
// pkt is a request to write a data frame to an sclient.
type pkt struct {
	// src is the sender of the packet.
src key.Public
// enqueuedAt is when a packet was put onto a queue before it was sent,
// and is used for reporting metrics on the duration of packets in the queue.
enqueuedAt time.Time
// bs is the data packet bytes.
// The memory is owned by pkt.
bs []byte
}
func (c *sclient) setPreferred(v bool) {
if c.preferred == v {
return
}
c.preferred = v
var homeMove *expvar.Int
if v {
c.s.curHomeClients.Add(1)
homeMove = &c.s.homeMovesIn
} else {
c.s.curHomeClients.Add(-1)
homeMove = &c.s.homeMovesOut
}
// Keep track of varz for home serve moves in/out. But ignore
// the initial packet set when a client connects, which we
// assume happens within 5 seconds. In any case, just for
// graphs, so not important to miss a move. But it shouldn't:
// the netcheck/re-STUNs in magicsock only happen about every
// 30 seconds.
if time.Since(c.connectedAt) > 5*time.Second {
homeMove.Add(1)
}
}
// expMovingAverage returns the new moving average given the previous average,
// a new value, and an alpha decay factor.
// https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
func expMovingAverage(prev, newValue, alpha float64) float64 {
return alpha*newValue + (1-alpha)*prev
}
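// Worked example (illustrative, not part of the upstream code): with
// alpha = 0.1, a previous average of 10ms and a new sample of 30ms give
// expMovingAverage(10, 30, 0.1) = 0.1*30 + 0.9*10 = 12ms, so each new
// sample moves the average by 10% of its distance from the old value.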
// recordQueueTime updates the average queue duration metric after a packet has been sent.
func (c *sclient) recordQueueTime(enqueuedAt time.Time) {
elapsed := float64(time.Since(enqueuedAt).Milliseconds())
for {
old := atomic.LoadUint64(c.s.avgQueueDuration)
newAvg := expMovingAverage(math.Float64frombits(old), elapsed, 0.1)
if atomic.CompareAndSwapUint64(c.s.avgQueueDuration, old, math.Float64bits(newAvg)) {
break
}
}
}
func (c *sclient) sendLoop(ctx context.Context) error {
defer func() {
// If the sender shuts down unilaterally due to an error, close so
// that the receive loop unblocks and cleans up the rest.
c.nc.Close()
// Drain the send queue to count dropped packets
for {
select {
case <-c.sendQueue:
c.s.packetsDropped.Add(1)
c.s.packetsDroppedGone.Add(1)
if debug {
c.logf("dropping packet for shutdown %x", c.key)
}
default:
return
}
}
}()
jitter := time.Duration(rand.Intn(5000)) * time.Millisecond
keepAliveTick := time.NewTicker(keepAlive + jitter)
defer keepAliveTick.Stop()
var werr error // last write error
for {
if werr != nil {
return werr
}
// First, a non-blocking select (with a default) that
// does as many non-flushing writes as possible.
select {
case <-ctx.Done():
return nil
case peer := <-c.peerGone:
werr = c.sendPeerGone(peer)
continue
case <-c.meshUpdate:
werr = c.sendMeshUpdates()
continue
case msg := <-c.sendQueue:
werr = c.sendPacket(msg.src, msg.bs)
c.recordQueueTime(msg.enqueuedAt)
continue
case <-keepAliveTick.C:
werr = c.sendKeepAlive()
continue
default:
// Flush any writes from the 3 sends above, or from
// the blocking loop below.
if werr = c.bw.Flush(); werr != nil {
return werr
}
}
// Then a blocking select with same:
select {
case <-ctx.Done():
return nil
case peer := <-c.peerGone:
werr = c.sendPeerGone(peer)
case <-c.meshUpdate:
werr = c.sendMeshUpdates()
continue
case msg := <-c.sendQueue:
werr = c.sendPacket(msg.src, msg.bs)
c.recordQueueTime(msg.enqueuedAt)
case <-keepAliveTick.C:
werr = c.sendKeepAlive()
}
}
}
func (c *sclient) setWriteDeadline() {
c.nc.SetWriteDeadline(time.Now().Add(writeTimeout))
}
// sendKeepAlive sends a keep-alive frame, without flushing.
func (c *sclient) sendKeepAlive() error {
c.setWriteDeadline()
return writeFrameHeader(c.bw, frameKeepAlive, 0)
}
// sendPeerGone sends a peerGone frame, without flushing.
func (c *sclient) sendPeerGone(peer key.Public) error {
c.s.peerGoneFrames.Add(1)
c.setWriteDeadline()
if err := writeFrameHeader(c.bw, framePeerGone, keyLen); err != nil {
return err
}
_, err := c.bw.Write(peer[:])
return err
}
// sendPeerPresent sends a peerPresent frame, without flushing.
func (c *sclient) sendPeerPresent(peer key.Public) error {
c.setWriteDeadline()
if err := writeFrameHeader(c.bw, framePeerPresent, keyLen); err != nil {
return err
}
_, err := c.bw.Write(peer[:])
return err
}
// sendMeshUpdates drains as many mesh peerStateChange entries as
// possible into the write buffer WITHOUT flushing or otherwise
// blocking (as it holds c.s.mu while working). If it can't drain them
// all, it schedules itself to be called again in the future.
func (c *sclient) sendMeshUpdates() error {
c.s.mu.Lock()
defer c.s.mu.Unlock()
writes := 0
for _, pcs := range c.peerStateChange {
if c.bw.Available() <= frameHeaderLen+keyLen {
break
}
var err error
if pcs.present {
err = c.sendPeerPresent(pcs.peer)
} else {
err = c.sendPeerGone(pcs.peer)
}
if err != nil {
// Shouldn't happen, though, as we're writing
// into available buffer space, not the
// network.
return err
}
writes++
}
remain := copy(c.peerStateChange, c.peerStateChange[writes:])
c.peerStateChange = c.peerStateChange[:remain]
// Did we manage to write them all into the bufio buffer without flushing?
if len(c.peerStateChange) == 0 {
if cap(c.peerStateChange) > 16 {
c.peerStateChange = nil
}
} else {
// Didn't finish in the buffer space provided; schedule a future run.
go c.requestMeshUpdate()
}
return nil
}
// sendPacket writes contents to the client in a RecvPacket frame. If
// srcKey.IsZero, uses the old DERPv1 framing format, otherwise uses
// DERPv2. The bytes of contents are only valid until this function
// returns, do not retain slices.
// It does not flush its bufio.Writer.
func (c *sclient) sendPacket(srcKey key.Public, contents []byte) (err error) {
defer func() {
// Stats update.
if err != nil {
c.s.packetsDropped.Add(1)
c.s.packetsDroppedWrite.Add(1)
if debug {
c.logf("dropping packet to %x: %v", c.key, err)
}
} else {
c.s.packetsSent.Add(1)
c.s.bytesSent.Add(int64(len(contents)))
}
}()
c.setWriteDeadline()
withKey := !srcKey.IsZero()
pktLen := len(contents)
if withKey {
pktLen += len(srcKey)
}
if err = writeFrameHeader(c.bw, frameRecvPacket, uint32(pktLen)); err != nil {
return err
}
if withKey {
err := writePublicKey(c.bw, &srcKey)
if err != nil {
return err
}
}
_, err = c.bw.Write(contents)
return err
}
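// Illustrative wire layout, inferred from sendPacket above (not an
// authoritative protocol spec): a frameRecvPacket carries either just the
// payload (srcKey is zero, the DERPv1 form) or srcKey followed by the payload
// (DERPv2), so the frame length field is len(contents) or keyLen+len(contents)
// respectively.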
// AddPacketForwarder registers fwd as a packet forwarder for dst.
// fwd must be comparable.
func (s *Server) AddPacketForwarder(dst key.Public, fwd PacketForwarder) {
s.mu.Lock()
defer s.mu.Unlock()
if prev, ok := s.clientsMesh[dst]; ok {
if prev == fwd {
// Duplicate registration of same forwarder. Ignore.
return
}
if m, ok := prev.(multiForwarder); ok {
		if _, ok := m[fwd]; ok {
// Duplicate registration of same forwarder in set; ignore.
return
}
m[fwd] = m.maxVal() + 1
return
}
if prev != nil {
// Otherwise, the existing value is not a set,
// not a dup, and not local-only (nil) so make
// it a set.
fwd = multiForwarder{
prev: 1, // existed 1st, higher priority
fwd: 2, // the passed in fwd is in 2nd place
}
s.multiForwarderCreated.Add(1)
}
}
s.clientsMesh[dst] = fwd
}
// RemovePacketForwarder removes fwd as a packet forwarder for dst.
// fwd must be comparable.
func (s *Server) RemovePacketForwarder(dst key.Public, fwd PacketForwarder) {
s.mu.Lock()
defer s.mu.Unlock()
v, ok := s.clientsMesh[dst]
if !ok {
return
}
if m, ok := v.(multiForwarder); ok {
if len(m) < 2 {
panic("unexpected")
}
delete(m, fwd)
// If fwd was in m and we no longer need to be a
// multiForwarder, replace the entry with the
// remaining PacketForwarder.
if len(m) == 1 {
var remain PacketForwarder
for k := range m {
remain = k
}
s.clientsMesh[dst] = remain
s.multiForwarderDeleted.Add(1)
}
return
}
if v != fwd {
s.removePktForwardOther.Add(1)
// Delete of an entry that wasn't in the
// map. Harmless, so ignore.
// (This might happen if a user is moving around
// between nodes and/or the server sent duplicate
// connection change broadcasts.)
return
}
if _, isLocal := s.clients[dst]; isLocal {
s.clientsMesh[dst] = nil
} else {
delete(s.clientsMesh, dst)
s.notePeerGoneFromRegionLocked(dst)
}
}
// multiForwarder is a PacketForwarder that represents a set of
// forwarding options. It's used in the rare cases that a client is
// connected to multiple DERP nodes in a region. That shouldn't really
// happen except for perhaps during brief moments while the client is
// reconfiguring, in which case we don't want to forget where the
// client is. The map value is unique connection number; the lowest
// one has been seen the longest. It's used to make sure we forward
// packets consistently to the same node and don't pick randomly.
type multiForwarder map[PacketForwarder]uint8
func (m multiForwarder) maxVal() (max uint8) {
for _, v := range m {
if v > max {
max = v
}
}
return
}
func (m multiForwarder) ForwardPacket(src, dst key.Public, payload []byte) error {
var fwd PacketForwarder
var lowest uint8
for k, v := range m {
if fwd == nil || v < lowest {
fwd = k
lowest = v
}
}
return fwd.ForwardPacket(src, dst, payload)
}
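// Illustrative example (hypothetical values, not part of the upstream code):
// given m := multiForwarder{fwdA: 1, fwdB: 2}, fwdA has the lowest value, so
// m.ForwardPacket(...) always routes through fwdA until it is removed,
// keeping forwarding sticky to the longest-lived connection rather than
// picking randomly between the two.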
func (s *Server) expVarFunc(f func() interface{}) expvar.Func {
return expvar.Func(func() interface{} {
s.mu.Lock()
defer s.mu.Unlock()
return f()
})
}
// ExpVar returns an expvar variable suitable for registering with expvar.Publish.
func (s *Server) ExpVar() expvar.Var {
m := new(metrics.Set)
m.Set("counter_unique_clients_ever", s.expVarFunc(func() interface{} { return len(s.clientsEver) }))
m.Set("gauge_memstats_sys0", expvar.Func(func() interface{} { return int64(s.memSys0) }))
m.Set("gauge_watchers", s.expVarFunc(func() interface{} { return len(s.watchers) }))
m.Set("gauge_current_connections", &s.curClients)
m.Set("gauge_current_home_connections", &s.curHomeClients)
m.Set("gauge_clients_total", expvar.Func(func() interface{} { return len(s.clientsMesh) }))
m.Set("gauge_clients_local", expvar.Func(func() interface{} { return len(s.clients) }))
m.Set("gauge_clients_remote", expvar.Func(func() interface{} { return len(s.clientsMesh) - len(s.clients) }))
m.Set("accepts", &s.accepts)
m.Set("clients_replaced", &s.clientsReplaced)
m.Set("bytes_received", &s.bytesRecv)
m.Set("bytes_sent", &s.bytesSent)
m.Set("packets_dropped", &s.packetsDropped)
m.Set("counter_packets_dropped_reason", &s.packetsDroppedReason)
m.Set("counter_packets_received_kind", &s.packetsRecvByKind)
m.Set("packets_sent", &s.packetsSent)
m.Set("packets_received", &s.packetsRecv)
m.Set("unknown_frames", &s.unknownFrames)
m.Set("home_moves_in", &s.homeMovesIn)
m.Set("home_moves_out", &s.homeMovesOut)
m.Set("peer_gone_frames", &s.peerGoneFrames)
m.Set("packets_forwarded_out", &s.packetsForwardedOut)
m.Set("packets_forwarded_in", &s.packetsForwardedIn)
m.Set("multiforwarder_created", &s.multiForwarderCreated)
m.Set("multiforwarder_deleted", &s.multiForwarderDeleted)
m.Set("packet_forwarder_delete_other_value", &s.removePktForwardOther)
m.Set("average_queue_duration_ms", expvar.Func(func() interface{} {
return math.Float64frombits(atomic.LoadUint64(s.avgQueueDuration))
}))
var expvarVersion expvar.String
expvarVersion.Set(version.Long)
m.Set("version", &expvarVersion)
return m
}
func (s *Server) ConsistencyCheck() error {
s.mu.Lock()
defer s.mu.Unlock()
var errs []string
var nilMeshNotInClient int
for k, f := range s.clientsMesh {
if f == nil {
if _, ok := s.clients[k]; !ok {
nilMeshNotInClient++
}
}
}
if nilMeshNotInClient != 0 {
errs = append(errs, fmt.Sprintf("%d s.clientsMesh keys not in s.clients", nilMeshNotInClient))
}
var clientNotInMesh int
for k := range s.clients {
if _, ok := s.clientsMesh[k]; !ok {
clientNotInMesh++
}
}
if clientNotInMesh != 0 {
errs = append(errs, fmt.Sprintf("%d s.clients keys not in s.clientsMesh", clientNotInMesh))
}
if s.curClients.Value() != int64(len(s.clients)) {
errs = append(errs, fmt.Sprintf("expvar connections = %d != clients map says of %d",
s.curClients.Value(),
len(s.clients)))
}
if len(errs) == 0 {
return nil
}
return errors.New(strings.Join(errs, ", "))
}
// readPublicKey reads key from br.
// It is ~4x slower than io.ReadFull(br, key),
// but it prevents key from escaping and thus being allocated.
// If io.ReadFull(br, key) does not cause key to escape, use that instead.
func readPublicKey(br *bufio.Reader, key *key.Public) error {
// Do io.ReadFull(br, key), but one byte at a time, to avoid allocation.
for i := range key {
b, err := br.ReadByte()
if err != nil {
return err
}
key[i] = b
}
return nil
}
// writePublicKey writes key to bw.
// It is ~3x slower than bw.Write(key[:]),
// but it prevents key from escaping and thus being allocated.
// If bw.Write(key[:]) does not cause key to escape, use that instead.
func writePublicKey(bw *bufio.Writer, key *key.Public) error {
// Do bw.Write(key[:]), but one byte at a time to avoid allocation.
for _, b := range key {
err := bw.WriteByte(b)
if err != nil {
return err
}
}
return nil
}
|
[
"\"DERP_DEBUG_LOGS\"",
"\"TS_DEBUG_VERBOSE_DROPS\""
] |
[] |
[
"DERP_DEBUG_LOGS",
"TS_DEBUG_VERBOSE_DROPS"
] |
[]
|
["DERP_DEBUG_LOGS", "TS_DEBUG_VERBOSE_DROPS"]
|
go
| 2 | 0 | |
web/app/app/settings.py
|
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', 'TRUE') == 'TRUE'
ALLOWED_HOSTS = str(os.environ.get('DOMAIN')).split(",")
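# Illustrative example (hypothetical value, not part of the original settings):
# DOMAIN="example.com,api.example.com" yields
# ALLOWED_HOSTS == ['example.com', 'api.example.com']; if DOMAIN is unset this
# evaluates to ['None'], which matches no real host.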
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'rest_framework',
'rest_framework.authtoken',
'dj_rest_auth',
'allauth',
'allauth.account',
'allauth.socialaccount',
'dj_rest_auth.registration',
'rest_framework_swagger',
'dbbackup',
'storages',
# Local
'users.apps.UsersConfig',
'core',
'django_extensions',
]
DBBACKUP_STORAGE = 'storages.backends.dropbox.DropBoxStorage'
DBBACKUP_STORAGE_OPTIONS = {
'oauth2_access_token': os.environ.get('DROPBOX_API_TOKEN', 'my_token'),
'root_path': 'mncntrl',
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': os.environ.get('SQL_ENGINE', 'django.db.backends.sqlite3'),
'NAME': os.environ.get('SQL_DATABASE', os.path.join(BASE_DIR, 'db.sqlite3')),
'USER': os.environ.get('SQL_USER', 'user'),
'PASSWORD': os.environ.get('SQL_PASSWORD', 'password'),
'HOST': os.environ.get('SQL_HOST', 'localhost'),
'PORT': os.environ.get('SQL_PORT', '5432'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'Asia/Yekaterinburg'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
AUTH_USER_MODEL = 'users.CustomUser'
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': [
# enable session auth if you want to use drf web api
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
],
}
# enable registration by email
# username generates automatically
# https://stackoverflow.com/questions/36698456/django-rest-auth-email-instead-of-username
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
# if you want users to confirm email
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
SWAGGER_SETTINGS = {
'USE_SESSION_AUTH': False,
'LOGIN_URL': 'login',
'LOGOUT_URL': 'logout',
}
SITE_ID = 3
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.yandex.ru'
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = 'Команда Mncntrl.ru <[email protected]>'
|
[] |
[] |
[
"EMAIL_HOST_PASSWORD",
"DOMAIN",
"EMAIL_HOST_USER",
"SQL_PASSWORD",
"SQL_ENGINE",
"SQL_DATABASE",
"SQL_USER",
"SECRET_KEY",
"SQL_PORT",
"DROPBOX_API_TOKEN",
"DEBUG",
"SQL_HOST"
] |
[]
|
["EMAIL_HOST_PASSWORD", "DOMAIN", "EMAIL_HOST_USER", "SQL_PASSWORD", "SQL_ENGINE", "SQL_DATABASE", "SQL_USER", "SECRET_KEY", "SQL_PORT", "DROPBOX_API_TOKEN", "DEBUG", "SQL_HOST"]
|
python
| 12 | 0 | |
naucse/views.py
|
import datetime
from pathlib import Path
import functools
import calendar
import os
from flask import Flask, render_template, jsonify, url_for, Response, abort, g, redirect
from flask import send_from_directory
import ics
from arca import Arca
from naucse import models
from naucse.urlconverters import register_url_converters
from naucse.templates import setup_jinja_env
app = Flask('naucse')
app.config['JSON_AS_ASCII'] = False
@app.before_request
def _get_model():
"""Set `g.model` to the root of the naucse model
A single model is used (and stored in app config).
In debug mode (elsa serve), the model is re-initialized for each request,
so changes are picked up.
In non-debug mode (elsa freeze), the model is initialized once, and
frozen (so all course data is requested and rendered upfront).
"""
freezing = os.environ.get('NAUCSE_FREEZE', not app.config['DEBUG'])
initialize = True
try:
g.model = app.config['NAUCSE_MODEL']
except KeyError:
g.model = init_model()
app.config['NAUCSE_MODEL'] = g.model
else:
if freezing:
# Model already initialized; don't look for changes
return
# (Re-)initialize model
g.model.load_licenses(Path(app.root_path).parent / 'licenses')
g.model.load_local_courses(Path(app.root_path).parent)
if freezing:
g.model.freeze()
def init_model():
trusted = os.environ.get('NAUCSE_TRUSTED_REPOS', None)
if trusted is None:
trusted_repo_patterns = ()
else:
trusted_repo_patterns = tuple(
line for line in trusted.split() if line
)
return models.Root(
url_factories={
'api': {
models.Root: lambda **kw: url_for('api', **kw),
models.Course: lambda **kw: url_for('course_api', **kw),
models.RunYear: lambda **kw: url_for('run_year_api', **kw),
},
'web': {
models.Lesson: lambda **kw: url_for('page',
page_slug='index', **kw),
models.Page: lambda **kw: url_for('page', **kw),
models.Solution: lambda **kw: url_for('solution', **kw),
models.Course: lambda **kw: url_for('course', **kw),
models.Session: lambda **kw: url_for('session', **kw),
models.SessionPage: lambda **kw: url_for(
'session', **kw),
models.StaticFile: lambda **kw: url_for('page_static', **kw),
models.Root: lambda **kw: url_for('index', **kw)
},
},
schema_url_factory=lambda m, is_input, **kw: url_for(
'schema', model_slug=m.model_slug,
is_input=is_input, **kw),
arca=Arca(settings={
"ARCA_BACKEND": "arca.backend.CurrentEnvironmentBackend",
"ARCA_BACKEND_CURRENT_ENVIRONMENT_REQUIREMENTS": "requirements.txt",
"ARCA_BACKEND_VERBOSITY": 2,
"ARCA_BACKEND_KEEP_CONTAINER_RUNNING": True,
"ARCA_BACKEND_USE_REGISTRY_NAME": "docker.io/naucse/naucse.python.cz",
"ARCA_SINGLE_PULL": True,
"ARCA_IGNORE_CACHE_ERRORS": True,
"ARCA_CACHE_BACKEND": "dogpile.cache.dbm",
"ARCA_CACHE_BACKEND_ARGUMENTS": {
"filename": ".arca/cache/naucse.dbm"
},
"ARCA_BASE_DIR": str(Path('.arca').resolve()),
}),
trusted_repo_patterns=trusted_repo_patterns,
)
register_url_converters(app)
setup_jinja_env(app.jinja_env)
@app.route('/')
def index():
return render_template("index.html", edit_info=g.model.edit_info)
@app.route('/courses/')
def courses():
return render_template(
"course_list.html",
featured_courses=g.model.featured_courses,
edit_info=g.model.course_edit_info,
)
@app.route('/runs/')
@app.route('/<int:year>/')
@app.route('/runs/<any(all):all>/')
def runs(year=None, all=None):
# XXX: Simplify?
today = datetime.date.today()
# List of years to show in the pagination
# If the current year is not there (no runs that start in the current year
# yet), add it manually
all_years = sorted(g.model.explicit_run_years)
if today.year not in all_years:
all_years.append(today.year)
first_year, last_year = min(all_years), max(all_years)
if year is not None:
if year > last_year:
# Instead of showing a future year, redirect to the 'Current' page
return redirect(url_for('runs'))
if year not in all_years:
# Otherwise, if there are no runs in requested year, return 404.
abort(404)
if all is not None:
run_data = {}
courses = g.model.courses
for slug, course in g.model.courses.items():
if course.start_date:
run_data.setdefault(course.start_date.year, {})[slug] = course
paginate_prev = {'year': first_year}
paginate_next = {'all': 'all'}
elif year is None:
# Show runs that are either ongoing or ended in the last 3 months
runs = {**g.model.run_years.get(today.year, {}),
**g.model.run_years.get(today.year - 1, {})}
ongoing = {slug: run for slug, run in runs.items()
if run.end_date >= today}
cutoff = today - datetime.timedelta(days=3*31)
recent = {slug: run for slug, run in runs.items()
if today > run.end_date > cutoff}
run_data = {"ongoing": ongoing, "recent": recent}
paginate_prev = {'year': None}
paginate_next = {'year': last_year}
else:
run_data = {year: g.model.run_years.get(year, {})}
past_years = [y for y in all_years if y < year]
if past_years:
paginate_next = {'year': max(past_years)}
else:
paginate_next = {'all': 'all'}
future_years = [y for y in all_years if y > year]
if future_years:
paginate_prev = {'year': min(future_years)}
else:
paginate_prev = {'year': None}
return render_template(
"run_list.html",
run_data=run_data,
today=datetime.date.today(),
year=year,
all=all,
all_years=all_years,
paginate_next=paginate_next,
paginate_prev=paginate_prev,
edit_info=g.model.runs_edit_info,
)
@app.route('/<course:course_slug>/')
def course(course_slug, year=None):
try:
course = g.model.courses[course_slug]
except KeyError:
print(g.model.courses)
abort(404)
recent_runs = course.get_recent_derived_runs()
return render_template(
"course.html",
course=course,
recent_runs=recent_runs,
edit_info=course.edit_info,
)
@app.route('/<course:course_slug>/sessions/<session_slug>/',
defaults={'page_slug': 'front'})
@app.route('/<course:course_slug>/sessions/<session_slug>/<page_slug>/')
def session(course_slug, session_slug, page_slug):
try:
course = g.model.courses[course_slug]
session = course.sessions[session_slug]
page = session.pages[page_slug]
except KeyError:
abort(404)
template = {
'front': 'coverpage.html',
'back': 'backpage.html',
}[page.slug]
materials_by_type = {}
for material in session.materials:
materials_by_type.setdefault(material.type, []).append(material)
return render_template(
template,
session=session,
course=session.course,
edit_info=session.edit_info,
materials_by_type=materials_by_type,
page=page,
)
def _get_canonicality_info(lesson):
"""Get canonical URL -- i.e., a lesson from 'lessons' with the same slug"""
# XXX: This could be made much more fancy
lessons_course = g.model.get_course('lessons')
is_canonical_lesson = (lessons_course == lesson.course)
if is_canonical_lesson:
canonical_url = None
else:
if lessons_course._has_lesson(lesson.slug):
canonical = lessons_course.lessons[lesson.slug]
canonical_url = canonical.get_url(external=True)
else:
canonical_url = None
return is_canonical_lesson, canonical_url
@app.route('/<course:course_slug>/<lesson:lesson_slug>/',
defaults={'page_slug': 'index'})
@app.route('/<course:course_slug>/<lesson:lesson_slug>/<page_slug>/')
def page(course_slug, lesson_slug, page_slug='index'):
try:
course = g.model.courses[course_slug]
lesson = course.lessons[lesson_slug]
page = lesson.pages[page_slug]
except KeyError:
raise abort(404)
is_canonical_lesson, canonical_url = _get_canonicality_info(lesson)
return render_template(
"lesson.html",
page=page,
content=page.content,
course=course,
canonical_url=canonical_url,
is_canonical_lesson=is_canonical_lesson,
page_attribution=page.attribution,
edit_info=page.edit_info,
)
@app.route('/<course:course_slug>/<lesson:lesson_slug>/<page_slug>'
+ '/solutions/<int:solution_index>/')
def solution(course_slug, lesson_slug, page_slug, solution_index):
try:
course = g.model.courses[course_slug]
lesson = course.lessons[lesson_slug]
page = lesson.pages[page_slug]
solution = page.solutions[solution_index]
except KeyError:
raise abort(404)
is_canonical_lesson, canonical_url = _get_canonicality_info(lesson)
return render_template(
"lesson.html",
page=page,
content=solution.content,
course=course,
canonical_url=canonical_url,
is_canonical_lesson=is_canonical_lesson,
page_attribution=page.attribution,
edit_info=page.edit_info,
solution=solution,
)
@app.route('/<course:course_slug>/<lesson:lesson_slug>/static/<path:filename>')
def page_static(course_slug, lesson_slug, filename):
try:
course = g.model.courses[course_slug]
lesson = course.lessons[lesson_slug]
static = lesson.static_files[filename]
except KeyError:
raise abort(404)
print('sending', static.base_path, static.filename)
return send_from_directory(static.base_path, static.path)
def list_months(start_date, end_date):
"""Return a span of months as a list of (year, month) tuples
The months of start_date and end_date are both included.
"""
months = []
year = start_date.year
month = start_date.month
while (year, month) <= (end_date.year, end_date.month):
months.append((year, month))
month += 1
if month > 12:
month = 1
year += 1
return months
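# Illustrative example (not part of the original module): spanning a year
# boundary, list_months(datetime.date(2019, 11, 20), datetime.date(2020, 2, 1))
# returns [(2019, 11), (2019, 12), (2020, 1), (2020, 2)].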
@app.route('/<course:course_slug>/calendar/')
def course_calendar(course_slug):
try:
course = g.model.courses[course_slug]
except KeyError:
abort(404)
if not course.start_date:
abort(404)
sessions_by_date = {
s.date: s for s in course.sessions.values()
if hasattr(s, 'date')
}
return render_template(
'course_calendar.html',
course=course,
sessions_by_date=sessions_by_date,
months=list_months(course.start_date, course.end_date),
calendar=calendar.Calendar(),
edit_info=course.edit_info,
)
@app.route('/<course:course_slug>/calendar.ics')
def course_calendar_ics(course_slug):
try:
course = g.model.courses[course_slug]
except KeyError:
abort(404)
if not course.start_date:
abort(404)
events = []
for session in course.sessions.values():
time = getattr(session, 'time', None)
if time is None:
# Sessions without times don't show up in the calendar
continue
created = os.environ.get('NAUCSE_CALENDAR_DTSTAMP', None)
cal_event = ics.Event(
name=session.title,
begin=time['start'],
end=time['end'],
uid=session.get_url(external=True),
created=created,
)
events.append(cal_event)
cal = ics.Calendar(events=events)
return Response(str(cal), mimetype="text/calendar")
@app.route('/v0/schema/<is_input:is_input>.json', defaults={'model_slug': 'root'})
@app.route('/v0/schema/<is_input:is_input>/<model_slug>.json')
def schema(model_slug, is_input):
try:
cls = models.models[model_slug]
except KeyError:
abort(404)
return jsonify(models.get_schema(cls, is_input=is_input))
@app.route('/v0/naucse.json')
def api():
return jsonify(models.dump(g.model))
@app.route('/v0/years/<int:year>.json')
def run_year_api(year):
try:
run_year = g.model.run_years[year]
except KeyError:
abort(404)
return jsonify(models.dump(run_year))
@app.route('/v0/<course:course_slug>.json')
def course_api(course_slug):
try:
course = g.model.courses[course_slug]
except KeyError:
abort(404)
return jsonify(models.dump(course))
|
[] |
[] |
[
"NAUCSE_TRUSTED_REPOS",
"NAUCSE_FREEZE",
"NAUCSE_CALENDAR_DTSTAMP"
] |
[]
|
["NAUCSE_TRUSTED_REPOS", "NAUCSE_FREEZE", "NAUCSE_CALENDAR_DTSTAMP"]
|
python
| 3 | 0 | |
Lib/distutils/_msvccompiler.py
|
"""distutils._msvccompiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for Microsoft Visual Studio 2015.
The module is compatible with VS 2015 and later. You can find legacy support
for older versions in distutils.msvc9compiler and distutils.msvccompiler.
"""
# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
# finding DevStudio (through the registry)
# ported to VS 2005 and VS 2008 by Christian Heimes
# ported to VS 2015 by Steve Dower
import os
import shutil
import stat
import subprocess
from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import CCompiler, gen_lib_options
from distutils import log
from distutils.util import get_platform
import winreg
from itertools import count
def _find_vcvarsall(plat_spec):
with winreg.OpenKeyEx(
winreg.HKEY_LOCAL_MACHINE,
r"Software\Microsoft\VisualStudio\SxS\VC7",
access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY
) as key:
if not key:
log.debug("Visual C++ is not registered")
return None, None
best_version = 0
best_dir = None
for i in count():
try:
v, vc_dir, vt = winreg.EnumValue(key, i)
except OSError:
break
if v and vt == winreg.REG_SZ and os.path.isdir(vc_dir):
try:
version = int(float(v))
except (ValueError, TypeError):
continue
if version >= 14 and version > best_version:
best_version, best_dir = version, vc_dir
if not best_version:
log.debug("No suitable Visual C++ version found")
return None, None
vcvarsall = os.path.join(best_dir, "vcvarsall.bat")
if not os.path.isfile(vcvarsall):
log.debug("%s cannot be found", vcvarsall)
return None, None
vcruntime = None
vcruntime_spec = _VCVARS_PLAT_TO_VCRUNTIME_REDIST.get(plat_spec)
if vcruntime_spec:
vcruntime = os.path.join(best_dir,
vcruntime_spec.format(best_version))
if not os.path.isfile(vcruntime):
log.debug("%s cannot be found", vcruntime)
vcruntime = None
return vcvarsall, vcruntime
def _get_vc_env(plat_spec):
if os.getenv("DISTUTILS_USE_SDK"):
return {
key.lower(): value
for key, value in os.environ.items()
}
vcvarsall, vcruntime = _find_vcvarsall(plat_spec)
if not vcvarsall:
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
try:
out = subprocess.check_output(
'"{}" {} && set'.format(vcvarsall, plat_spec),
shell=True,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
except subprocess.CalledProcessError as exc:
log.error(exc.output)
raise DistutilsPlatformError("Error executing {}"
.format(exc.cmd))
env = {
key.lower(): value
for key, _, value in
(line.partition('=') for line in out.splitlines())
if key and value
}
if vcruntime:
env['py_vcruntime_redist'] = vcruntime
return env
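# Illustrative example (hypothetical output, not part of the original module):
# a line such as "INCLUDE=C:\\VC\\include" captured from the "set" command
# becomes the entry {'include': 'C:\\VC\\include'} in the returned dict, since
# keys are lowercased and empty keys or values are skipped.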
def _find_exe(exe, paths=None):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
if not paths:
paths = os.getenv('path').split(os.pathsep)
for p in paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
return exe
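# Illustrative usage (hypothetical paths, not part of the original module):
# with no explicit search paths, _find_exe("cl.exe") walks each entry of the
# PATH environment variable and returns the first existing absolute path,
# e.g. r"C:\BuildTools\VC\bin\cl.exe"; if nothing matches it falls back to
# returning the bare name "cl.exe".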
# A map keyed by get_platform() return values to values accepted by
# 'vcvarsall.bat'. Note a cross-compile may combine these (eg, 'x86_amd64' is
# the param to cross-compile on x86 targeting amd64.)
PLAT_TO_VCVARS = {
'win32' : 'x86',
'win-amd64' : 'amd64',
}
# A map keyed by get_platform() return values to the file under
# the VC install directory containing the vcruntime redistributable.
_VCVARS_PLAT_TO_VCRUNTIME_REDIST = {
'x86' : 'redist\\x86\\Microsoft.VC{0}0.CRT\\vcruntime{0}0.dll',
'amd64' : 'redist\\x64\\Microsoft.VC{0}0.CRT\\vcruntime{0}0.dll',
'x86_amd64' : 'redist\\x64\\Microsoft.VC{0}0.CRT\\vcruntime{0}0.dll',
}
# A set containing the DLLs that are guaranteed to be available for
# all micro versions of this Python version. Known extension
# dependencies that are not in this set will be copied to the output
# path.
_BUNDLED_DLLS = frozenset(['vcruntime140.dll'])
class MSVCCompiler(CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
# target platform (.plat_name is consistent with 'bdist')
self.plat_name = None
self.initialized = False
def initialize(self, plat_name=None):
# multi-init means we would need to check platform same each time...
assert not self.initialized, "don't init multiple times"
if plat_name is None:
plat_name = get_platform()
# sanity check for platforms to prevent obscure errors later.
if plat_name not in PLAT_TO_VCVARS:
raise DistutilsPlatformError("--plat-name must be one of {}"
.format(tuple(PLAT_TO_VCVARS)))
# On x86, 'vcvarsall.bat amd64' creates an env that doesn't work;
# to cross compile, you use 'x86_amd64'.
# On AMD64, 'vcvarsall.bat amd64' is a native build env; to cross
# compile use 'x86' (ie, it runs the x86 compiler directly)
if plat_name == get_platform() or plat_name == 'win32':
# native build or cross-compile to win32
plat_spec = PLAT_TO_VCVARS[plat_name]
else:
# cross compile from win32 -> some 64bit
plat_spec = '{}_{}'.format(
PLAT_TO_VCVARS[get_platform()],
PLAT_TO_VCVARS[plat_name]
)
vc_env = _get_vc_env(plat_spec)
if not vc_env:
raise DistutilsPlatformError("Unable to find a compatible "
"Visual Studio installation.")
self._paths = vc_env.get('path', '')
paths = self._paths.split(os.pathsep)
self.cc = _find_exe("cl.exe", paths)
self.linker = _find_exe("link.exe", paths)
self.lib = _find_exe("lib.exe", paths)
self.rc = _find_exe("rc.exe", paths) # resource compiler
self.mc = _find_exe("mc.exe", paths) # message compiler
        self.mt = _find_exe("mt.exe", paths) # manifest tool
self._vcruntime_redist = vc_env.get('py_vcruntime_redist', '')
for dir in vc_env.get('include', '').split(os.pathsep):
if dir:
self.add_include_dir(dir)
for dir in vc_env.get('lib', '').split(os.pathsep):
if dir:
self.add_library_dir(dir)
self.preprocess_options = None
# If vcruntime_redist is available, link against it dynamically. Otherwise,
# use /MT[d] to build statically, then switch from libucrt[d].lib to ucrt[d].lib
# later to dynamically link to ucrtbase but not vcruntime.
self.compile_options = [
'/nologo', '/Ox', '/W3', '/GL', '/DNDEBUG'
]
self.compile_options.append('/MD' if self._vcruntime_redist else '/MT')
self.compile_options_debug = [
'/nologo', '/Od', '/MDd', '/Zi', '/W3', '/D_DEBUG'
]
ldflags = [
'/nologo', '/INCREMENTAL:NO', '/LTCG'
]
if not self._vcruntime_redist:
ldflags.extend(('/nodefaultlib:libucrt.lib', 'ucrt.lib'))
ldflags_debug = [
'/nologo', '/INCREMENTAL:NO', '/LTCG', '/DEBUG:FULL'
]
self.ldflags_exe = [*ldflags, '/MANIFEST:EMBED,ID=1']
self.ldflags_exe_debug = [*ldflags_debug, '/MANIFEST:EMBED,ID=1']
self.ldflags_shared = [*ldflags, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO']
self.ldflags_shared_debug = [*ldflags_debug, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO']
self.ldflags_static = [*ldflags]
self.ldflags_static_debug = [*ldflags_debug]
self._ldflags = {
(CCompiler.EXECUTABLE, None): self.ldflags_exe,
(CCompiler.EXECUTABLE, False): self.ldflags_exe,
(CCompiler.EXECUTABLE, True): self.ldflags_exe_debug,
(CCompiler.SHARED_OBJECT, None): self.ldflags_shared,
(CCompiler.SHARED_OBJECT, False): self.ldflags_shared,
(CCompiler.SHARED_OBJECT, True): self.ldflags_shared_debug,
(CCompiler.SHARED_LIBRARY, None): self.ldflags_static,
(CCompiler.SHARED_LIBRARY, False): self.ldflags_static,
(CCompiler.SHARED_LIBRARY, True): self.ldflags_static_debug,
}
self.initialized = True
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
ext_map = {
**{ext: self.obj_extension for ext in self.src_extensions},
**{ext: self.res_extension for ext in self._rc_extensions + self._mc_extensions},
}
output_dir = output_dir or ''
def make_out_path(p):
base, ext = os.path.splitext(p)
if strip_dir:
base = os.path.basename(base)
else:
_, base = os.path.splitdrive(base)
if base.startswith((os.path.sep, os.path.altsep)):
base = base[1:]
try:
# XXX: This may produce absurdly long paths. We should check
# the length of the result and trim base until we fit within
# 260 characters.
return os.path.join(output_dir, base + ext_map[ext])
except LookupError:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError("Don't know how to compile {}".format(p))
return list(map(make_out_path, source_filenames))
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
add_cpp_opts = False
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
add_cpp_opts = True
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts + [output_opt, input_opt])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src])
base, _ = os.path.splitext(os.path.basename (src))
rc_file = os.path.join(rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc, "/fo" + obj, rc_file])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile {} to {}"
.format(src, obj))
args = [self.cc] + compile_opts + pp_opts
if add_cpp_opts:
args.append('/EHsc')
args.append(input_opt)
args.append("/Fo" + obj)
args.extend(extra_postargs)
try:
self.spawn(args)
except DistutilsExecError as msg:
raise CompileError(msg)
return objects
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
log.debug('Executing "%s" %s', self.lib, ' '.join(lib_args))
self.spawn([self.lib] + lib_args)
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
objects, output_dir = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
libraries, library_dirs, runtime_library_dirs = fixed_args
if runtime_library_dirs:
self.warn("I don't know what to do with 'runtime_library_dirs': "
+ str(runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
ldflags = self._ldflags[target_desc, debug]
export_opts = ["/EXPORT:" + sym for sym in (export_symbols or [])]
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
build_temp = os.path.dirname(objects[0])
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
build_temp,
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
output_dir = os.path.dirname(os.path.abspath(output_filename))
self.mkpath(output_dir)
try:
log.debug('Executing "%s" %s', self.linker, ' '.join(ld_args))
self.spawn([self.linker] + ld_args)
self._copy_vcruntime(output_dir)
except DistutilsExecError as msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def _copy_vcruntime(self, output_dir):
vcruntime = self._vcruntime_redist
if not vcruntime or not os.path.isfile(vcruntime):
return
if os.path.basename(vcruntime).lower() in _BUNDLED_DLLS:
return
log.debug('Copying "%s"', vcruntime)
vcruntime = shutil.copy(vcruntime, output_dir)
os.chmod(vcruntime, stat.S_IWRITE)
def spawn(self, cmd):
old_path = os.getenv('path')
try:
os.environ['path'] = self._paths
return super().spawn(cmd)
finally:
os.environ['path'] = old_path
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options() function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename(name))
if os.path.isfile(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
|
[] |
[] |
[
"DISTUTILS_USE_SDK",
"path"
] |
[]
|
["DISTUTILS_USE_SDK", "path"]
|
python
| 2 | 0 | |
tweet_feed/settings.py
|
# settings.py
import os
from os.path import join, dirname
from dotenv import load_dotenv
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
CONSUMER_KEY = os.environ.get("CONSUMER_KEY")
CONSUMER_SECRET = os.environ.get("CONSUMER_SECRET")
ACCESS_TOKEN = os.environ.get("ACCESS_TOKEN")
ACCESS_SECRET = os.environ.get("ACCESS_SECRET")
BROKER_URL = os.environ.get("BROKER_URL")
|
[] |
[] |
[
"CONSUMER_SECRET",
"CONSUMER_KEY",
"ACCESS_SECRET",
"BROKER_URL",
"ACCESS_TOKEN"
] |
[]
|
["CONSUMER_SECRET", "CONSUMER_KEY", "ACCESS_SECRET", "BROKER_URL", "ACCESS_TOKEN"]
|
python
| 5 | 0 | |
pkg/popeye.go
|
package pkg
import (
"bufio"
"bytes"
"context"
"encoding/xml"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/derailed/popeye/internal"
"github.com/derailed/popeye/internal/issues"
"github.com/derailed/popeye/internal/k8s"
"github.com/derailed/popeye/internal/report"
"github.com/derailed/popeye/internal/sanitize"
"github.com/derailed/popeye/internal/scrub"
"github.com/derailed/popeye/pkg/config"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
var (
// LogFile the path to our logs.
	LogFile = filepath.Join(os.TempDir(), "popeye.log")
	// DumpDir indicates a directory location for sanitizer reports.
DumpDir = dumpDir()
)
const outFmt = "sanitizer_%s_%d.%s"
func (p *Popeye) fileName() string {
return fmt.Sprintf(outFmt, p.client.ActiveCluster(), time.Now().UnixNano(), p.fileExt())
}
func (p *Popeye) fileExt() string {
switch *p.flags.Output {
case "json":
return "json"
case "junit":
return "xml"
case "yaml":
return "yml"
case "html":
return "html"
default:
return "txt"
}
}
func dumpDir() string {
if d := os.Getenv("POPEYE_REPORT_DIR"); d != "" {
return d
}
return filepath.Join(os.TempDir(), "popeye")
}
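// Illustrative example (hypothetical value, not part of the upstream code):
// with POPEYE_REPORT_DIR=/reports set in the environment, DumpDir resolves to
// "/reports"; otherwise it falls back to a "popeye" directory under the OS
// temp dir (e.g. /tmp/popeye on Linux).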
type (
scrubFn func(context.Context, *scrub.Cache, *issues.Codes) scrub.Sanitizer
// Popeye a kubernetes sanitizer.
Popeye struct {
client *k8s.Client
config *config.Config
outputTarget io.ReadWriteCloser
log *zerolog.Logger
flags *config.Flags
builder *report.Builder
aliases *internal.Aliases
}
)
// NewPopeye returns a new sanitizer.
func NewPopeye(flags *config.Flags, log *zerolog.Logger) (*Popeye, error) {
cfg, err := config.NewConfig(flags)
if err != nil {
return nil, err
}
a := internal.NewAliases()
p := Popeye{
client: k8s.NewClient(flags),
config: cfg,
log: log,
flags: flags,
aliases: a,
builder: report.NewBuilder(a),
}
return &p, nil
}
// Init configures popeye prior to sanitization.
func (p *Popeye) Init() error {
if !isSet(p.flags.Save) {
return p.ensureOutput()
}
if err := ensurePath(DumpDir, 0755); err != nil {
return err
}
return p.ensureOutput()
}
// Sanitize scans a cluster for potential issues.
func (p *Popeye) Sanitize() error {
defer func() {
switch {
case isSet(p.flags.Save):
if err := p.outputTarget.Close(); err != nil {
log.Fatal().Err(err).Msg("Closing report")
}
case isSetStr(p.flags.S3Bucket):
			// Create a single AWS session (we can reuse this if we're uploading many files)
s, err := session.NewSession(&aws.Config{
LogLevel: aws.LogLevel(aws.LogDebugWithRequestErrors)})
if err != nil {
log.Fatal().Err(err).Msg("Create S3 Session")
}
// Create an uploader with the session and default options
uploader := s3manager.NewUploader(s)
// Upload input parameters
upParams := &s3manager.UploadInput{
Bucket: p.flags.S3Bucket,
Key: aws.String(p.fileName()),
Body: p.outputTarget,
}
// Perform an upload.
if _, err = uploader.Upload(upParams); err != nil {
log.Fatal().Err(err).Msg("S3 Upload")
}
default:
}
}()
if err := p.sanitize(); err != nil {
return err
}
return p.dump(true)
}
func (p *Popeye) dumpJunit() error {
res, err := p.builder.ToJunit(config.Level(p.config.LinterLevel()))
if err != nil {
return err
}
if _, err := p.outputTarget.Write([]byte(xml.Header)); err != nil {
return err
}
fmt.Fprintf(p.outputTarget, "%v\n", res)
return nil
}
func (p *Popeye) dumpYAML() error {
res, err := p.builder.ToYAML()
if err != nil {
return err
}
fmt.Fprintf(p.outputTarget, "%v\n", res)
return nil
}
func (p *Popeye) dumpJSON() error {
res, err := p.builder.ToJSON()
if err != nil {
return err
}
fmt.Fprintf(p.outputTarget, "%v\n", res)
return nil
}
func (p *Popeye) dumpHTML() error {
res, err := p.builder.ToHTML()
if err != nil {
return err
}
fmt.Fprintf(p.outputTarget, "%v\n", res)
return nil
}
func (p *Popeye) dumpScore() error {
res, err := p.builder.ToScore()
if err != nil {
return err
}
fmt.Fprintf(p.outputTarget, "%v\n", res)
return nil
}
func (p *Popeye) dumpStd(mode, header bool) error {
var (
w = bufio.NewWriter(p.outputTarget)
s = report.NewSanitizer(w, mode)
)
if header {
p.builder.PrintHeader(s)
}
mx, err := p.client.ClusterHasMetrics()
if err != nil {
mx = false
}
p.builder.PrintClusterInfo(s, p.client.ActiveCluster(), mx)
p.builder.PrintReport(config.Level(p.config.LinterLevel()), s)
p.builder.PrintSummary(s)
return w.Flush()
}
func (p *Popeye) dumpPrometheus() error {
pusher := p.builder.ToPrometheus(
p.flags.PushGatewayAddress,
p.client.ActiveNamespace(),
)
return pusher.Add()
}
// Dump prints out sanitizer report.
func (p *Popeye) dump(printHeader bool) error {
if !p.builder.HasContent() {
return errors.New("Nothing to report, check section name or permissions")
}
p.builder.SetClusterName(p.client.ActiveCluster())
var err error
switch p.flags.OutputFormat() {
case report.JunitFormat:
err = p.dumpJunit()
case report.YAMLFormat:
err = p.dumpYAML()
case report.JSONFormat:
err = p.dumpJSON()
case report.HTMLFormat:
err = p.dumpHTML()
case report.PrometheusFormat:
err = p.dumpPrometheus()
case report.ScoreFormat:
err = p.dumpScore()
default:
err = p.dumpStd(p.flags.OutputFormat() == report.JurassicFormat, printHeader)
}
return err
}
func (p *Popeye) sanitizers() map[string]scrubFn {
return map[string]scrubFn{
"cluster": scrub.NewCluster,
"configmap": scrub.NewConfigMap,
"secret": scrub.NewSecret,
"deployment": scrub.NewDeployment,
"daemonset": scrub.NewDaemonSet,
"horizontalpodautoscaler": scrub.NewHorizontalPodAutoscaler,
"namespace": scrub.NewNamespace,
"node": scrub.NewNode,
"persistentvolume": scrub.NewPersistentVolume,
"persistentvolumeclaim": scrub.NewPersistentVolumeClaim,
"pod": scrub.NewPod,
"replicaset": scrub.NewReplicaSet,
"service": scrub.NewService,
"serviceaccount": scrub.NewServiceAccount,
"statefulset": scrub.NewStatefulSet,
"poddisruptionbudget": scrub.NewPodDisruptionBudget,
"ingress": scrub.NewIngress,
"networkpolicy": scrub.NewNetworkPolicy,
"podsecuritypolicy": scrub.NewPodSecurityPolicy,
"clusterrole": scrub.NewClusterRole,
"clusterrolebinding": scrub.NewClusterRoleBinding,
"role": scrub.NewRole,
"rolebinding": scrub.NewRoleBinding,
}
}
type readWriteCloser struct {
io.ReadWriter
}
func (wC readWriteCloser) Close() error {
return nil
}
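// NopWriter wraps an io.ReadWriter with a no-op Close so it satisfies io.ReadWriteCloser
// (used below to buffer S3-bound reports in memory).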
func NopWriter(i io.ReadWriter) io.ReadWriteCloser {
return &readWriteCloser{i}
}
func (p *Popeye) ensureOutput() error {
p.outputTarget = os.Stdout
if !isSet(p.flags.Save) && !isSetStr(p.flags.S3Bucket) {
return nil
}
if p.flags.Output == nil {
out := "standard"
p.flags.Output = &out
}
var (
f io.ReadWriteCloser
err error
)
switch {
case isSet(p.flags.Save):
fPath := filepath.Join(DumpDir, p.fileName())
f, err = os.Create(fPath)
if err != nil {
return err
}
fmt.Println(fPath)
case isSetStr(p.flags.S3Bucket):
f = NopWriter(bytes.NewBufferString(""))
}
p.outputTarget = f
return nil
}
func (p *Popeye) sanitize() error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx = context.WithValue(
ctx,
sanitize.PopeyeKey("OverAllocs"),
*p.flags.CheckOverAllocs,
)
cache := scrub.NewCache(p.client, p.config)
codes, err := issues.LoadCodes()
if err != nil {
return err
}
codes.Refine(p.config.Codes)
sections := make([]string, 0, len(p.sanitizers()))
for section := range p.sanitizers() {
sections = append(sections, section)
}
sort.StringSlice(sections).Sort()
for _, section := range sections {
if !in(p.aliases.ToResources(p.config.Sections()), section) {
continue
}
// Skip node checks if active namespace is set.
if section == "node" && p.client.ActiveNamespace() != "" {
continue
}
ctx = context.WithValue(ctx, internal.KeyRun, internal.RunInfo{Section: section})
s := p.sanitizers()[section](ctx, cache, codes)
if err := s.Sanitize(ctx); err != nil {
p.builder.AddError(err)
continue
}
o := s.Outcome().Filter(config.Level(p.config.LinterLevel()))
tally := report.NewTally()
tally.Rollup(o)
p.builder.AddSection(section, o, tally)
}
return nil
}
// ----------------------------------------------------------------------------
// Helpers...
func isSet(b *bool) bool {
return b != nil && *b
}
func isSetStr(s *string) bool {
return s != nil && *s != ""
}
func ensurePath(path string, mod os.FileMode) error {
dir, err := filepath.Abs(path)
if err != nil {
return err
}
_, err = os.Stat(dir)
if err == nil || !os.IsNotExist(err) {
return nil
}
if err = os.MkdirAll(dir, mod); err != nil {
return fmt.Errorf("Fail to create popeye sanitizers dump dir: %v", err)
}
return nil
}
func in(list []string, member string) bool {
if len(list) == 0 {
return true
}
for _, m := range list {
if m == member {
return true
}
}
return false
}
|
[
"\"POPEYE_REPORT_DIR\""
] |
[] |
[
"POPEYE_REPORT_DIR"
] |
[]
|
["POPEYE_REPORT_DIR"]
|
go
| 1 | 0 | |
src/main/java/Main.java
|
import Controller.Controller;
import Model.IntegralInfo;
import java.sql.*;
import java.util.HashMap;
import java.util.ArrayList;
import java.util.Map;
import java.net.URI;
import java.net.URISyntaxException;
import static spark.Spark.*;
import spark.template.freemarker.FreeMarkerEngine;
import spark.ModelAndView;
import static spark.Spark.get;
import com.heroku.sdk.jdbc.DatabaseUrl;
import java.util.List;
/**
*
* @author SantiagoAvila
*/
public class Main {
/**
* Main method, where the endpoints are defined
*
* @param args command line arguments
*/
public static void main(String[] args) {
port(Integer.valueOf(System.getenv("PORT")));
staticFileLocation("/public");
get("/simpsonIntegralXValue", (req, res) -> {
final String FILE_NAME = "dataset.txt";
List<IntegralInfo> data;
Controller controller = new Controller();
data = controller.loadClassInfo(FILE_NAME);
String dataString = "<p><br><table border=\"1\">";
dataString += "<tr align=\"center\"><td><b>p</b></td><td><b>dof</b></td><td><b>Expected X<br>Value</b></td><td><b>Actual X<br>Value</b></td></tr>";
for(IntegralInfo integralCase : data) {
integralCase = controller.findXValue(integralCase);
dataString += String.format("<tr><td>%f</td><td>%f</td><td>%f</td><td>%f</td></tr>", integralCase.getExpectedResult(), integralCase.getDegreesOfFreedom(), integralCase.getExpectedUpperLimit(), integralCase.getIntegralUpperLimit());
}
dataString += "</table><br>";
return dataString;
});
get("/", (request, response) -> {
Map<String, Object> attributes = new HashMap<>();
attributes.put("message", "Hello World!");
return new ModelAndView(attributes, "index.ftl");
}, new FreeMarkerEngine());
get("/db", (req, res) -> {
Connection connection = null;
Map<String, Object> attributes = new HashMap<>();
try {
connection = DatabaseUrl.extract().getConnection();
Statement stmt = connection.createStatement();
stmt.executeUpdate("CREATE TABLE IF NOT EXISTS ticks (tick timestamp)");
stmt.executeUpdate("INSERT INTO ticks VALUES (now())");
ResultSet rs = stmt.executeQuery("SELECT tick FROM ticks");
ArrayList<String> output = new ArrayList<String>();
while (rs.next()) {
output.add( "Read from DB: " + rs.getTimestamp("tick"));
}
attributes.put("results", output);
return new ModelAndView(attributes, "db.ftl");
} catch (Exception e) {
attributes.put("message", "There was an error: " + e);
return new ModelAndView(attributes, "error.ftl");
} finally {
if (connection != null) try{connection.close();} catch(SQLException e){}
}
}, new FreeMarkerEngine());
}
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
java
| 1 | 0 | |
python/tvm/rpc/server.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC server implementation.
Note
----
Server is TCP based with the following protocol:
- Initial handshake to the peer
- [RPC_MAGIC, keysize(int32), key-bytes]
- The key is in format
- {server|client}:device-type[:random-key] [-timeout=timeout]
"""
# pylint: disable=invalid-name
import os
import ctypes
import socket
import select
import struct
import logging
import multiprocessing
import subprocess
import time
import sys
import signal
import platform
import tvm._ffi
from tvm._ffi.base import py_str
from tvm._ffi.libinfo import find_lib_path
from tvm.runtime.module import load_module as _load_module
from tvm.contrib import util
from . import _ffi_api
from . import base
from .base import TrackerCode
logger = logging.getLogger("RPCServer")
def _server_env(load_library, work_path=None):
"""Server environment function return temp dir"""
if work_path:
temp = work_path
else:
temp = util.tempdir()
# pylint: disable=unused-variable
@tvm._ffi.register_func("tvm.rpc.server.workpath", override=True)
def get_workpath(path):
return temp.relpath(path)
@tvm._ffi.register_func("tvm.rpc.server.load_module", override=True)
def load_module(file_name):
"""Load module from remote side."""
path = temp.relpath(file_name)
m = _load_module(path)
logger.info("load_module %s", path)
return m
@tvm._ffi.register_func("tvm.rpc.server.download_linked_module", override=True)
def download_linked_module(file_name):
"""Load module from remote side."""
# pylint: disable=import-outside-toplevel
path = temp.relpath(file_name)
if path.endswith(".o"):
# Extra dependencies during runtime.
from tvm.contrib import cc as _cc
_cc.create_shared(path + ".so", path)
path += ".so"
elif path.endswith(".tar"):
# Extra dependencies during runtime.
from tvm.contrib import cc as _cc, tar as _tar
tar_temp = util.tempdir(custom_path=path.replace(".tar", ""))
_tar.untar(path, tar_temp.temp_dir)
files = [tar_temp.relpath(x) for x in tar_temp.listdir()]
_cc.create_shared(path + ".so", files)
path += ".so"
elif path.endswith(".dylib") or path.endswith(".so"):
pass
else:
raise RuntimeError("Do not know how to link %s" % file_name)
logger.info("Send linked module %s to client", path)
return bytearray(open(path, "rb").read())
libs = []
load_library = load_library.split(":") if load_library else []
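# For example (illustrative), load_library="libfoo.so:libbar.so" resolves each
# entry via find_lib_path and loads it with ctypes so the RPC session can use it.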
for file_name in load_library:
file_name = find_lib_path(file_name)[0]
libs.append(ctypes.CDLL(file_name, ctypes.RTLD_GLOBAL))
logger.info("Load additional library %s", file_name)
temp.libs = libs
return temp
def _serve_loop(sock, addr, load_library, work_path=None):
"""Server loop"""
sockfd = sock.fileno()
temp = _server_env(load_library, work_path)
_ffi_api.ServerLoop(sockfd)
if not work_path:
temp.remove()
logger.info("Finish serving %s", addr)
def _parse_server_opt(opts):
# parse client options
ret = {}
for kv in opts:
if kv.startswith("-timeout="):
ret["timeout"] = float(kv[9:])
return ret
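# For example, _parse_server_opt(["-timeout=60"]) returns {"timeout": 60.0};
# options it does not recognize are ignored.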
def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr):
"""Listening loop of the server master."""
def _accept_conn(listen_sock, tracker_conn, ping_period=2):
"""Accept connection from the other places.
Parameters
----------
listen_sock: Socket
The socket used by listening process.
tracker_conn : connection to tracker
Tracker connection
ping_period : float, optional
Ping the tracker every `ping_period` seconds if no connection is accepted.
"""
old_keyset = set()
# Report resource to tracker
if tracker_conn:
matchkey = base.random_key(rpc_key + ":")
base.sendjson(tracker_conn, [TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
else:
matchkey = rpc_key
unmatch_period_count = 0
unmatch_timeout = 4
# Wait until we get a valid connection
while True:
if tracker_conn:
trigger = select.select([listen_sock], [], [], ping_period)
if listen_sock not in trigger[0]:
base.sendjson(tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS])
pending_keys = base.recvjson(tracker_conn)
old_keyset.add(matchkey)
# if match key not in pending key set
# it means the key is acquired by a client but not used.
if matchkey not in pending_keys:
unmatch_period_count += 1
else:
unmatch_period_count = 0
# regenerate match key if key is acquired but not used for a while
if unmatch_period_count * ping_period > unmatch_timeout + ping_period:
logger.info("no incoming connections, regenerate key ...")
matchkey = base.random_key(rpc_key + ":", old_keyset)
base.sendjson(
tracker_conn, [TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr]
)
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
unmatch_period_count = 0
continue
conn, addr = listen_sock.accept()
magic = struct.unpack("<i", base.recvall(conn, 4))[0]
if magic != base.RPC_MAGIC:
conn.close()
continue
keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
key = py_str(base.recvall(conn, keylen))
arr = key.split()
expect_header = "client:" + matchkey
server_key = "server:" + rpc_key
if arr[0] != expect_header:
conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH))
conn.close()
logger.warning("mismatch key from %s", addr)
continue
conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
conn.sendall(struct.pack("<i", len(server_key)))
conn.sendall(server_key.encode("utf-8"))
return conn, addr, _parse_server_opt(arr[1:])
# Server logic
tracker_conn = None
while True:
try:
# step 1: setup tracker and report to tracker
if tracker_addr and tracker_conn is None:
tracker_conn = base.connect_with_retry(tracker_addr)
tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
magic = struct.unpack("<i", base.recvall(tracker_conn, 4))[0]
if magic != base.RPC_TRACKER_MAGIC:
raise RuntimeError("%s is not RPC Tracker" % str(tracker_addr))
# report status of current queue
cinfo = {"key": "server:" + rpc_key}
base.sendjson(tracker_conn, [TrackerCode.UPDATE_INFO, cinfo])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
# step 2: wait for in-coming connections
conn, addr, opts = _accept_conn(sock, tracker_conn)
except (socket.error, IOError):
# retry when tracker is dropped
if tracker_conn:
tracker_conn.close()
tracker_conn = None
continue
except RuntimeError as exc:
raise exc
# step 3: serving
work_path = util.tempdir()
logger.info("connection from %s", addr)
server_proc = multiprocessing.Process(
target=_serve_loop, args=(conn, addr, load_library, work_path)
)
server_proc.daemon = True
server_proc.start()
# close from our side.
conn.close()
# wait until server process finish or timeout
server_proc.join(opts.get("timeout", None))
if server_proc.is_alive():
logger.info("Timeout in RPC session, kill..")
# pylint: disable=import-outside-toplevel
import psutil
parent = psutil.Process(server_proc.pid)
# terminate worker children
for child in parent.children(recursive=True):
child.terminate()
# terminate the worker
server_proc.terminate()
work_path.remove()
def _connect_proxy_loop(addr, key, load_library):
key = "server:" + key
retry_count = 0
max_retry = 5
retry_period = 5
while True:
try:
sock = socket.socket(base.get_addr_family(addr), socket.SOCK_STREAM)
sock.connect(addr)
sock.sendall(struct.pack("<i", base.RPC_MAGIC))
sock.sendall(struct.pack("<i", len(key)))
sock.sendall(key.encode("utf-8"))
magic = struct.unpack("<i", base.recvall(sock, 4))[0]
if magic == base.RPC_CODE_DUPLICATE:
raise RuntimeError("key: %s has already been used in proxy" % key)
if magic == base.RPC_CODE_MISMATCH:
logger.warning("RPCProxy do not have matching client key %s", key)
elif magic != base.RPC_CODE_SUCCESS:
raise RuntimeError("%s is not RPC Proxy" % str(addr))
keylen = struct.unpack("<i", base.recvall(sock, 4))[0]
remote_key = py_str(base.recvall(sock, keylen))
opts = _parse_server_opt(remote_key.split()[1:])
logger.info("connected to %s", str(addr))
process = multiprocessing.Process(target=_serve_loop, args=(sock, addr, load_library))
process.daemon = True
process.start()
sock.close()
process.join(opts.get("timeout", None))
if process.is_alive():
logger.info("Timeout in RPC session, kill..")
process.terminate()
retry_count = 0
except (socket.error, IOError) as err:
retry_count += 1
logger.warning("Error encountered %s, retry in %g sec", str(err), retry_period)
if retry_count > max_retry:
raise RuntimeError("Maximum retry error: last error: %s" % str(err))
time.sleep(retry_period)
def _popen(cmd):
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Server invoke error:\n"
msg += py_str(out)
raise RuntimeError(msg)
class Server(object):
"""Start RPC server on a separate process.
This is a simple python implementation based on multi-processing.
It is also possible to implement a similar C based server with
TVM runtime which does not depend on the python.
Parameters
----------
host : str
The host url of the server.
port : int
The port to be bind to
port_end : int, optional
The end port to search
is_proxy : bool, optional
Whether the address specified is a proxy.
If this is true, the host and port actually corresponds to the
address of the proxy server.
use_popen : bool, optional
Whether to use Popen to start a fresh new process instead of fork.
This is recommended to switch on if we want to do local RPC demonstration
for GPU devices to avoid fork safety issues.
tracker_addr: Tuple (str, int) , optional
The address of RPC Tracker in tuple(host, port) format.
If is not None, the server will register itself to the tracker.
key : str, optional
The key used to identify the device type in tracker.
load_library : str, optional
List of additional libraries to be loaded during execution.
custom_addr: str, optional
Custom IP Address to Report to RPC Tracker
silent: bool, optional
Whether run this server in silent mode.
"""
def __init__(
self,
host,
port=9091,
port_end=9199,
is_proxy=False,
use_popen=False,
tracker_addr=None,
key="",
load_library=None,
custom_addr=None,
silent=False,
utvm_dev_id=None,
utvm_dev_config_args=None,
):
try:
if _ffi_api.ServerLoop is None:
raise RuntimeError("Please compile with USE_RPC=1")
except NameError:
raise RuntimeError("Please compile with USE_RPC=1")
self.host = host
self.port = port
self.libs = []
self.custom_addr = custom_addr
self.use_popen = use_popen
if silent:
logger.setLevel(logging.ERROR)
if use_popen:
cmd = [
sys.executable,
"-m",
"tvm.exec.rpc_server",
"--host=%s" % host,
"--port=%s" % port,
"--port-end=%s" % port_end,
]
if tracker_addr:
assert key
cmd += ["--tracker=%s:%d" % tracker_addr, "--key=%s" % key]
if load_library:
cmd += ["--load-library", load_library]
if custom_addr:
cmd += ["--custom-addr", custom_addr]
if silent:
cmd += ["--silent"]
if utvm_dev_id is not None:
assert utvm_dev_config_args is not None
cmd += [f"--utvm-dev-id={utvm_dev_id}"]
cmd += [f"--utvm-dev-config-args={utvm_dev_config_args}"]
# preexec_fn is not thread safe and may result in deadlock.
# python 3.2 introduced the start_new_session parameter as
# an alternative to the common use case of
# preexec_fn=os.setsid. Once the minimum version of python
# supported by TVM reaches python 3.2 this code can be
# rewritten in favour of start_new_session. In the
# interim, stop the pylint diagnostic.
#
# pylint: disable=subprocess-popen-preexec-fn
if platform.system() == "Windows":
self.proc = subprocess.Popen(cmd, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
self.proc = subprocess.Popen(cmd, preexec_fn=os.setsid)
time.sleep(0.5)
elif not is_proxy:
sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)
self.port = None
for my_port in range(port, port_end):
try:
sock.bind((host, my_port))
self.port = my_port
break
except socket.error as sock_err:
if sock_err.errno in [98, 48]:
continue
raise sock_err
if not self.port:
raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
logger.info("bind to %s:%d", host, self.port)
sock.listen(1)
self.sock = sock
self.proc = multiprocessing.Process(
target=_listen_loop,
args=(self.sock, self.port, key, tracker_addr, load_library, self.custom_addr),
)
self.proc.daemon = True
self.proc.start()
else:
self.proc = multiprocessing.Process(
target=_connect_proxy_loop, args=((host, port), key, load_library)
)
self.proc.daemon = True
self.proc.start()
def terminate(self):
"""Terminate the server process"""
if self.use_popen:
if self.proc:
if platform.system() == "Windows":
os.kill(self.proc.pid, signal.CTRL_C_EVENT)
else:
os.killpg(self.proc.pid, signal.SIGTERM)
self.proc = None
else:
if self.proc:
self.proc.terminate()
self.proc = None
def __del__(self):
self.terminate()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
sdk/agrifood/azure-agrifood-farming/samples/async/sample_cascade_delete_async.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
FILE: sample_cascade_delete_async.py
DESCRIPTION:
This sample demonstrates
- Getting a filtered list of farmers based on last modified timestamp
- Queuing a cascade delete job on a farmer, and polling for it to complete
USAGE:
python sample_cascade_delete_async.py
Set the environment variables with your own values before running the sample:
- `AZURE_TENANT_ID`: The tenant ID of your active directory application.
- `AZURE_CLIENT_ID`: The client ID of your active directory application.
- `AZURE_CLIENT_SECRET`: The client secret of your active directory application.
- `FARMBEATS_ENDPOINT`: The FarmBeats endpoint that you want to run these samples on.
"""
from azure.identity.aio import DefaultAzureCredential
from azure.agrifood.farming.aio import FarmBeatsClient
import os
from datetime import datetime, timedelta
from random import randint
from isodate import UTC
import asyncio
from dotenv import load_dotenv
async def sample_cascade_delete_async():
farmbeats_endpoint = os.environ['FARMBEATS_ENDPOINT']
credential = DefaultAzureCredential()
client = FarmBeatsClient(
endpoint=farmbeats_endpoint,
credential=credential
)
job_id_prefix = "cascade-delete-job"
# Getting list of farmers modified in the last 7 days
print("Getting list of recently modified farmer id's... ", end="", flush=True)
farmers = client.farmers.list(
min_last_modified_date_time=datetime.now(tz=UTC) - timedelta(days=7)
)
farmer_ids = [farmer.id async for farmer in farmers]
print("Done")
# Ask for the id of the farmer which is to be deleted.
print(f"Recentely modified farmer id's:")
print(*farmer_ids, sep="\n")
farmer_id_to_delete = input("Please enter the id of the farmer you wish to delete resources for: ").strip()
if farmer_id_to_delete not in farmer_ids:
raise SystemExit("Entered id for farmer does not exist.")
# Deleting the farmer and its associated resources. Queuing the cascade delete job.
job_id = f"{job_id_prefix}-{randint(0, 1000)}"
print(f"Queuing cascade delete job {job_id}... ", end="", flush=True)
cascade_delete_job_poller = await client.farmers.begin_create_cascade_delete_job(
job_id=job_id,
farmer_id = farmer_id_to_delete
)
print("Queued. Waiting for completion... ", end="", flush=True)
await cascade_delete_job_poller.result()
print("The job completed with status", cascade_delete_job_poller.status())
await client.close()
await credential.close()
if __name__ == "__main__":
load_dotenv()
asyncio.get_event_loop().run_until_complete(sample_cascade_delete_async())
|
[] |
[] |
[
"FARMBEATS_ENDPOINT"
] |
[]
|
["FARMBEATS_ENDPOINT"]
|
python
| 1 | 0 | |
device/src/main/java/com/leo/device/root/Const.java
|
package com.leo.device.root;
import java.util.ArrayList;
import java.util.Arrays;
/**
* Created by mat on 19/06/15.
*/
public final class Const {
public static final String BINARY_SU = "su";
public static final String BINARY_BUSYBOX = "busybox";
private Const() throws InstantiationException {
throw new InstantiationException("This class is not for instantiation");
}
public static final String[] knownRootAppsPackages = {
"com.noshufou.android.su",
"com.noshufou.android.su.elite",
"eu.chainfire.supersu",
"com.koushikdutta.superuser",
"com.thirdparty.superuser",
"com.yellowes.su",
"com.topjohnwu.magisk",
"com.kingroot.kinguser",
"com.kingo.root",
"com.smedialink.oneclickroot",
"com.zhiqupk.root.global",
"com.alephzain.framaroot"
};
public static final String[] knownDangerousAppsPackages = {
"com.koushikdutta.rommanager",
"com.koushikdutta.rommanager.license",
"com.dimonvideo.luckypatcher",
"com.chelpus.lackypatch",
"com.ramdroid.appquarantine",
"com.ramdroid.appquarantinepro",
"com.android.vending.billing.InAppBillingService.COIN",
"com.chelpus.luckypatcher"
};
public static final String[] knownRootCloakingPackages = {
"com.devadvance.rootcloak",
"com.devadvance.rootcloakplus",
"de.robv.android.xposed.installer",
"com.saurik.substrate",
"com.zachspong.temprootremovejb",
"com.amphoras.hidemyroot",
"com.amphoras.hidemyrootadfree",
"com.formyhm.hiderootPremium",
"com.formyhm.hideroot"
};
// These must end with a /
public static final String[] suPaths ={
"/data/local/",
"/data/local/bin/",
"/data/local/xbin/",
"/sbin/",
"/su/bin/",
"/system/bin/",
"/system/bin/.ext/",
"/system/bin/failsafe/",
"/system/sd/xbin/",
"/system/usr/we-need-root/",
"/system/xbin/",
"/cache/",
"/data/",
"/dev/"
};
public static final String[] pathsThatShouldNotBeWritable = {
"/system",
"/system/bin",
"/system/sbin",
"/system/xbin",
"/vendor/bin",
"/sbin",
"/etc",
//"/sys",
//"/proc",
//"/dev"
};
/**
* Get a list of paths to check for binaries
*
* @return List of paths to check, using a combination of a static list and those paths
* listed in the PATH environment variable.
*/
static String[] getPaths(){
ArrayList<String> paths = new ArrayList<>(Arrays.asList(suPaths));
String sysPaths = System.getenv("PATH");
// If we can't get the path variable just return the static paths
if (sysPaths == null || "".equals(sysPaths)){
return paths.toArray(new String[0]);
}
for (String path : sysPaths.split(":")){
if (!path.endsWith("/")){
path = path + '/';
}
if (!paths.contains(path)){
paths.add(path);
}
}
return paths.toArray(new String[0]);
}
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
java
| 1 | 0 | |