filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses 3 values) | constargcount (float64 0-129 ⌀) | variableargcount (float64 0-0 ⌀) | sentence (stringclasses 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
tests/custom_cluster/test_breakpad.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import glob
import os
import psutil
import pytest
import shutil
import tempfile
import time
from resource import setrlimit, RLIMIT_CORE, RLIM_INFINITY
from signal import SIGSEGV, SIGKILL, SIGUSR1, SIGTERM
from subprocess import CalledProcessError
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.skip import SkipIfBuildType
DAEMONS = ['impalad', 'statestored', 'catalogd']
DAEMON_ARGS = ['impalad_args', 'state_store_args', 'catalogd_args']
class TestBreakpadBase(CustomClusterTestSuite):
"""Base class with utility methods for all breakpad tests."""
@classmethod
def get_workload(cls):
return 'functional-query'
def setup_method(self, method):
# Override parent
# The temporary directory gets removed in teardown_method() after each test.
self.tmp_dir = tempfile.mkdtemp()
def teardown_method(self, method):
# Override parent
# Stop the cluster to prevent future accesses to self.tmp_dir.
self.kill_cluster(SIGKILL)
assert self.tmp_dir
shutil.rmtree(self.tmp_dir)
@classmethod
def setup_class(cls):
super(TestBreakpadBase, cls).setup_class()
# Disable core dumps for this test
setrlimit(RLIMIT_CORE, (0, RLIM_INFINITY))
@classmethod
def teardown_class(cls):
# Re-enable core dumps
setrlimit(RLIMIT_CORE, (RLIM_INFINITY, RLIM_INFINITY))
def start_cluster_with_args(self, **kwargs):
cluster_options = []
for daemon_arg in DAEMON_ARGS:
daemon_options = " ".join("-{0}={1}".format(k, v) for k, v in kwargs.iteritems())
cluster_options.append("--{0}={1}".format(daemon_arg, daemon_options))
self._start_impala_cluster(cluster_options)
def start_cluster(self):
self.start_cluster_with_args(minidump_path=self.tmp_dir)
def kill_cluster(self, signal):
self.cluster.refresh()
processes = self.cluster.impalads + [self.cluster.catalogd, self.cluster.statestored]
processes = filter(None, processes)
self.kill_processes(processes, signal)
    if signal != SIGUSR1:
      self.assert_all_processes_killed()
def kill_processes(self, processes, signal):
for process in processes:
process.kill(signal)
    if signal != SIGUSR1:
      self.wait_for_all_processes_dead(processes)
def wait_for_all_processes_dead(self, processes, timeout=300):
for process in processes:
try:
# For every process in the list we might see the original Impala process plus a
# forked off child that is writing the minidump. We need to catch both.
for pid in process.get_pids():
print "Checking pid %s" % pid
psutil_process = psutil.Process(pid)
psutil_process.wait(timeout)
except psutil.NoSuchProcess:
# Process has exited in the meantime
pass
except psutil.TimeoutExpired:
raise RuntimeError("Unable to kill %s (pid %d) after %d seconds." %
(psutil_process.name, psutil_process.pid, timeout))
def get_num_processes(self, daemon):
self.cluster.refresh()
if daemon == 'impalad':
return len(self.cluster.impalads)
elif daemon == 'catalogd':
return self.cluster.catalogd and 1 or 0
elif daemon == 'statestored':
return self.cluster.statestored and 1 or 0
raise RuntimeError("Unknown daemon name: %s" % daemon)
def wait_for_num_processes(self, daemon, num_expected, timeout=30):
end = time.time() + timeout
self.cluster.refresh()
num_processes = self.get_num_processes(daemon)
while num_processes != num_expected and time.time() <= end:
time.sleep(1)
num_processes = self.get_num_processes(daemon)
return num_processes
def assert_all_processes_killed(self):
self.cluster.refresh()
assert not self.cluster.impalads
assert not self.cluster.statestored
assert not self.cluster.catalogd
def count_minidumps(self, daemon, base_dir=None):
base_dir = base_dir or self.tmp_dir
path = os.path.join(base_dir, daemon)
return len(glob.glob("%s/*.dmp" % path))
def count_all_minidumps(self, base_dir=None):
return sum((self.count_minidumps(daemon, base_dir) for daemon in DAEMONS))
def assert_num_minidumps_for_all_daemons(self, cluster_size, base_dir=None):
self.assert_num_logfile_entries(1)
assert self.count_minidumps('impalad', base_dir) == cluster_size
assert self.count_minidumps('statestored', base_dir) == 1
assert self.count_minidumps('catalogd', base_dir) == 1
def assert_num_logfile_entries(self, expected_count):
self.assert_impalad_log_contains('INFO', 'Wrote minidump to ',
expected_count=expected_count)
self.assert_impalad_log_contains('ERROR', 'Wrote minidump to ',
expected_count=expected_count)
class TestBreakpadCore(TestBreakpadBase):
"""Core tests to check that the breakpad integration into the daemons works as
expected. This includes writing minidump when the daemons call abort(). Add tests here
that depend on functionality of Impala other than the breakpad integration itself.
"""
@pytest.mark.execute_serially
def test_abort_writes_minidump(self):
"""Check that abort() (e.g. hitting a DCHECK macro) writes a minidump."""
assert self.count_all_minidumps() == 0
failed_to_start = False
try:
# Calling with an unresolvable hostname will abort.
self.start_cluster_with_args(minidump_path=self.tmp_dir,
hostname="jhzvlthd")
except CalledProcessError:
failed_to_start = True
assert failed_to_start
# Don't check for minidumps until all processes have gone away so that
# the state of the cluster is not in flux.
self.wait_for_num_processes('impalad', 0)
assert self.count_minidumps('impalad') > 0
class TestBreakpadExhaustive(TestBreakpadBase):
"""Exhaustive tests to check that the breakpad integration into the daemons works as
expected. This includes writing minidump files on unhandled signals and rotating old
minidumps on startup.
"""
@classmethod
def setup_class(cls):
if cls.exploration_strategy() != 'exhaustive':
pytest.skip('These breakpad tests only run in exhaustive')
super(TestBreakpadExhaustive, cls).setup_class()
@pytest.mark.execute_serially
def test_minidump_creation(self):
"""Check that when a daemon crashes, it writes a minidump file."""
assert self.count_all_minidumps() == 0
self.start_cluster()
assert self.count_all_minidumps() == 0
cluster_size = self.get_num_processes('impalad')
self.kill_cluster(SIGSEGV)
self.assert_num_minidumps_for_all_daemons(cluster_size)
@pytest.mark.execute_serially
def test_sigusr1_writes_minidump(self):
"""Check that when a daemon receives SIGUSR1, it writes a minidump file."""
assert self.count_all_minidumps() == 0
self.start_cluster()
assert self.count_all_minidumps() == 0
cluster_size = self.get_num_processes('impalad')
self.kill_cluster(SIGUSR1)
# Breakpad forks to write its minidump files, wait for all the clones to terminate.
assert self.wait_for_num_processes('impalad', cluster_size) == cluster_size
assert self.wait_for_num_processes('catalogd', 1) == 1
assert self.wait_for_num_processes('statestored', 1) == 1
# Make sure impalad still answers queries.
client = self.create_impala_client()
self.execute_query_expect_success(client, "SELECT COUNT(*) FROM functional.alltypes")
# Kill the cluster. Sending SIGKILL will not trigger minidumps to be written.
self.kill_cluster(SIGKILL)
self.assert_num_minidumps_for_all_daemons(cluster_size)
@pytest.mark.execute_serially
def test_sigusr1_doesnt_kill(self):
"""Check that when minidumps are disabled and a daemon receives SIGUSR1, it does not
die.
"""
assert self.count_all_minidumps() == 0
self.start_cluster_with_args(enable_minidumps=False)
cluster_size = self.get_num_processes('impalad')
self.kill_cluster(SIGUSR1)
# Check that no minidumps have been written.
self.assert_num_logfile_entries(0)
assert self.count_all_minidumps() == 0
# Check that all daemons are still alive.
assert self.get_num_processes('impalad') == cluster_size
assert self.get_num_processes('catalogd') == 1
assert self.get_num_processes('statestored') == 1
@pytest.mark.execute_serially
def test_sigterm_no_minidumps(self):
"""Check that when a SIGTERM is caught, no minidump file is written.
After receiving SIGTERM there should be no impalad/catalogd/statestored
running.
"""
assert self.count_all_minidumps() == 0
self.start_cluster()
cluster_size = self.get_num_processes('impalad')
assert self.count_all_minidumps() == 0
# impalad/catalogd/statestored should be running.
assert cluster_size > 0
assert self.get_num_processes('catalogd') == 1
assert self.get_num_processes('statestored') == 1
# There should be no SIGTERM message in the log
# when the system starts.
self.assert_impalad_log_contains('INFO', 'Caught signal: SIGTERM. Daemon will exit',
expected_count=0)
self.kill_cluster(SIGTERM)
# There should be no impalad/catalogd/statestored running.
# There should be no minidump generated.
assert self.get_num_processes('impalad') == 0
assert self.get_num_processes('catalogd') == 0
assert self.get_num_processes('statestored') == 0
assert self.count_all_minidumps() == 0
uid = os.getuid()
# There should be a SIGTERM message in the log now
# since we raised one above.
log_str = 'Caught signal: SIGTERM. Daemon will exit.'
self.assert_impalad_log_contains('INFO', log_str, expected_count=1)
@pytest.mark.execute_serially
def test_minidump_relative_path(self):
"""Check that setting 'minidump_path' to a relative value results in minidump files
written to 'log_dir'.
"""
minidump_base_dir = os.path.join(os.environ.get('LOG_DIR', '/tmp'), 'minidumps')
shutil.rmtree(minidump_base_dir, ignore_errors=True)
# Omitting minidump_path as a parameter to the cluster will choose the default
# configuration, which is a FLAGS_log_dir/minidumps.
self.start_cluster_with_args()
assert self.count_all_minidumps(minidump_base_dir) == 0
cluster_size = self.get_num_processes('impalad')
self.kill_cluster(SIGSEGV)
self.assert_num_minidumps_for_all_daemons(cluster_size, minidump_base_dir)
shutil.rmtree(minidump_base_dir)
@pytest.mark.execute_serially
def test_minidump_cleanup(self):
"""Check that a limited number of minidumps is preserved during startup."""
assert self.count_all_minidumps() == 0
self.start_cluster()
cluster_size = self.get_num_processes('impalad')
self.kill_cluster(SIGSEGV)
self.assert_num_logfile_entries(1)
# Maximum number of minidumps that the impalads should keep for this test.
max_minidumps = 2
self.start_cluster_with_args(minidump_path=self.tmp_dir,
max_minidumps=max_minidumps,
logbufsecs=1)
# Wait for log maintenance thread to clean up minidumps asynchronously.
start = time.time()
expected_impalad_minidumps = min(cluster_size, max_minidumps)
while (self.count_minidumps('impalad') != expected_impalad_minidumps
and time.time() - start < 10):
time.sleep(0.1)
assert self.count_minidumps('impalad') == expected_impalad_minidumps
assert self.count_minidumps('statestored') == 1
assert self.count_minidumps('catalogd') == 1
@pytest.mark.execute_serially
def test_minidump_cleanup_thread(self):
"""Check that periodic rotation preserves a limited number of minidumps."""
assert self.count_all_minidumps() == 0
# Maximum number of minidumps that the impalads should keep for this test.
max_minidumps = 2
# Sleep interval for the log rotation thread.
rotation_interval = 1
self.start_cluster_with_args(minidump_path=self.tmp_dir,
max_minidumps=max_minidumps,
logbufsecs=rotation_interval)
cluster_size = self.get_num_processes('impalad')
# We trigger several rounds of minidump creation to make sure that all daemons wrote
# enough files to trigger rotation.
for i in xrange(max_minidumps + 1):
self.kill_cluster(SIGUSR1)
# Breakpad forks to write its minidump files, sleep briefly to allow the forked
# processes to start.
time.sleep(1)
# Wait for all the clones to terminate.
assert self.wait_for_num_processes('impalad', cluster_size) == cluster_size
assert self.wait_for_num_processes('catalogd', 1) == 1
assert self.wait_for_num_processes('statestored', 1) == 1
self.assert_num_logfile_entries(i + 1)
# Sleep long enough for log cleaning to take effect.
time.sleep(rotation_interval + 1)
assert self.count_minidumps('impalad') == min(cluster_size, max_minidumps)
assert self.count_minidumps('statestored') == max_minidumps
assert self.count_minidumps('catalogd') == max_minidumps
@pytest.mark.execute_serially
def test_disable_minidumps(self):
"""Check that setting enable_minidumps to false disables minidump creation."""
assert self.count_all_minidumps() == 0
self.start_cluster_with_args(enable_minidumps=False)
self.kill_cluster(SIGSEGV)
self.assert_num_logfile_entries(0)
@pytest.mark.execute_serially
def test_empty_minidump_path_disables_breakpad(self):
"""Check that setting the minidump_path to an empty value disables minidump creation.
"""
assert self.count_all_minidumps() == 0
self.start_cluster_with_args(minidump_path='')
self.kill_cluster(SIGSEGV)
self.assert_num_logfile_entries(0)
def trigger_single_minidump_and_get_size(self):
"""Kill a single impalad with SIGSEGV to make it write a minidump. Kill the rest of
the cluster. Clean up the single minidump file and return its size.
"""
self.cluster.refresh()
assert self.get_num_processes('impalad') > 0
# Make one impalad write a minidump.
self.kill_processes(self.cluster.impalads[:1], SIGSEGV)
# Kill the rest of the cluster.
self.kill_cluster(SIGKILL)
assert self.count_minidumps('impalad') == 1
    # Get the file size of that minidump.
path = os.path.join(self.tmp_dir, 'impalad')
minidump_file = glob.glob("%s/*.dmp" % path)[0]
minidump_size = os.path.getsize(minidump_file)
os.remove(minidump_file)
assert self.count_all_minidumps() == 0
return minidump_size
@pytest.mark.execute_serially
def test_limit_minidump_size(self):
"""Check that setting the 'minidump_size_limit_hint_kb' to a small value will reduce
the minidump file size.
"""
assert self.count_all_minidumps() == 0
# Generate minidump with default settings.
self.start_cluster()
full_minidump_size = self.trigger_single_minidump_and_get_size()
# Start cluster with limited minidump file size, we use a very small value, to ensure
# the resulting minidump will be as small as possible.
self.start_cluster_with_args(minidump_path=self.tmp_dir,
minidump_size_limit_hint_kb=1)
reduced_minidump_size = self.trigger_single_minidump_and_get_size()
# Check that the minidump file size has been reduced.
assert reduced_minidump_size < full_minidump_size
class TestLogging(TestBreakpadBase):
"""Exhaustive tests to check that impala log is rolled periodically, obeying
max_log_size and max_log_files, even in the presence of heavy stderr writing.
"""
@classmethod
def setup_class(cls):
if cls.exploration_strategy() != 'exhaustive':
pytest.skip('These logging tests only run in exhaustive')
super(TestLogging, cls).setup_class()
def start_cluster_with_args(self, cluster_size, log_dir, **kwargs):
cluster_options = []
for daemon_arg in DAEMON_ARGS:
daemon_options = " ".join("-{0}={1}".format(k, v) for k, v in kwargs.iteritems())
cluster_options.append("--{0}={1}".format(daemon_arg, daemon_options))
self._start_impala_cluster(cluster_options, cluster_size=cluster_size,
expected_num_impalads=cluster_size, impala_log_dir=log_dir)
def assert_logs(self, daemon, max_count, max_bytes):
"""Assert that there are at most 'max_count' of INFO + ERROR log files for the
specified daemon and the individual file size does not exceed 'max_bytes'.
Also assert that stdout/stderr are redirected to correct file on each rotation."""
log_dir = self.tmp_dir
log_paths = glob.glob("%s/%s*log.ERROR.*" % (log_dir, daemon)) \
+ glob.glob("%s/%s*log.INFO.*" % (log_dir, daemon))
assert len(log_paths) <= max_count
# group log_paths by pid and kind
log_group = {}
for path in sorted(log_paths):
tok = path.split('.')
key = tok[-1] + '.' + tok[-3] # pid + kind
if key in log_group:
log_group[key].append(path)
else:
log_group[key] = [path]
for key, paths in log_group.items():
for i in range(0, len(paths)):
try:
curr_path = paths[i]
# check log size
log_size = os.path.getsize(curr_path)
          assert log_size <= max_bytes, "{} exceeds {} bytes".format(curr_path, max_bytes)
if i < len(paths) - 1:
# check that we print the next_path in last line of this log file
next_path = paths[i + 1]
with open(curr_path, 'rb') as f:
f.seek(-2, os.SEEK_END)
while f.read(1) != b'\n':
f.seek(-2, os.SEEK_CUR)
last_line = f.readline().decode()
assert next_path in last_line
except OSError:
# The daemon might delete the log in the middle of assertion.
# In that case, do nothing and move on.
pass
def silent_remove(self, filename):
try:
os.remove(filename)
except OSError:
pass
def start_excessive_cerr_cluster(self, test_cluster_size=1, remove_symlink=False):
"""Check that impalad log is kept being rotated when most writing activity is coming
from stderr stream.
Along with LogFaultInjectionThread in init.cc, this test will fill impalad error logs
with approximately 128kb error messages per second."""
test_logbufsecs = 3
test_max_log_files = 2
test_max_log_size = 1 # 1 MB
test_error_msg = ('123456789abcde_' * 64) # 1 KB error message
test_debug_actions = 'LOG_MAINTENANCE_STDERR:[email protected]@' + test_error_msg
daemon = 'impalad'
os.chmod(self.tmp_dir, 0744)
expected_log_max_bytes = int(1.2 * 1024**2) # 1.2 MB
self.assert_logs(daemon, 0, expected_log_max_bytes)
self.start_cluster_with_args(test_cluster_size, self.tmp_dir,
logbufsecs=test_logbufsecs,
max_log_files=test_max_log_files,
max_log_size=test_max_log_size,
debug_actions=test_debug_actions)
self.wait_for_num_processes(daemon, test_cluster_size, 30)
# Count both INFO and ERROR logs
expected_log_max_count = test_max_log_files * test_cluster_size * 2
# Wait for log maintenance thread to flush and rotate the logs asynchronously.
start = time.time()
while (time.time() - start < 40):
time.sleep(1)
self.assert_logs(daemon, expected_log_max_count, expected_log_max_bytes)
if (remove_symlink):
pattern = self.tmp_dir + '/' + daemon + '*'
symlinks = glob.glob(pattern + '.INFO') + glob.glob(pattern + '.ERROR')
for symlink in symlinks:
self.silent_remove(symlink)
@pytest.mark.execute_serially
def test_excessive_cerr(self):
"""Test excessive cerr activity with single node cluster."""
self.start_excessive_cerr_cluster()
@pytest.mark.execute_serially
def test_excessive_cerr_no_symlink(self):
"""Test excessive cerr activity with two node cluster and missing log symlinks."""
self.start_excessive_cerr_cluster(2, True)
| [] | [] | ["LOG_DIR"] | [] | ["LOG_DIR"] | python | 1 | 0 | |
cmdtest.go
|
// Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The cmdtest package simplifies testing of command-line interfaces. It
// provides a simple, cross-platform, shell-like language to express command
// execution. It can compare actual output with the expected output, and can
// also update a file with new "golden" output that is deemed correct.
//
// Start using cmdtest by writing a test file with commands and expected output,
// giving it the extension ".ct". All test files in the same directory make up a
// test suite. See the TestSuite documentation for the syntax of test files.
//
// To test, first read the suite:
//
// ts, err := cmdtest.Read("testdata")
//
// Then configure the resulting TestSuite by adding commands or enabling
// debugging features. Lastly, call TestSuite.Run from a test function,
// passing false to compare output against the test files or true to update
// them. Typically, this boolean will be the value of a flag:
//
//     var update = flag.Bool("update", false, "update test files with results")
//     ...
//     ts.Run(t, *update)
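//
// As a minimal sketch of wiring this together (the command name "hello" and
// its main function helloMain are assumptions for illustration, not part of
// this package):
//
//     func TestCLI(t *testing.T) {
//         ts, err := cmdtest.Read("testdata")
//         if err != nil {
//             t.Fatal(err)
//         }
//         // Register the program under test as an in-process command.
//         ts.Commands["hello"] = cmdtest.InProcessProgram("hello", helloMain)
//         ts.Run(t, *update)
//     }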
package cmdtest
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"sync"
"testing"
"github.com/google/go-cmp/cmp"
)
// A TestSuite contains a set of test files, each of which may contain multiple
// test cases. Use Read to build a TestSuite from all the test files in a
// directory. Then configure it and call Run.
//
// Format of a test file:
//
// Before the first line starting with a '$', empty lines and lines beginning with
// "#" are ignored.
//
// A sequence of consecutive lines starting with '$' begin a test case. These lines
// are commands to execute. See below for the valid commands.
//
// Lines following the '$' lines are command output (merged stdout and stderr).
// Output is always treated literally. After the command output there should be a
// blank line. Between that blank line and the next '$' line, empty lines and lines
// beginning with '#' are ignored. (Because of these rules, cmdtest cannot
// distinguish trailing blank lines in the output.)
//
// Syntax of a line beginning with '$': A sequence of space-separated words (no
// quoting is supported). The first word is the command, the rest are its args.
// If the next-to-last word is '<', the last word is interpreted as a file and
// becomes the standard input to the command. None of the built-in commands (see
// below) support input redirection, but commands defined with Program do.
//
// By default, commands are expected to succeed, and the test will fail
// otherwise. However, commands that are expected to fail can be marked
// with a " --> FAIL" suffix.
//
// The cases of a test file are executed in order, starting in a freshly
// created temporary directory.
//
// The built-in commands (initial contents of the Commands map) are:
//
// cd DIR
// cat FILE
// mkdir DIR
// setenv VAR VALUE
// echo ARG1 ARG2 ...
// fecho FILE ARG1 ARG2 ...
//
// These all have their usual Unix shell meaning, except for fecho, which writes its
// arguments to a file (output redirection is not supported). All file and directory
// arguments must refer to the current directory; that is, they cannot contain
// slashes.
//
// cmdtest does its own environment variable substitution, using the syntax
// "${VAR}". Test execution inherits the full environment of the test binary
// caller (typically, your shell). The environment variable ROOTDIR is set to
// the temporary directory created to run the test file.
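//
// As an illustrative sketch, a small test file using only the built-in
// commands above (the file names and contents are assumptions for
// illustration) could look like:
//
//     # Comments before the first command are ignored.
//     $ echo hello world
//     hello world
//
//     $ fecho greeting.txt hi there
//     $ cat greeting.txt
//     hi there
//
//     # ROOTDIR expands to the temporary test directory; in recorded output
//     # it is scrubbed back to the literal string ${ROOTDIR}.
//     $ echo ${ROOTDIR}
//     ${ROOTDIR}
//
//     # A command that is expected to fail carries the FAIL marker.
//     $ cat no-such-file --> FAIL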
type TestSuite struct {
// If non-nil, this function is called for each test. It is passed the root
// directory after it has been made the current directory.
Setup func(string) error
// The commands that can be executed (that is, whose names can occur as the
// first word of a command line).
Commands map[string]CommandFunc
// If true, don't delete the temporary root directories for each test file,
// and print out their names for debugging.
KeepRootDirs bool
files []*testFile
}
type testFile struct {
suite *TestSuite
filename string // full filename of the test file
cases []*testCase
suffix []string // non-output lines after last case
}
type testCase struct {
before []string // lines before the commands
startLine int // line of first command
// The list of commands to execute.
commands []string
// The stdout and stderr, merged and split into lines.
gotOutput []string // from execution
wantOutput []string // from file
}
// CommandFunc is the signature of a command function. The function takes the
// subsequent words on the command line (so that arg[0] is the first argument),
// as well as the name of a file to use for input redirection. It returns the
// command's output.
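//
// As a sketch, a custom command (the name "upper" is an assumption for
// illustration, not a built-in) could be registered on a suite like this:
//
//     ts.Commands["upper"] = func(args []string, inputFile string) ([]byte, error) {
//         return []byte(strings.ToUpper(strings.Join(args, " ")) + "\n"), nil
//     }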
type CommandFunc func(args []string, inputFile string) ([]byte, error)
// Read reads all the files in dir with extension ".ct" and returns a TestSuite
// containing them. See the TestSuite documentation for syntax.
func Read(dir string) (*TestSuite, error) {
filenames, err := filepath.Glob(filepath.Join(dir, "*.ct"))
if err != nil {
return nil, err
}
ts := &TestSuite{
Commands: map[string]CommandFunc{
"cat": fixedArgBuiltin(1, catCmd),
"cd": fixedArgBuiltin(1, cdCmd),
"echo": echoCmd,
"fecho": fechoCmd,
"mkdir": fixedArgBuiltin(1, mkdirCmd),
"setenv": fixedArgBuiltin(2, setenvCmd),
},
}
for _, fn := range filenames {
tf, err := readFile(fn)
if err != nil {
return nil, err
}
tf.suite = ts
ts.files = append(ts.files, tf)
}
return ts, nil
}
func readFile(filename string) (*testFile, error) {
// parse states
const (
beforeFirstCommand = iota
inCommands
inOutput
)
tf := &testFile{
filename: filename,
}
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
scanner := bufio.NewScanner(f)
var tc *testCase
lineno := 0
var prefix []string
state := beforeFirstCommand
for scanner.Scan() {
lineno++
line := scanner.Text()
isCommand := strings.HasPrefix(line, "$")
switch state {
case beforeFirstCommand:
if isCommand {
tc = &testCase{startLine: lineno, before: prefix}
tc.addCommandLine(line)
state = inCommands
} else {
line = strings.TrimSpace(line)
if line == "" || line[0] == '#' {
prefix = append(prefix, line)
} else {
return nil, fmt.Errorf("%s:%d: bad line %q (should begin with '#')", filename, lineno, line)
}
}
case inCommands:
if isCommand {
tc.addCommandLine(line)
} else { // End of commands marks the start of the output.
tc.wantOutput = append(tc.wantOutput, line)
state = inOutput
}
case inOutput:
if isCommand { // A command marks the end of the output.
prefix = tf.addCase(tc)
tc = &testCase{startLine: lineno, before: prefix}
tc.addCommandLine(line)
state = inCommands
} else {
tc.wantOutput = append(tc.wantOutput, line)
}
default:
panic("bad state")
}
}
if err := scanner.Err(); err != nil {
return nil, err
}
if tc != nil {
tf.suffix = tf.addCase(tc)
}
return tf, nil
}
func (tc *testCase) addCommandLine(line string) {
tc.commands = append(tc.commands, strings.TrimSpace(line[1:]))
}
// addCase first splits the collected output for tc into the actual command
// output, and a suffix consisting of blank lines and comments. It then adds tc
// to the cases of tf, and returns the suffix.
func (tf *testFile) addCase(tc *testCase) []string {
// Trim the suffix of output that consists solely of blank lines and comments,
// and return it.
var i int
for i = len(tc.wantOutput) - 1; i >= 0; i-- {
if tc.wantOutput[i] != "" && tc.wantOutput[i][0] != '#' {
break
}
}
i++
// i is the index of the first line to ignore.
keep, suffix := tc.wantOutput[:i], tc.wantOutput[i:]
if len(keep) == 0 {
keep = nil
}
tc.wantOutput = keep
tf.cases = append(tf.cases, tc)
return suffix
}
// Run runs the commands in each file in the test suite. Each file runs in a
// separate subtest.
//
// If update is false, it compares their output with the output in the file,
// line by line.
//
// If update is true, it writes the output back to the file, overwriting the
// previous output.
//
// Before comparing/updating, occurrences of the root directory in the output
// are replaced by ${ROOTDIR}.
func (ts *TestSuite) Run(t *testing.T, update bool) {
if update {
ts.update(t)
} else {
ts.compare(t)
}
}
// compare runs a subtest for each file in the test suite. See Run.
func (ts *TestSuite) compare(t *testing.T) {
for _, tf := range ts.files {
t.Run(strings.TrimSuffix(tf.filename, ".ct"), func(t *testing.T) {
if s := tf.compare(t.Logf); s != "" {
t.Error(s)
}
})
}
}
var noopLogger = func(_ string, _ ...interface{}) {}
// compareReturningError is similar to compare, but it returns
// errors/differences in an error. It is used in tests for this package.
func (ts *TestSuite) compareReturningError() error {
var ss []string
for _, tf := range ts.files {
if s := tf.compare(noopLogger); s != "" {
ss = append(ss, s)
}
}
if len(ss) > 0 {
return errors.New(strings.Join(ss, ""))
}
return nil
}
func (tf *testFile) compare(log func(string, ...interface{})) string {
if err := tf.execute(log); err != nil {
return fmt.Sprintf("%v", err)
}
buf := new(bytes.Buffer)
for _, c := range tf.cases {
if diff := cmp.Diff(c.gotOutput, c.wantOutput); diff != "" {
fmt.Fprintf(buf, "%s:%d: got=-, want=+\n", tf.filename, c.startLine)
c.writeCommands(buf)
fmt.Fprintf(buf, "%s\n", diff)
}
}
return buf.String()
}
// update runs a subtest for each file in the test suite, updating their output.
// See Run.
func (ts *TestSuite) update(t *testing.T) {
for _, tf := range ts.files {
t.Run(strings.TrimSuffix(tf.filename, ".ct"), func(t *testing.T) {
tmpfile, err := tf.updateToTemp()
if err != nil {
t.Fatal(err)
}
if err := os.Rename(tmpfile, tf.filename); err != nil {
t.Fatal(err)
}
})
}
}
// updateToTemp executes tf and writes the output to a temporary file.
// It returns the name of the temporary file.
func (tf *testFile) updateToTemp() (fname string, err error) {
if err := tf.execute(noopLogger); err != nil {
return "", err
}
f, err := ioutil.TempFile("", "cmdtest")
if err != nil {
return "", err
}
defer func() {
err2 := f.Close()
if err == nil {
err = err2
}
if err != nil {
os.Remove(f.Name())
}
}()
if err := tf.write(f); err != nil {
return "", err
}
return f.Name(), nil
}
func (tf *testFile) execute(log func(string, ...interface{})) error {
rootDir, err := ioutil.TempDir("", "cmdtest")
if err != nil {
return fmt.Errorf("%s: %v", tf.filename, err)
}
if tf.suite.KeepRootDirs {
fmt.Printf("%s: test root directory: %s\n", tf.filename, rootDir)
} else {
defer os.RemoveAll(rootDir)
}
if err := os.Setenv("ROOTDIR", rootDir); err != nil {
return fmt.Errorf("%s: %v", tf.filename, err)
}
defer os.Unsetenv("ROOTDIR")
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("%s: %v", tf.filename, err)
}
if err := os.Chdir(rootDir); err != nil {
return fmt.Errorf("%s: %v", tf.filename, err)
}
defer func() { _ = os.Chdir(cwd) }()
if tf.suite.Setup != nil {
if err := tf.suite.Setup(rootDir); err != nil {
return fmt.Errorf("%s: calling Setup: %v", tf.filename, err)
}
}
for _, tc := range tf.cases {
if err := tc.execute(tf.suite, log); err != nil {
return fmt.Errorf("%s:%v", tf.filename, err) // no space after :, for line number
}
}
return nil
}
// A fatal error stops a test.
type fatal struct{ error }
// Run the test case by executing the commands. The concatenated output from all commands
// is saved in tc.gotOutput.
// An error is returned if: a command that should succeed instead failed; a command that should
// fail instead succeeded; or a built-in command was called incorrectly.
func (tc *testCase) execute(ts *TestSuite, log func(string, ...interface{})) error {
const failMarker = " --> FAIL"
tc.gotOutput = nil
var allout []byte
var err error
for i, cmd := range tc.commands {
wantFail := false
if strings.HasSuffix(cmd, failMarker) {
cmd = strings.TrimSuffix(cmd, failMarker)
wantFail = true
}
args := strings.Fields(cmd)
for i := range args {
args[i], err = expandVariables(args[i], os.LookupEnv)
if err != nil {
return err
}
}
log("$ %s", strings.Join(args, " "))
name := args[0]
args = args[1:]
var infile string
if len(args) >= 2 && args[len(args)-2] == "<" {
infile = args[len(args)-1]
args = args[:len(args)-2]
}
f := ts.Commands[name]
if f == nil {
return fmt.Errorf("%d: no such command %q", tc.startLine+i, name)
}
out, err := f(args, infile)
if _, ok := err.(fatal); ok {
return fmt.Errorf("%d: command %q failed fatally with %v", tc.startLine+i, cmd, err)
}
if err == nil && wantFail {
return fmt.Errorf("%d: %q succeeded, but it was expected to fail", tc.startLine+i, cmd)
}
if err != nil && !wantFail {
return fmt.Errorf("%d: %q failed with %v. Output:\n%s", tc.startLine+i, cmd, err, out)
}
log("%s\n", string(out))
allout = append(allout, out...)
}
if len(allout) > 0 {
allout = scrub(os.Getenv("ROOTDIR"), allout) // use Getenv because Setup could change ROOTDIR
// Remove final whitespace.
s := strings.TrimRight(string(allout), " \t\n")
tc.gotOutput = strings.Split(s, "\n")
}
return nil
}
// Program defines a command function that will run the executable at path using
// the exec.Command package and return its combined output. If path is relative,
// it is converted to an absolute path using the current directory at the time
// Program is called.
//
// In the unlikely event that Program cannot obtain the current directory, it
// panics.
func Program(path string) CommandFunc {
abspath, err := filepath.Abs(path)
if err != nil {
panic(fmt.Sprintf("Program(%q): %v", path, err))
}
return func(args []string, inputFile string) ([]byte, error) {
return execute(abspath, args, inputFile)
}
}
// InProcessProgram defines a command function that will invoke f, which must
// behave like an actual main function except that it returns an error code
// instead of calling os.Exit.
// Before calling f:
// - os.Args is set to the concatenation of name and args.
// - If inputFile is non-empty, it is redirected to standard input.
// - Standard output and standard error are redirected to a buffer, which is
// returned.
func InProcessProgram(name string, f func() int) CommandFunc {
return func(args []string, inputFile string) ([]byte, error) {
origArgs := os.Args
origOut := os.Stdout
origErr := os.Stderr
defer func() {
os.Args = origArgs
os.Stdout = origOut
os.Stderr = origErr
}()
os.Args = append([]string{name}, args...)
// Redirect stdout and stderr to pipes.
rOut, wOut, err := os.Pipe()
if err != nil {
return nil, err
}
rErr, wErr, err := os.Pipe()
if err != nil {
return nil, err
}
os.Stdout = wOut
os.Stderr = wErr
// Copy both stdout and stderr to the same buffer.
buf := &bytes.Buffer{}
lw := &lockingWriter{w: buf}
errc := make(chan error, 2)
go func() {
_, err := io.Copy(lw, rOut)
errc <- err
}()
go func() {
_, err := io.Copy(lw, rErr)
errc <- err
}()
// Redirect stdin if needed.
if inputFile != "" {
f, err := os.Open(inputFile)
if err != nil {
return nil, err
}
defer f.Close()
origIn := os.Stdin
defer func() { os.Stdin = origIn }()
os.Stdin = f
}
res := f()
if err := wOut.Close(); err != nil {
return nil, err
}
if err := wErr.Close(); err != nil {
return nil, err
}
// Wait for pipe copying to finish.
if err := <-errc; err != nil {
return nil, err
}
if err := <-errc; err != nil {
return nil, err
}
if res != 0 {
err = fmt.Errorf("%s failed with exit code %d", name, res)
}
return buf.Bytes(), err
}
}
// lockingWriter is an io.Writer whose Write method is safe for
// use by multiple goroutines.
type lockingWriter struct {
mu sync.Mutex
w io.Writer
}
func (w *lockingWriter) Write(b []byte) (int, error) {
w.mu.Lock()
defer w.mu.Unlock()
return w.w.Write(b)
}
// execute uses exec.Command to run the named program with the given args. The
// combined output is captured and returned. If infile is not empty, its contents
// become the command's standard input.
func execute(name string, args []string, infile string) ([]byte, error) {
ecmd := exec.Command(name, args...)
var errc chan error
if infile != "" {
f, err := os.Open(infile)
if err != nil {
return nil, err
}
defer f.Close()
ecmd.Stdin = f
}
out, err := ecmd.CombinedOutput()
if err != nil {
return out, err
}
if errc != nil {
if err = <-errc; err != nil {
return out, err
}
}
return out, nil
}
var varRegexp = regexp.MustCompile(`\$\{([^${}]+)\}`)
// expandVariables replaces variable references in s with their values. A reference
// to a variable V looks like "${V}".
// lookup is called on a variable's name to find its value. Its second return value
// is false if the variable doesn't exist.
// expandVariables fails if s contains a reference to a non-existent variable.
//
// This function differs from os.Expand in two ways. First, it does not expand $var,
// only ${var}. The former is fragile. Second, an undefined variable results in an error,
// rather than expanding to some string. We want to fail if a variable is undefined.
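// As a sketch (assuming the environment variable OS is set to "linux"):
// expandVariables("go-${OS}", os.LookupEnv) returns "go-linux";
// expandVariables("go-$OS", os.LookupEnv) leaves the string unchanged because
// only the braced form is recognized; and a reference to an unset variable,
// such as "${NO_SUCH_VAR}", results in an error.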
func expandVariables(s string, lookup func(string) (string, bool)) (string, error) {
var sb strings.Builder
for {
ixs := varRegexp.FindStringSubmatchIndex(s)
if ixs == nil {
sb.WriteString(s)
return sb.String(), nil
}
varName := s[ixs[2]:ixs[3]]
varVal, ok := lookup(varName)
if !ok {
return "", fmt.Errorf("variable %q not found", varName)
}
sb.WriteString(s[:ixs[0]])
sb.WriteString(varVal)
s = s[ixs[1]:]
}
}
// scrub removes dynamic content from output.
func scrub(rootDir string, b []byte) []byte {
const scrubbedRootDir = "${ROOTDIR}"
rootDirWithSeparator := rootDir + string(filepath.Separator)
scrubbedRootDirWithSeparator := scrubbedRootDir + "/"
b = bytes.Replace(b, []byte(rootDirWithSeparator), []byte(scrubbedRootDirWithSeparator), -1)
b = bytes.Replace(b, []byte(rootDir), []byte(scrubbedRootDir), -1)
return b
}
func (tf *testFile) write(w io.Writer) error {
for _, c := range tf.cases {
if err := c.write(w); err != nil {
return err
}
}
return writeLines(w, tf.suffix)
}
func (tc *testCase) write(w io.Writer) error {
if err := writeLines(w, tc.before); err != nil {
return err
}
if err := tc.writeCommands(w); err != nil {
return err
}
out := tc.gotOutput
if out == nil {
out = tc.wantOutput
}
return writeLines(w, out)
}
func (tc *testCase) writeCommands(w io.Writer) error {
for _, c := range tc.commands {
if _, err := fmt.Fprintf(w, "$ %s\n", c); err != nil {
return err
}
}
return nil
}
func writeLines(w io.Writer, lines []string) error {
for _, l := range lines {
if _, err := io.WriteString(w, l); err != nil {
return err
}
if _, err := w.Write([]byte{'\n'}); err != nil {
return err
}
}
return nil
}
func fixedArgBuiltin(nargs int, f func([]string) ([]byte, error)) CommandFunc {
return func(args []string, inputFile string) ([]byte, error) {
if len(args) != nargs {
return nil, fatal{fmt.Errorf("need exactly %d arguments", nargs)}
}
if inputFile != "" {
return nil, fatal{errors.New("input redirection not supported")}
}
return f(args)
}
}
// cd DIR
// change directory
func cdCmd(args []string) ([]byte, error) {
if err := checkPath(args[0]); err != nil {
return nil, err
}
cwd, err := os.Getwd()
if err != nil {
return nil, err
}
return nil, os.Chdir(filepath.Join(cwd, args[0]))
}
// echo ARG1 ARG2 ...
// write args to stdout
//
// \n is added at the end of the input.
// Also, literal "\n" in the input will be replaced by \n.
func echoCmd(args []string, inputFile string) ([]byte, error) {
if inputFile != "" {
return nil, fatal{errors.New("input redirection not supported")}
}
s := strings.Join(args, " ")
s = strings.Replace(s, "\\n", "\n", -1)
s += "\n"
return []byte(s), nil
}
// fecho FILE ARG1 ARG2 ...
// write args to FILE
//
// \n is added at the end of the input.
// Also, literal "\n" in the input will be replaced by \n.
func fechoCmd(args []string, inputFile string) ([]byte, error) {
if len(args) < 1 {
return nil, fatal{errors.New("need at least 1 argument")}
}
if inputFile != "" {
return nil, fatal{errors.New("input redirection not supported")}
}
if err := checkPath(args[0]); err != nil {
return nil, err
}
s := strings.Join(args[1:], " ")
s = strings.Replace(s, "\\n", "\n", -1)
s += "\n"
return nil, ioutil.WriteFile(args[0], []byte(s), 0600)
}
// cat FILE
// copy file to stdout
func catCmd(args []string) ([]byte, error) {
if err := checkPath(args[0]); err != nil {
return nil, err
}
f, err := os.Open(args[0])
if err != nil {
return nil, err
}
defer f.Close()
buf := &bytes.Buffer{}
_, err = io.Copy(buf, f)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// mkdir DIR
// create directory
func mkdirCmd(args []string) ([]byte, error) {
if err := checkPath(args[0]); err != nil {
return nil, err
}
return nil, os.Mkdir(args[0], 0700)
}
// setenv VAR VALUE
// set environment variable
func setenvCmd(args []string) ([]byte, error) {
return nil, os.Setenv(args[0], args[1])
}
func checkPath(path string) error {
if strings.ContainsRune(path, '/') || strings.ContainsRune(path, '\\') {
return fatal{fmt.Errorf("argument must be in the current directory (%q has a '/')", path)}
}
return nil
}
| ["\"ROOTDIR\""] | [] | ["ROOTDIR"] | [] | ["ROOTDIR"] | go | 1 | 0 | |
redis.go
|
package main
import (
"bytes"
fmt "github.com/starkandwayne/goutils/ansi"
"gopkg.in/redis.v5"
"net/http"
"os"
)
func init() {
http.HandleFunc("/redis", func(w http.ResponseWriter, r *http.Request) {
var expect, got string
var b bytes.Buffer
fmt.Fprintf(&b, "starting @M{redis} smoke tests...\n")
Step(&b, "parsing VCAP_SERVICES env var to find our Redis endpoint")
vcap, err := ParseVcap(os.Getenv("VCAP_SERVICES"))
if err != nil {
Final(w, b, err)
return
}
OK(&b)
Step(&b, "searching VCAP_SERVICES for our 'redis' service")
var hostname, port, password string
for _, service := range vcap {
if !Tagged(service, "redis") {
continue
}
hostname, err = Extract(service, "credentials", "host")
if err != nil {
Final(w, b, err)
return
}
port, err = Extract(service, "credentials", "port")
if err != nil {
Final(w, b, err)
return
}
password, err = Extract(service, "credentials", "password")
if err != nil {
Final(w, b, err)
return
}
break
}
if hostname == "" {
Final(w, b, fmt.Errorf("No service tagged 'redis' was found in VCAP_SERVICES"))
return
}
OK(&b)
Step(&b, "connecting to @C{%s:%s}", hostname, port)
client := redis.NewClient(&redis.Options{
Addr: fmt.Sprintf("%s:%s", hostname, port),
Password: password,
})
_, err = client.Ping().Result()
if err != nil {
Final(w, b, err)
return
}
OK(&b)
key := "cloud.vet.redis.test.key"
expect = "the first value"
Step(&b, "storing a value")
err = client.Set(key, expect, 0).Err()
if err != nil {
Final(w, b, err)
return
}
OK(&b)
Step(&b, "retrieving the stored value")
got, err = client.Get(key).Result()
if err != nil {
Final(w, b, err)
return
}
if got != expect {
Final(w, b, fmt.Errorf("We wrote '%s' to the key-value store, but\n"+
"got back '%s'", expect, got))
return
}
OK(&b)
Step(&b, "updating the stored a value")
err = client.Set(key, expect, 0).Err()
if err != nil {
Final(w, b, err)
return
}
OK(&b)
Step(&b, "retrieving the updated value")
got, err = client.Get(key).Result()
if err != nil {
Final(w, b, err)
return
}
if got != expect {
Final(w, b, fmt.Errorf("We wrote '%s' to the key-value store, but\n"+
"got back '%s'", expect, got))
return
}
OK(&b)
fmt.Fprintf(&b, "\n\n@G{REDIS TESTS PASSED!}\n\n")
Final(w, b, nil)
})
}
| ["\"VCAP_SERVICES\""] | [] | ["VCAP_SERVICES"] | [] | ["VCAP_SERVICES"] | go | 1 | 0 | |
pkg/store/store.go
|
package store
import (
"os"
"path/filepath"
"strings"
"github.com/containerd/containerd/identifiers"
"github.com/lima-vm/lima/pkg/limayaml"
"github.com/lima-vm/lima/pkg/store/filenames"
)
// DotLima is a directory that appears under the home directory.
const DotLima = ".lima"
// LimaDir returns the abstract path of `~/.lima` (or $LIMA_HOME, if set).
//
// NOTE: We do not use `~/Library/Application Support/Lima` on macOS.
// We use `~/.lima` so that we can have enough space for the length of the socket path,
// which can be only 104 characters on macOS.
func LimaDir() (string, error) {
dir := os.Getenv("LIMA_HOME")
if dir == "" {
homeDir, err := os.UserHomeDir()
if err != nil {
return "", err
}
dir = filepath.Join(homeDir, DotLima)
}
return dir, nil
}
// LimaConfigDir returns the path of the config directory, $LIMA_HOME/_config.
func LimaConfigDir() (string, error) {
limaDir, err := LimaDir()
if err != nil {
return "", err
}
return filepath.Join(limaDir, filenames.ConfigDir), nil
}
// Instances returns the names of the instances under LimaDir.
func Instances() ([]string, error) {
limaDir, err := LimaDir()
if err != nil {
return nil, err
}
limaDirList, err := os.ReadDir(limaDir)
if err != nil {
return nil, err
}
var names []string
for _, f := range limaDirList {
if strings.HasPrefix(f.Name(), ".") || strings.HasPrefix(f.Name(), "_") {
continue
}
names = append(names, f.Name())
}
return names, nil
}
// InstanceDir returns the instance dir.
// InstanceDir does not check whether the instance exists
func InstanceDir(name string) (string, error) {
if err := identifiers.Validate(name); err != nil {
return "", err
}
limaDir, err := LimaDir()
if err != nil {
return "", err
}
dir := filepath.Join(limaDir, name)
return dir, nil
}
// LoadYAMLByFilePath loads and validates the yaml.
func LoadYAMLByFilePath(filePath string) (*limayaml.LimaYAML, error) {
yContent, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
y, err := limayaml.Load(yContent, filePath)
if err != nil {
return nil, err
}
if err := limayaml.Validate(*y); err != nil {
return nil, err
}
return y, nil
}
| ["\"LIMA_HOME\""] | [] | ["LIMA_HOME"] | [] | ["LIMA_HOME"] | go | 1 | 0 | |
dicebot.py
|
import asyncio
import os
import logging
import discord
from discord.ext import commands
from discord.ext.commands import DefaultHelpCommand
from dotenv import load_dotenv
# logs data to the discord.log file, if this file doesn't exist at runtime it is created automatically
from cogs.utilities import Utilities
logger = logging.getLogger('discord')
logger.setLevel(logging.INFO) # logging levels: NOTSET (all), DEBUG (bot interactions), INFO (bot connected etc)
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s'))
logger.addHandler(handler)
# load the private discord token from .env file.
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
# Initialise the Bot object with an accessible help Command object
helpCommand = DefaultHelpCommand()
bot = commands.Bot(
command_prefix="!",
help_command=helpCommand
)
# Setup the General cog with the help command
generalCog = Utilities()
bot.add_cog(generalCog)
helpCommand.cog = generalCog
@bot.event
async def on_ready():
"""
Do something when the bot is ready to use.
"""
print(f'{bot.user.name} has connected to Discord!')
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name="Rolling Dice"))
async def activity_loop():
"""
Cycles through different bot activities
"""
await bot.wait_until_ready()
i = 0
while not bot.is_closed():
if i > 1:
i = 0
status = ['the kitchen', 'the bathroom']
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=status[i]))
await asyncio.sleep(4)
i += 1
@bot.event
async def on_message(message):
await bot.process_commands(message)
@bot.event
async def on_command_error(ctx, error):
"""
Handle the Error message in a nice way.
"""
if isinstance(error, commands.errors.CheckFailure):
await ctx.send(error)
elif isinstance(error, commands.errors.MissingRequiredArgument):
await ctx.send('You are missing a required argument.')
elif isinstance(error, commands.errors.CommandNotFound):
pass
else:
        await ctx.send('An unexpected error occurred while running that command.')
logging.error(error)
# Start the bot
bot.run(TOKEN)
| [] | [] | ["DISCORD_TOKEN"] | [] | ["DISCORD_TOKEN"] | python | 1 | 0 | |
internal_utils.go
|
package wslreg
import "os"
func getWindowsDirectory() string {
dir := os.Getenv("SYSTEMROOT")
if dir != "" {
return dir
}
dir = os.Getenv("WINDIR")
if dir != "" {
return dir
}
return "C:\\WINDOWS"
}
| ["\"SYSTEMROOT\"", "\"WINDIR\""] | [] | ["SYSTEMROOT", "WINDIR"] | [] | ["SYSTEMROOT", "WINDIR"] | go | 2 | 0 | |
src/__about__.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021 shmilee
import os
import sys
VERSION = (0, 8, 0)
__description__ = "Gyrokinetic Toroidal Code Data Processing tools written in python3"
__url__ = "https://github.com/shmilee/gdpy3.git"
__version__ = '.'.join(map(str, VERSION))
__status__ = "4 - Beta"
__author__ = "shmilee"
__email__ = "[email protected]"
__license__ = "MIT"
__copyright__ = 'Copyright (c) 2017-2021 shmilee'
def _get_beside_path(name):
'''
Check directory or file *name* beside __about__.py or not.
Return abspath or ''
'''
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), name)
if os.path.exists(path):
return path
else:
return ''
def _get_data_path(name):
'''Get the path to *name*.'''
path = _get_beside_path(name)
if os.path.isdir(path):
return path
# pyinstaller frozen check
if getattr(sys, 'frozen', None):
path = os.path.join(sys._MEIPASS, name)
if os.path.isdir(path):
return path
path = os.path.join(os.path.dirname(sys.executable), name)
if os.path.isdir(path):
return path
path = os.path.join(sys.path[0], name)
if os.path.isdir(path):
return path
raise RuntimeError("Can't find the %s files!" % name)
__data_path__ = _get_data_path('gdpy3-data')
__icon_name__ = 'gdpy3_128'
__icon_path__ = os.path.join(__data_path__, 'icon', '%s.png' % __icon_name__)
# see: sysconfig._getuserbase()
def _get_userbase():
env_base = os.getenv("GDPY3_USERBASE", None)
if env_base:
return env_base
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
return joinuser(base, "Gdpy3")
if sys.platform == "darwin" and sys._framework:
return joinuser("~", "Library", "Gdpy3")
return joinuser("~", ".Gdpy3")
__ENABLE_USERBASE__ = True
__userbase__ = _get_userbase()
if __ENABLE_USERBASE__:
if not os.path.exists(__userbase__):
os.mkdir(__userbase__)
if __userbase__ not in sys.path:
sys.path.append(__userbase__)
def _git_versionstr_read(vfile):
'''read versionstr, v{X}.{Y}.{Z}-{N}-g{commit}'''
if os.path.isfile(vfile):
with open(vfile, 'r', encoding='utf-8') as f:
return f.readline()
else:
upath = _get_beside_path('utils.py')
if os.path.isfile(upath):
import importlib.util
spec = importlib.util.spec_from_file_location('utils.py', upath)
utils = importlib.util.module_from_spec(spec)
spec.loader.exec_module(utils)
run_child_cmd = getattr(utils, 'run_child_cmd')
git = 'git.exe' if os.name == 'nt' else 'git'
code, stdout, stderr = run_child_cmd(
[git, 'describe', '--tags', '--abbrev=40'],
cwd=os.path.dirname(upath))
if code == 0:
# example, v0.6.1-21-gbed56cc...
return stdout
# fallback
return ''
def _git_versionstr_write(vfile):
'''write versionstr'''
vstr = _git_versionstr_read('vfile-not-exists')
if os.path.isfile(vfile) and vstr == '':
# no need to write
return
with open(vfile, 'w', encoding='utf-8') as f:
f.write(vstr)
def _git_versionstr_fmt(vfile):
'''
1. v{X}.{Y}.{Z}-{N}-g{commit} --> (X, Y, Z, N, commit)
2. '' --> (X, Y, Z, 0, None)
'''
import re
vstr = _git_versionstr_read(vfile)
if vstr:
        m = re.match(r'v(\d+).(\d+).(\d+)-(\d+)-g(.*)', vstr)
if m:
# int*4, str
return tuple(int(i) if i.isdigit() else i for i in m.groups())
# fallback
return (*VERSION, 0, None)
_git_versionstr_file = 'git-version'
GITVERSION = _git_versionstr_fmt(
os.path.join(__data_path__, _git_versionstr_file))
if GITVERSION[4]:
__gversion__ = "%s.r%s" % (__version__, GITVERSION[3])
else:
__gversion__ = __version__
| [] | [] | ["APPDATA", "GDPY3_USERBASE"] | [] | ["APPDATA", "GDPY3_USERBASE"] | python | 2 | 0 | |
backend/twiliosmssender.py
|
import os
import pymongo
import json
import time
from twilio.rest import Client
def dummy(request):
"""Responds to any HTTP request.
Args:
request (flask.Request): HTTP request object.
Returns:
The response text or any set of values that can be turned into a
Response object using
`make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
"""
if request.method == 'OPTIONS':
        # CORS preflight: allow POST from any origin with any request
        # headers (matching the Access-Control-* headers set below)
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'POST',
'Access-Control-Allow-Headers': '*',
'Access-Control-Max-Age': '3600',
'Access-Control-Allow-Credentials': 'true'
}
return ('', 204, headers)
# Set CORS headers for main requests
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Credentials': 'true'
}
request_json = request.get_json()
account_sid = os.environ.get('twiliosid')
auth_token = os.environ.get('twiliotoken')
sender = os.environ.get('sender')
client = Client(account_sid, auth_token)
if request_json:
# payload["sender"] = request_json['sender']
receiver = request_json['receiver']
text = request_json['message']
retjson = {}
if request_json['token'] == "REDACTED":
message = client.messages.create(
from_=sender,
body=text,
to=receiver
)
result=message.sid
retjson['result'] = 'sent successfully'
else:
retjson['result'] = 'invalid token'
# retjson['sid'] = result
# retjson['result'] = "successfully sent"
return json.dumps(retjson)
retstr = "action not done"
if request.args and 'message' in request.args:
return request.args.get('message')
elif request_json and 'message' in request_json:
return request_json['message']
else:
return retstr
| [] | [] | ["sender", "twiliotoken", "twiliosid"] | [] | ["sender", "twiliotoken", "twiliosid"] | python | 3 | 0 | |
pytorch/pytorch-engine/src/main/java/ai/djl/pytorch/jni/LibUtils.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.pytorch.jni;
import ai.djl.util.Platform;
import ai.djl.util.Utils;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Stream;
import java.util.zip.GZIPInputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Utilities for finding the PyTorch Engine binary on the System.
*
* <p>The Engine will be searched for in a variety of locations in the following order:
*
* <ol>
* <li>In the path specified by the PYTORCH_LIBRARY_PATH environment variable
* <li>In a jar file location in the classpath. These jars can be created with the pytorch-native
* module.
* </ol>
*/
@SuppressWarnings("MissingJavadocMethod")
public final class LibUtils {
private static final Logger logger = LoggerFactory.getLogger(LibUtils.class);
private static final String LIB_NAME = "djl_torch";
private static final String NATIVE_LIB_NAME = "torch";
private static final Pattern VERSION_PATTERN =
Pattern.compile("(\\d+\\.\\d+\\.\\d+(-\\w)?)(-SNAPSHOT)?(-\\d+)?");
private LibUtils() {}
public static void loadLibrary() {
// TODO workaround to make it work on Android Studio
// It should search for several places to find the native library
if (System.getProperty("java.vendor.url").equals("http://www.android.com/")) {
System.loadLibrary(LIB_NAME); // NOPMD
return;
}
String libName = findOverrideLibrary();
if (libName == null) {
String nativeLibDir = findNativeLibrary();
if (nativeLibDir != null) {
libName = copyJniLibraryFromClasspath(Paths.get(nativeLibDir));
} else {
throw new IllegalStateException("Native library not found");
}
}
logger.debug("Loading pytorch library from: {}", libName);
if (System.getProperty("os.name").startsWith("Win")) {
loadWinDependencies(libName);
}
System.load(libName); // NOPMD
}
private static void loadWinDependencies(String libName) {
Path libDir = Paths.get(libName).getParent();
if (libDir == null) {
throw new IllegalArgumentException("Invalid library path!");
}
try (Stream<Path> paths = Files.walk(libDir)) {
paths.filter(
path -> {
String name = path.getFileName().toString();
return !"c10_cuda.dll".equals(name)
&& !"torch.dll".equals(name)
&& !"torch_cpu.dll".equals(name)
&& !"torch_cuda.dll".equals(name)
&& !"fbgemm.dll".equals(name)
&& Files.isRegularFile(path)
&& !name.endsWith("djl_torch.dll");
})
.map(path -> path.toAbsolutePath().toString())
.forEach(System::load);
System.load(libDir.resolve("fbgemm.dll").toAbsolutePath().toString());
System.load(libDir.resolve("torch_cpu.dll").toAbsolutePath().toString());
if (Files.exists(libDir.resolve("c10_cuda.dll"))) {
// Windows System.load is global load
System.load(libDir.resolve("c10_cuda.dll").toAbsolutePath().toString());
System.load(libDir.resolve("torch_cuda.dll").toAbsolutePath().toString());
}
System.load(libDir.resolve("torch.dll").toAbsolutePath().toString());
} catch (IOException e) {
throw new IllegalArgumentException("Folder not exist! " + libDir, e);
}
}
private static String findOverrideLibrary() {
String libPath = System.getenv("PYTORCH_LIBRARY_PATH");
if (libPath != null) {
String libName = findLibraryInPath(libPath);
if (libName != null) {
return libName;
}
}
libPath = System.getProperty("java.library.path");
if (libPath != null) {
return findLibraryInPath(libPath);
}
return null;
}
private static String findLibraryInPath(String libPath) {
String[] paths = libPath.split(File.pathSeparator);
List<String> mappedLibNames = Collections.singletonList(System.mapLibraryName(LIB_NAME));
for (String path : paths) {
File p = new File(path);
if (!p.exists()) {
continue;
}
for (String name : mappedLibNames) {
if (p.isFile() && p.getName().endsWith(name)) {
return p.getAbsolutePath();
}
File file = new File(path, name);
if (file.exists() && file.isFile()) {
return file.getAbsolutePath();
}
}
}
return null;
}
private static String copyJniLibraryFromClasspath(Path nativeDir) {
String name = System.mapLibraryName(LIB_NAME);
Platform platform = Platform.fromSystem();
String classifier = platform.getClassifier();
String flavor = platform.getFlavor();
if (flavor.isEmpty()) {
flavor = "cpu";
}
Properties prop = new Properties();
try (InputStream stream =
LibUtils.class.getResourceAsStream(
"/jnilib/" + classifier + "/" + flavor + "/pytorch.properties")) {
prop.load(stream);
} catch (IOException e) {
throw new IllegalStateException("Cannot find pytorch property file", e);
}
String version = prop.getProperty("version");
Path path = nativeDir.resolve(version + flavor + name);
if (Files.exists(path)) {
return path.toAbsolutePath().toString();
}
Path tmp = null;
try (InputStream stream =
LibUtils.class.getResourceAsStream(
"/jnilib/" + classifier + "/" + flavor + "/" + name)) {
tmp = Files.createTempFile(nativeDir, "jni", "tmp");
Files.copy(stream, tmp, StandardCopyOption.REPLACE_EXISTING);
Utils.moveQuietly(tmp, path);
return path.toAbsolutePath().toString();
} catch (IOException e) {
throw new IllegalStateException("Cannot copy jni files", e);
} finally {
if (tmp != null) {
Utils.deleteQuietly(tmp);
}
}
}
private static synchronized String findNativeLibrary() {
List<URL> urls;
try {
urls =
Collections.list(
Thread.currentThread()
.getContextClassLoader()
.getResources("native/lib/pytorch.properties"));
} catch (IOException e) {
return null;
}
// No native jars
if (urls.isEmpty()) {
return null;
}
Platform systemPlatform = Platform.fromSystem();
try {
Platform matching = null;
Platform placeholder = null;
for (URL url : urls) {
Platform platform = Platform.fromUrl(url);
if (platform.isPlaceholder()) {
placeholder = platform;
} else if (platform.matches(systemPlatform)) {
matching = platform;
break;
}
}
if (matching != null) {
return copyNativeLibraryFromClasspath(matching);
}
if (placeholder != null) {
try {
return downloadPyTorch(placeholder);
} catch (IOException e) {
throw new IllegalStateException("Failed to download PyTorch native library", e);
}
}
} catch (IOException e) {
throw new IllegalStateException(
"Failed to read PyTorch native library jar properties", e);
}
throw new IllegalStateException(
"Your PyTorch native library jar does not match your operating system. Make sure the Maven Dependency Classifier matches your system type.");
}
private static String copyNativeLibraryFromClasspath(Platform platform) {
Path tmp = null;
String version = platform.getVersion();
String flavor = platform.getFlavor();
String classifier = platform.getClassifier();
try {
String libName = System.mapLibraryName(NATIVE_LIB_NAME);
Path cacheDir = getCacheDir();
Path dir = cacheDir.resolve(version + flavor + '-' + classifier);
Path path = dir.resolve(libName);
if (Files.exists(path)) {
return dir.toAbsolutePath().toString();
}
Files.createDirectories(cacheDir);
tmp = Files.createTempDirectory(cacheDir, "tmp");
for (String file : platform.getLibraries()) {
String libPath = "/native/lib/" + file;
try (InputStream is = LibUtils.class.getResourceAsStream(libPath)) {
Files.copy(is, tmp.resolve(file), StandardCopyOption.REPLACE_EXISTING);
}
}
Utils.moveQuietly(tmp, dir);
return dir.toAbsolutePath().toString();
} catch (IOException e) {
throw new IllegalStateException("Failed to extract PyTorch native library", e);
} finally {
if (tmp != null) {
Utils.deleteQuietly(tmp);
}
}
}
private static String downloadPyTorch(Platform platform) throws IOException {
String version = platform.getVersion();
String flavor = platform.getFlavor();
if (flavor.isEmpty()) {
flavor = "cpu";
}
String classifier = platform.getClassifier();
String os = platform.getOsPrefix();
String libName = System.mapLibraryName(NATIVE_LIB_NAME);
Path cacheDir = getCacheDir();
Path dir = cacheDir.resolve(version + flavor + '-' + classifier);
Path path = dir.resolve(libName);
if (Files.exists(path)) {
return dir.toAbsolutePath().toString();
}
// if files not found
Files.createDirectories(cacheDir);
Path tmp = Files.createTempDirectory(cacheDir, "tmp");
Matcher matcher = VERSION_PATTERN.matcher(version);
if (!matcher.matches()) {
throw new IllegalArgumentException("Unexpected version: " + version);
}
String link = "https://djl-ai.s3.amazonaws.com/publish/pytorch-" + matcher.group(1);
try (InputStream is = new URL(link + "/files.txt").openStream()) {
List<String> lines = Utils.readLines(is);
for (String line : lines) {
if (line.startsWith(flavor + '/' + os + '/')) {
URL url = new URL(link + '/' + line);
String fileName = line.substring(line.lastIndexOf('/') + 1, line.length() - 3);
logger.info("Downloading {} ...", fileName);
try (InputStream fis = new GZIPInputStream(url.openStream())) {
Files.copy(fis, tmp.resolve(fileName), StandardCopyOption.REPLACE_EXISTING);
}
}
}
Utils.moveQuietly(tmp, dir);
return dir.toAbsolutePath().toString();
} finally {
if (tmp != null) {
Utils.deleteQuietly(tmp);
}
}
}
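// Resolution order for the cache directory, as implemented below: the ENGINE_CACHE_DIR system
// property, then the ENGINE_CACHE_DIR environment variable, then the DJL_CACHE_DIR system
// property and environment variable; if none is set, ~/.pytorch/cache is used.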
private static Path getCacheDir() {
String cacheDir = System.getProperty("ENGINE_CACHE_DIR");
if (cacheDir == null || cacheDir.isEmpty()) {
cacheDir = System.getenv("ENGINE_CACHE_DIR");
if (cacheDir == null || cacheDir.isEmpty()) {
cacheDir = System.getProperty("DJL_CACHE_DIR");
if (cacheDir == null || cacheDir.isEmpty()) {
cacheDir = System.getenv("DJL_CACHE_DIR");
if (cacheDir == null || cacheDir.isEmpty()) {
String userHome = System.getProperty("user.home");
return Paths.get(userHome, ".pytorch/cache");
}
}
return Paths.get(cacheDir, "pytorch");
}
}
return Paths.get(cacheDir, ".pytorch/cache");
}
}
|
[
"\"PYTORCH_LIBRARY_PATH\"",
"\"ENGINE_CACHE_DIR\"",
"\"DJL_CACHE_DIR\""
] |
[] |
[
"DJL_CACHE_DIR",
"ENGINE_CACHE_DIR",
"PYTORCH_LIBRARY_PATH"
] |
[]
|
["DJL_CACHE_DIR", "ENGINE_CACHE_DIR", "PYTORCH_LIBRARY_PATH"]
|
java
| 3 | 0 | |
s2e_env/commands/build.py
|
"""
Copyright (c) 2017 Dependable Systems Laboratory, EPFL
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import glob
import logging
import os
import sys
import sh
from sh import ErrorReturnCode
from s2e_env.command import EnvCommand, CommandError
logger = logging.getLogger('build')
class Command(EnvCommand):
"""
Builds S2E.
This command also allows the user to specify a list of S2E components (e.g.
QEMU, libs2e, Z3, etc.) to force a rebuild for.
"""
help = 'Build S2E.'
def __init__(self):
super().__init__()
self._make = None
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument('-g', '--debug', action='store_true',
help='Build S2E in debug mode')
parser.add_argument('-r', '--rebuild-components', nargs='+',
required=False, dest='components',
help='List of S2E components to clean prior to '
'the build process')
def handle(self, *args, **options):
# Exit if the makefile doesn't exist
makefile = self.env_path('source', 'Makefile')
if not os.path.isfile(makefile):
raise CommandError('No makefile found in %s' %
os.path.dirname(makefile))
# If the build directory doesn't exist, create it
build_dir = self.env_path('build')
if not os.path.isdir(build_dir):
os.mkdir(build_dir)
# Set up some environment variables
env_vars = os.environ.copy()
env_vars['S2E_PREFIX'] = self.install_path()
components = options['components']
self._make = sh.Command('make').bake(directory=build_dir, file=makefile, _env=env_vars)
# If the user has specified any components to rebuild, do this before
# the build
if components:
self._rebuild_components(components)
try:
# Run make
if options['debug']:
logger.info('Building S2E (debug) in %s', build_dir)
self._make('all-debug', _out=sys.stdout, _err=sys.stderr)
else:
logger.info('Building S2E (release) in %s', build_dir)
self._make('install', _out=sys.stdout, _err=sys.stderr)
except ErrorReturnCode as e:
raise CommandError(e) from e
logger.success('S2E built')
def _rebuild_components(self, components):
"""
Cleans components to force them to be rebuilt.
After successfully building an S2E component (e.g. QEMU, libs2e, Z3,
etc.), the S2E Makefile will create a "stamp" in the S2E build
directory. Subsequent builds will first check if a component's stamp
exists, and if it does the build process will not rebuild. To force a
rebuild, the stamp must be deleted. This function will delete the
specified stamps to force a rebuild.
"""
# We are only interested in components that create a "stamp" in the
# "stamps" directory. The "stamps/" prefix is stripped from each
# component name below
stamps = [component[7:] for component in self._make('list').strip().split(' ')
if component.startswith('stamps/')]
# The user can also specify "libs2e" rather than the complete
# "libs2e-{release,debug}-make" stamp
stamp_prefixes = {component.split('-')[0] for component in stamps}
stamps_to_delete = []
for component in components:
# Check if the specified component is valid "as is"
if component in stamps:
stamps_to_delete.append(self.env_path('build', 'stamps', component))
continue
# Check if the user has specified a valid component prefix
# TODO: This will delete both the debug and release stamps (if they exist)
if component in stamp_prefixes:
stamps_to_delete.extend(glob.glob(self.env_path('build', 'stamps', '%s-*' % component)))
continue
# If we've made it this far, the component is not valid
raise CommandError('Component %s is not valid. Valid components '
'are: %s' % (component, ', '.join(stamp_prefixes)))
# Delete the stamps, ignoring any stamps that do not exist
for stamp_to_delete in stamps_to_delete:
try:
os.remove(stamp_to_delete)
logger.info('Deleted %s to force a rebuild', stamp_to_delete)
except OSError:
pass
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/yea/_setup.py
|
import importlib
import multiprocessing as mp
import os
def _setup_params():
names = os.environ.get("YEA_PARAM_NAMES")
values = os.environ.get("YEA_PARAM_VALUES")
if not names or not values:
return {}
names = names.split(",")
values = values.split(",")
params = dict(zip(names, values))
return params
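# Illustrative example: with YEA_PARAM_NAMES=":yea:start_method,num_workers" (num_workers is a
# hypothetical parameter) and YEA_PARAM_VALUES="spawn,4", _setup_params() returns
# {":yea:start_method": "spawn", "num_workers": "4"} (values remain strings).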
def setup_mp(params):
start_method = params.get(":yea:start_method")
if not start_method:
return
print(f"INFO: start_method= {start_method}")
mp.set_start_method(start_method)
# TODO: check mp setup?
def setup_plugins(params):
plugins = os.environ.get("YEA_PLUGINS")
if not plugins:
return
plugins = plugins.split(",")
for plug in plugins:
mod_name = f"yea_{plug}"
mod = importlib.import_module(mod_name)
mod_setup = getattr(mod, "setup", None)
if not mod_setup:
continue
mod_setup()
def setup():
p = _setup_params()
setup_mp(params=p)
setup_plugins(params=p)
|
[] |
[] |
[
"YEA_PARAM_VALUES",
"YEA_PLUGINS",
"YEA_PARAM_NAMES"
] |
[]
|
["YEA_PARAM_VALUES", "YEA_PLUGINS", "YEA_PARAM_NAMES"]
|
python
| 3 | 0 | |
cmd/on.go
|
package cmd
import (
"fmt"
"os"
"github.com/wkhub/wk/shell"
"github.com/spf13/cobra"
"github.com/wkhub/wk/user"
)
// onCmd represents the on command
var onCmd = &cobra.Command{
Use: "on <project>",
Short: "Work on a project",
Long: `Open a subshell on the project path`,
Args: cobra.ExactArgs(1),
Annotations: map[string]string{
"source": "true",
},
Run: func(cmd *cobra.Command, args []string) {
name := args[0]
currentUser := user.Current()
project := currentUser.FindProject(name)
if project == nil {
if isEval {
fmt.Printf("echo 'Unknown project %s'; false\n", name)
os.Exit(1)
} else {
fmt.Println("Unknown project", name)
os.Exit(1)
}
}
session := shell.NewSession(isEval)
project.Contribute(&session)
if project.Config.IsSet("activate") {
session.AddCommand(project.Config.GetString("activate"))
}
if isEval {
currentUser.Shell().Eval(session)
} else {
fmt.Printf("Opening project %s (%s)\n", project.Name, project.Root())
currentUser.Shell().Run(session)
fmt.Printf("Exiting project %s\n", project.Name)
}
},
}
func init() {
rootCmd.AddCommand(onCmd)
onCmd.Flags().BoolP("ide", "i", false, "Launch ide")
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
backend/wishlist/settings/development.py
|
import os

from .base import *
DEBUG = True
ALLOWED_HOSTS = []
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get("POSTGRES_DB"),
'USER': os.environ.get("POSTGRES_USER"),
'PASSWORD': os.environ.get("POSTGRES_PASSWORD"),
'HOST': 'db',
'PORT': 5432,
}
}
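# Example environment (illustrative values only): POSTGRES_DB=wishlist, POSTGRES_USER=wishlist,
# POSTGRES_PASSWORD=<secret>. The host 'db' is expected to resolve to the database container
# (e.g. a docker-compose service of that name).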
|
[] |
[] |
[
"POSTGRES_PASSWORD",
"POSTGRES_USER",
"POSTGRES_DB"
] |
[]
|
["POSTGRES_PASSWORD", "POSTGRES_USER", "POSTGRES_DB"]
|
python
| 3 | 0 | |
platipy/imaging/projects/cardiac/run.py
|
# Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import SimpleITK as sitk
import numpy as np
from loguru import logger
from platipy.imaging.registration.utils import apply_transform, convert_mask_to_reg_structure
from platipy.imaging.registration.linear import (
linear_registration,
)
from platipy.imaging.registration.deformable import (
fast_symmetric_forces_demons_registration,
)
from platipy.imaging.label.fusion import (
process_probability_image,
compute_weight_map,
combine_labels,
)
from platipy.imaging.label.iar import run_iar
from platipy.imaging.utils.vessel import vessel_spline_generation
from platipy.imaging.utils.valve import (
generate_valve_from_great_vessel,
generate_valve_using_cylinder,
)
from platipy.imaging.utils.conduction import (
geometric_sinoatrialnode,
geometric_atrioventricularnode,
)
from platipy.imaging.utils.crop import label_to_roi, crop_to_roi
from platipy.imaging.generation.mask import extend_mask
from platipy.imaging.label.utils import binary_encode_structure_list, correct_volume_overlap
ATLAS_PATH = "/atlas"
if "ATLAS_PATH" in os.environ:
ATLAS_PATH = os.environ["ATLAS_PATH"]
CARDIAC_SETTINGS_DEFAULTS = {
"atlas_settings": {
"atlas_id_list": [
"03",
"05",
"08",
"10",
"11",
"12",
"13",
"16",
"24",
"35",
],
"atlas_structure_list": [
"AORTICVALVE",
"ASCENDINGAORTA",
"LANTDESCARTERY",
"LCIRCUMFLEXARTERY",
"LCORONARYARTERY",
"LEFTATRIUM",
"LEFTVENTRICLE",
"MITRALVALVE",
"PULMONARYARTERY",
"PULMONICVALVE",
"RCORONARYARTERY",
"RIGHTATRIUM",
"RIGHTVENTRICLE",
"SVC",
"TRICUSPIDVALVE",
"WHOLEHEART",
],
"atlas_path": ATLAS_PATH,
"atlas_image_format": "Case_{0}/Images/Case_{0}_CROP.nii.gz",
"atlas_label_format": "Case_{0}/Structures/Case_{0}_{1}_CROP.nii.gz",
"crop_atlas_to_structures": False,
"crop_atlas_expansion_mm": (20, 20, 40),
"guide_structure_name": "WHOLEHEART",
"superior_extension": 30,
},
"auto_crop_target_image_settings": {
"expansion_mm": [20, 20, 40],
},
"linear_registration_settings": {
"reg_method": "affine",
"shrink_factors": [16, 8, 4],
"smooth_sigmas": [0, 0, 0],
"sampling_rate": 0.75,
"default_value": -1000,
"number_of_iterations": 50,
"metric": "mean_squares",
"optimiser": "gradient_descent_line_search",
"verbose": False,
},
"structure_guided_registration_settings": {
"isotropic_resample": True,
"resolution_staging": [
16,
8,
2,
], # specify voxel size (mm) since isotropic_resample is set
"iteration_staging": [50, 50, 50],
"smoothing_sigmas": [0, 0, 0],
"ncores": 8,
"default_value": 0,
"verbose": False,
},
"deformable_registration_settings": {
"isotropic_resample": True,
"resolution_staging": [
6,
4,
2,
1,
], # specify voxel size (mm) since isotropic_resample is set
"iteration_staging": [200, 150, 125, 100],
"smoothing_sigmas": [0, 0, 0, 0],
"ncores": 8,
"default_value": 0,
"verbose": False,
},
"iar_settings": {
"reference_structure": False,
"smooth_distance_maps": True,
"smooth_sigma": 1,
"z_score_statistic": "mad",
"outlier_method": "iqr",
"outlier_factor": 1.5,
"min_best_atlases": 5,
"project_on_sphere": False,
},
"label_fusion_settings": {
"vote_type": "unweighted",
"vote_params": None,
"optimal_threshold": {
"AORTICVALVE": 0.5,
"ASCENDINGAORTA": 0.44,
"LEFTATRIUM": 0.40,
"LEFTVENTRICLE": 0.45,
"MITRALVALVE": 0.5,
"PULMONARYARTERY": 0.46,
"PULMONICVALVE": 0.5,
"RIGHTATRIUM": 0.38,
"RIGHTVENTRICLE": 0.42,
"SVC": 0.44,
"TRICUSPIDVALVE": 0.5,
"WHOLEHEART": 0.5,
},
},
"vessel_spline_settings": {
"vessel_name_list": [
"LANTDESCARTERY",
"LCIRCUMFLEXARTERY",
"LCORONARYARTERY",
"RCORONARYARTERY",
],
"vessel_radius_mm_dict": {
"LANTDESCARTERY": 2,
"LCIRCUMFLEXARTERY": 2,
"LCORONARYARTERY": 2,
"RCORONARYARTERY": 2,
},
"scan_direction_dict": {
"LANTDESCARTERY": "z",
"LCIRCUMFLEXARTERY": "z",
"LCORONARYARTERY": "x",
"RCORONARYARTERY": "z",
},
"stop_condition_type_dict": {
"LANTDESCARTERY": "count",
"LCIRCUMFLEXARTERY": "count",
"LCORONARYARTERY": "count",
"RCORONARYARTERY": "count",
},
"stop_condition_value_dict": {
"LANTDESCARTERY": 2,
"LCIRCUMFLEXARTERY": 2,
"LCORONARYARTERY": 2,
"RCORONARYARTERY": 2,
},
},
"geometric_segmentation_settings": {
"run_geometric_algorithms": True,
"geometric_name_suffix": "_GEOMETRIC",
"atlas_structure_names": {
"atlas_left_ventricle": "LEFTVENTRICLE",
"atlas_right_ventricle": "RIGHTVENTRICLE",
"atlas_left_atrium": "LEFTATRIUM",
"atlas_right_atrium": "RIGHTATRIUM",
"atlas_ascending_aorta": "ASCENDINGAORTA",
"atlas_pulmonary_artery": "PULMONARYARTERY",
"atlas_superior_vena_cava": "SVC",
"atlas_whole_heart": "WHOLEHEART",
},
"valve_definitions": {
"mitral_valve_thickness_mm": 10,
"mitral_valve_radius_mm": 15,
"tricuspid_valve_thickness_mm": 10,
"tricuspid_valve_radius_mm": 15,
"pulmonic_valve_thickness_mm": 10,
"aortic_valve_thickness_mm": 10,
},
"conduction_system_definitions": {
"sinoatrial_node_radius_mm": 10,
"atrioventricular_node_radius_mm": 10,
},
},
"postprocessing_settings": {
"run_postprocessing": True,
"binaryfillhole_mm": 3,
"structures_for_binaryfillhole": [
"ASCENDINGAORTA",
"LEFTATRIUM",
"LEFTVENTRICLE",
"RIGHTATRIUM",
"RIGHTVENTRICLE",
"SVC",
"AORTICVALVE",
"MITRALVALVE",
"PULMONICVALVE",
"TRICUSPIDVALVE",
"WHOLEHEART",
],
"structures_for_overlap_correction": [
"ASCENDINGAORTA",
"LEFTATRIUM",
"LEFTVENTRICLE",
"RIGHTATRIUM",
"RIGHTVENTRICLE",
"PULMONARYARTERY",
"SVC",
],
},
"return_atlas_guide_structure": False,
"return_as_cropped": False,
"return_proba_as_contours": False,
}
def run_cardiac_segmentation(img, guide_structure=None, settings=CARDIAC_SETTINGS_DEFAULTS):
"""Runs the atlas-based cardiac segmentation
Args:
img (sitk.Image): The target image to segment.
guide_structure (sitk.Image, optional): Binary mask (e.g. whole heart) used to crop the
    target image and guide the registration. Defaults to None.
settings (dict, optional): Dictionary containing settings for the algorithm.
    Defaults to CARDIAC_SETTINGS_DEFAULTS.
Returns:
tuple: (results, results_prob) dictionaries containing the binary segmentations and the
    probability / encoded structure maps
"""
results = {}
results_prob = {}
return_as_cropped = settings["return_as_cropped"]
"""
Initialisation - Read in atlases
- image files
- structure files
Atlas structure:
'ID': 'Original': 'CT Image' : sitk.Image
'Struct A' : sitk.Image
'Struct B' : sitk.Image
'RIR' : 'CT Image' : sitk.Image
'Transform' : transform parameter map
'Struct A' : sitk.Image
'Struct B' : sitk.Image
'DIR' : 'CT Image' : sitk.Image
'Transform' : displacement field transform
'Weight Map' : sitk.Image
'Struct A' : sitk.Image
'Struct B' : sitk.Image
"""
logger.info("")
# Settings
atlas_path = settings["atlas_settings"]["atlas_path"]
atlas_id_list = settings["atlas_settings"]["atlas_id_list"]
atlas_structure_list = settings["atlas_settings"]["atlas_structure_list"]
atlas_image_format = settings["atlas_settings"]["atlas_image_format"]
atlas_label_format = settings["atlas_settings"]["atlas_label_format"]
crop_atlas_to_structures = settings["atlas_settings"]["crop_atlas_to_structures"]
crop_atlas_expansion_mm = settings["atlas_settings"]["crop_atlas_expansion_mm"]
atlas_set = {}
for atlas_id in atlas_id_list:
atlas_set[atlas_id] = {}
atlas_set[atlas_id]["Original"] = {}
image = sitk.ReadImage(f"{atlas_path}/{atlas_image_format.format(atlas_id)}")
structures = {
struct: sitk.ReadImage(f"{atlas_path}/{atlas_label_format.format(atlas_id, struct)}")
for struct in atlas_structure_list
}
if crop_atlas_to_structures:
logger.info(f"Automatically cropping atlas: {atlas_id}")
original_volume = np.product(image.GetSize())
crop_box_size, crop_box_index = label_to_roi(
structures.values(), expansion_mm=crop_atlas_expansion_mm
)
image = crop_to_roi(image, size=crop_box_size, index=crop_box_index)
final_volume = np.product(image.GetSize())
logger.info(f" > Volume reduced by factor {original_volume/final_volume:.2f}")
for struct in atlas_structure_list:
structures[struct] = crop_to_roi(
structures[struct], size=crop_box_size, index=crop_box_index
)
atlas_set[atlas_id]["Original"]["CT Image"] = image
for struct in atlas_structure_list:
atlas_set[atlas_id]["Original"][struct] = structures[struct]
"""
Step 1 - Automatic cropping
If we have a guide structure:
- use structure to crop target image
Otherwise:
- using a quick registration to register each atlas
- expansion of the bounding box to ensure entire volume of interest is enclosed
- target image is cropped
"""
expansion_mm = settings["auto_crop_target_image_settings"]["expansion_mm"]
if guide_structure:
crop_box_size, crop_box_index = label_to_roi(guide_structure, expansion_mm=expansion_mm)
img_crop = crop_to_roi(img, crop_box_size, crop_box_index)
guide_structure = crop_to_roi(guide_structure, crop_box_size, crop_box_index)
target_reg_structure = convert_mask_to_reg_structure(guide_structure, expansion=2)
else:
quick_reg_settings = {
"reg_method": "similarity",
"shrink_factors": [8],
"smooth_sigmas": [0],
"sampling_rate": 0.75,
"default_value": -1000,
"number_of_iterations": 25,
"final_interp": sitk.sitkLinear,
"metric": "mean_squares",
"optimiser": "gradient_descent_line_search",
}
registered_crop_images = []
logger.info("Running initial Translation tranform to crop image volume")
for atlas_id in atlas_id_list[: min([8, len(atlas_id_list)])]:
logger.info(f" > atlas {atlas_id}")
# Register the atlases
atlas_set[atlas_id]["RIR"] = {}
atlas_image = atlas_set[atlas_id]["Original"]["CT Image"]
reg_image, _ = linear_registration(
img,
atlas_image,
**quick_reg_settings,
)
registered_crop_images.append(sitk.Cast(reg_image, sitk.sitkFloat32))
del reg_image
combined_image = sum(registered_crop_images) / len(registered_crop_images) > -1000
crop_box_size, crop_box_index = label_to_roi(combined_image, expansion_mm=expansion_mm)
img_crop = crop_to_roi(img, crop_box_size, crop_box_index)
logger.info("Calculated crop box:")
logger.info(f" > {crop_box_index}")
logger.info(f" > {crop_box_size}")
logger.info(f" > Vol reduction = {np.product(img.GetSize())/np.product(crop_box_size):.2f}")
"""
Step 2 - Rigid registration of target images
- Individual atlas images are registered to the target
- The transformation is used to propagate the labels onto the target
"""
linear_registration_settings = settings["linear_registration_settings"]
logger.info(
f"Running {linear_registration_settings['reg_method']} tranform to align atlas images"
)
for atlas_id in atlas_id_list:
# Register the atlases
logger.info(f" > atlas {atlas_id}")
atlas_set[atlas_id]["RIR"] = {}
if guide_structure:
guide_structure_name = settings["atlas_settings"]["guide_structure_name"]
target_reg_image = target_reg_structure
atlas_reg_image = convert_mask_to_reg_structure(
atlas_set[atlas_id]["Original"][guide_structure_name], expansion=2
)
else:
target_reg_image = img_crop
atlas_reg_image = atlas_set[atlas_id]["Original"]["CT Image"]
_, initial_tfm = linear_registration(
target_reg_image,
atlas_reg_image,
**linear_registration_settings,
)
# Save in the atlas dict
atlas_set[atlas_id]["RIR"]["Transform"] = initial_tfm
if guide_structure:
atlas_set[atlas_id]["RIR"]["Reg Mask"] = apply_transform(
input_image=atlas_reg_image,
reference_image=img_crop,
transform=initial_tfm,
default_value=0,
interpolator=sitk.sitkLinear,
)
expanded_atlas_guide_structure = extend_mask(
atlas_set[atlas_id]["Original"][guide_structure_name],
direction=("ax", "sup"),
extension_mm=settings["atlas_settings"]["superior_extension"],
interior_mm_shape=settings["atlas_settings"]["superior_extension"] / 2,
)
atlas_set[atlas_id]["RIR"][guide_structure_name + "EXPANDED"] = apply_transform(
input_image=expanded_atlas_guide_structure,
reference_image=img_crop,
transform=initial_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id]["RIR"]["CT Image"] = apply_transform(
input_image=atlas_set[atlas_id]["Original"]["CT Image"],
reference_image=img_crop,
transform=initial_tfm,
default_value=-1000,
interpolator=sitk.sitkLinear,
)
# sitk.WriteImage(rigid_image, f"./RR_{atlas_id}.nii.gz")
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id]["Original"][struct]
atlas_set[atlas_id]["RIR"][struct] = apply_transform(
input_image=input_struct,
reference_image=img_crop,
transform=initial_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id]["Original"] = None
"""
Step 3 - Deformable image registration
- Using Fast Symmetric Diffeomorphic Demons
"""
if guide_structure:
structure_guided_registration_settings = settings["structure_guided_registration_settings"]
logger.info("Running structure-guided deformable registration on atlas labels")
for atlas_id in atlas_id_list:
logger.info(f" > atlas {atlas_id}")
# Register the atlases
atlas_set[atlas_id]["DIR_STRUCT"] = {}
deform_image, struct_guided_tfm, _ = fast_symmetric_forces_demons_registration(
target_reg_structure,
atlas_set[atlas_id]["RIR"]["Reg Mask"],
**structure_guided_registration_settings,
)
# Save in the atlas dict
atlas_set[atlas_id]["DIR_STRUCT"]["Reg Mask"] = deform_image
atlas_set[atlas_id]["DIR_STRUCT"]["Transform"] = struct_guided_tfm
atlas_set[atlas_id]["DIR_STRUCT"]["CT Image"] = apply_transform(
input_image=atlas_set[atlas_id]["RIR"]["CT Image"],
transform=struct_guided_tfm,
default_value=-1000,
interpolator=sitk.sitkLinear,
)
atlas_set[atlas_id]["DIR_STRUCT"][guide_structure_name + "EXPANDED"] = apply_transform(
input_image=atlas_set[atlas_id]["RIR"][guide_structure_name + "EXPANDED"],
reference_image=img_crop,
transform=struct_guided_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
# sitk.WriteImage(deform_image, f"./DIR_STRUCT_{atlas_id}.nii.gz")
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id]["RIR"][struct]
atlas_set[atlas_id]["DIR_STRUCT"][struct] = apply_transform(
input_image=input_struct,
transform=struct_guided_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id]["RIR"] = None
# Settings
deformable_registration_settings = settings["deformable_registration_settings"]
logger.info("Running DIR to refine atlas image registration")
for atlas_id in atlas_id_list:
logger.info(f" > atlas {atlas_id}")
# Register the atlases
atlas_set[atlas_id]["DIR"] = {}
if guide_structure:
label = "DIR_STRUCT"
else:
label = "RIR"
atlas_reg_image = atlas_set[atlas_id][label]["CT Image"]
target_reg_image = img_crop
if guide_structure:
expanded_atlas_mask = atlas_set[atlas_id]["DIR_STRUCT"][
guide_structure_name + "EXPANDED"
]
expanded_target_mask = extend_mask(
guide_structure,
direction=("ax", "sup"),
extension_mm=settings["atlas_settings"]["superior_extension"],
interior_mm_shape=settings["atlas_settings"]["superior_extension"] / 2,
)
combined_mask = sitk.Maximum(expanded_atlas_mask, expanded_target_mask)
atlas_reg_image = sitk.Mask(atlas_reg_image, combined_mask, outsideValue=-1000)
atlas_reg_image = sitk.Mask(
atlas_reg_image, atlas_reg_image > -400, outsideValue=-1000
)
target_reg_image = sitk.Mask(target_reg_image, combined_mask, outsideValue=-1000)
target_reg_image = sitk.Mask(
target_reg_image, atlas_reg_image > -400, outsideValue=-1000
)
deform_image, dir_tfm, _ = fast_symmetric_forces_demons_registration(
target_reg_image,
atlas_reg_image,
**deformable_registration_settings,
)
# Save in the atlas dict
atlas_set[atlas_id]["DIR"]["Transform"] = dir_tfm
atlas_set[atlas_id]["DIR"]["CT Image"] = apply_transform(
input_image=atlas_set[atlas_id][label]["CT Image"],
transform=dir_tfm,
default_value=-1000,
interpolator=sitk.sitkLinear,
)
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id][label][struct]
atlas_set[atlas_id]["DIR"][struct] = apply_transform(
input_image=input_struct,
transform=dir_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id][label] = None
"""
Step 4 - Iterative atlas removal
- This is an automatic process that will attempt to remove inconsistent atlases from the entire set
"""
# Compute weight maps
# Here we use simple GWV (global weighted voting) as this minimises the potentially negative
# influence of mis-registered atlases
iar_settings = settings["iar_settings"]
if iar_settings["reference_structure"]:
for atlas_id in atlas_id_list:
atlas_image = atlas_set[atlas_id]["DIR"]["CT Image"]
weight_map = compute_weight_map(img_crop, atlas_image, vote_type="global")
atlas_set[atlas_id]["DIR"]["Weight Map"] = weight_map
atlas_set = run_iar(atlas_set=atlas_set, **iar_settings)
else:
logger.info("IAR: No reference structure, skipping iterative atlas removal.")
"""
Step 4 - Vessel Splining
"""
vessel_spline_settings = settings["vessel_spline_settings"]
if len(vessel_spline_settings["vessel_name_list"]) > 0:
segmented_vessel_dict = vessel_spline_generation(
img_crop, atlas_set, **vessel_spline_settings
)
else:
logger.info("No vessel splining required, continue.")
"""
Step 5 - Label Fusion
"""
# Compute weight maps
vote_type = settings["label_fusion_settings"]["vote_type"]
vote_params = settings["label_fusion_settings"]["vote_params"]
# Compute weight maps
for atlas_id in list(atlas_set.keys()):
atlas_image = atlas_set[atlas_id]["DIR"]["CT Image"]
weight_map = compute_weight_map(
img_crop, atlas_image, vote_type=vote_type, vote_params=vote_params
)
atlas_set[atlas_id]["DIR"]["Weight Map"] = weight_map
combined_label_dict = combine_labels(atlas_set, atlas_structure_list)
"""
Step 6 - Paste the cropped structure into the original image space
"""
logger.info("Generating binary segmentations.")
template_img_binary = sitk.Cast((img * 0), sitk.sitkUInt8)
template_img_prob = sitk.Cast((img * 0), sitk.sitkFloat64)
vote_structures = settings["label_fusion_settings"]["optimal_threshold"].keys()
vote_structures = [i for i in vote_structures if i in atlas_structure_list]
for structure_name in vote_structures:
probability_map = combined_label_dict[structure_name]
optimal_threshold = settings["label_fusion_settings"]["optimal_threshold"][structure_name]
binary_struct = process_probability_image(probability_map, optimal_threshold)
if return_as_cropped:
results[structure_name] = binary_struct
if settings["return_proba_as_contours"]:
atlas_contours = [
atlas_set[atlas_id]["DIR"][structure_name] >= 2 for atlas_id in atlas_id_list
]
results_prob[structure_name] = binary_encode_structure_list(atlas_contours)
else:
results_prob[structure_name] = probability_map
# Label fusion also produces a version of the guide structure from the atlas contours.
# We *can* return that, but usually we return the user-supplied guide structure instead;
# the check below handles this.
if (not settings["return_atlas_guide_structure"]) and (guide_structure is not None):
results[guide_structure_name] = guide_structure
results_prob[guide_structure_name] = guide_structure
else:
if settings["return_proba_as_contours"]:
atlas_contours = [
atlas_set[atlas_id]["DIR"][structure_name] >= 2 for atlas_id in atlas_id_list
]
probability_img = binary_encode_structure_list(atlas_contours)
template_img_prob = sitk.Cast((img * 0), sitk.sitkUInt32)
else:
probability_img = probability_map
# Un-crop binary structure
paste_img_binary = sitk.Paste(
template_img_binary,
binary_struct,
binary_struct.GetSize(),
(0, 0, 0),
crop_box_index,
)
results[structure_name] = paste_img_binary
# Un-crop probability map
paste_prob_img = sitk.Paste(
template_img_prob,
probability_img,
probability_img.GetSize(),
(0, 0, 0),
crop_box_index,
)
results_prob[structure_name] = paste_prob_img
# Un-crop the guide structure
if (not settings["return_atlas_guide_structure"]) and (guide_structure is not None):
new_guide_structure = sitk.Paste(
template_img_binary,
guide_structure,
guide_structure.GetSize(),
(0, 0, 0),
crop_box_index,
)
results[guide_structure_name] = new_guide_structure
results_prob[guide_structure_name] = new_guide_structure
for structure_name in vessel_spline_settings["vessel_name_list"]:
binary_struct = segmented_vessel_dict[structure_name]
if return_as_cropped:
results[structure_name] = binary_struct
vessel_list = [
atlas_set[atlas_id]["DIR"][structure_name] for atlas_id in list(atlas_set.keys())
]
else:
# Un-crop binary vessel
paste_img_binary = sitk.Paste(
template_img_binary,
binary_struct,
binary_struct.GetSize(),
(0, 0, 0),
crop_box_index,
)
results[structure_name] = paste_img_binary
vessel_list = []
for atlas_id in list(atlas_set.keys()):
paste_img_binary = sitk.Paste(
template_img_binary,
atlas_set[atlas_id]["DIR"][structure_name],
atlas_set[atlas_id]["DIR"][structure_name].GetSize(),
(0, 0, 0),
crop_box_index,
)
vessel_list.append(paste_img_binary)
# Encode list of vessels
encoded_vessels = binary_encode_structure_list(vessel_list)
results_prob[structure_name] = encoded_vessels
"""
Step 7 - Geometric definitions of cardiac valves and conduction system nodes
"""
geometric_segmentation_settings = settings["geometric_segmentation_settings"]
if geometric_segmentation_settings["run_geometric_algorithms"]:
logger.info("Computing geometric definitions for valves and conduction system.")
geom_atlas_names = geometric_segmentation_settings["atlas_structure_names"]
geom_valve_defs = geometric_segmentation_settings["valve_definitions"]
geom_conduction_defs = geometric_segmentation_settings["conduction_system_definitions"]
# 1 - MITRAL VALVE
mv_name = "MITRALVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[mv_name] = generate_valve_using_cylinder(
label_atrium=results[geom_atlas_names["atlas_left_atrium"]],
label_ventricle=results[geom_atlas_names["atlas_left_ventricle"]],
radius_mm=geom_valve_defs["mitral_valve_radius_mm"],
height_mm=geom_valve_defs["mitral_valve_thickness_mm"],
)
# 2 - TRICUSPID VALVE
tv_name = "TRICUSPIDVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[tv_name] = generate_valve_using_cylinder(
label_atrium=results[geom_atlas_names["atlas_right_atrium"]],
label_ventricle=results[geom_atlas_names["atlas_right_ventricle"]],
radius_mm=geom_valve_defs["tricuspid_valve_radius_mm"],
height_mm=geom_valve_defs["tricuspid_valve_thickness_mm"],
)
# 3 - AORTIC VALVE
av_name = "AORTICVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[av_name] = generate_valve_from_great_vessel(
label_great_vessel=results[geom_atlas_names["atlas_ascending_aorta"]],
label_ventricle=results[geom_atlas_names["atlas_left_ventricle"]],
valve_thickness_mm=geom_valve_defs["aortic_valve_thickness_mm"],
)
# 4 - PULMONIC VALVE
pv_name = "PULMONICVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[pv_name] = generate_valve_from_great_vessel(
label_great_vessel=results[geom_atlas_names["atlas_pulmonary_artery"]],
label_ventricle=results[geom_atlas_names["atlas_right_ventricle"]],
valve_thickness_mm=geom_valve_defs["pulmonic_valve_thickness_mm"],
)
# 5 - SINOATRIAL NODE
san_name = "SAN" + geometric_segmentation_settings["geometric_name_suffix"]
results[san_name] = geometric_sinoatrialnode(
label_svc=results[geom_atlas_names["atlas_superior_vena_cava"]],
label_ra=results[geom_atlas_names["atlas_right_atrium"]],
label_wholeheart=results[geom_atlas_names["atlas_whole_heart"]],
radius_mm=geom_conduction_defs["sinoatrial_node_radius_mm"],
)
# 6 - ATRIOVENTRICULAR NODE
avn_name = "AVN" + geometric_segmentation_settings["geometric_name_suffix"]
results[avn_name] = geometric_atrioventricularnode(
label_la=results[geom_atlas_names["atlas_left_atrium"]],
label_lv=results[geom_atlas_names["atlas_left_ventricle"]],
label_ra=results[geom_atlas_names["atlas_right_atrium"]],
label_rv=results[geom_atlas_names["atlas_right_ventricle"]],
radius_mm=geom_conduction_defs["atrioventricular_node_radius_mm"],
)
"""
Step 8 - Post-processing
"""
postprocessing_settings = settings["postprocessing_settings"]
if postprocessing_settings["run_postprocessing"]:
logger.info("Running post-processing.")
# Remove any smaller components and perform morphological closing (hole filling)
binaryfillhole_img = [
int(postprocessing_settings["binaryfillhole_mm"] / sp) for sp in img.GetSpacing()
]
for structure_name in postprocessing_settings["structures_for_binaryfillhole"]:
if structure_name not in results.keys():
continue
contour_s = results[structure_name]
contour_s = sitk.RelabelComponent(sitk.ConnectedComponent(contour_s)) == 1
contour_s = sitk.BinaryMorphologicalClosing(contour_s, binaryfillhole_img)
results[structure_name] = contour_s
# Remove any overlaps
input_overlap = {
s: results[s] for s in postprocessing_settings["structures_for_overlap_correction"]
}
output_overlap = correct_volume_overlap(input_overlap)
for s in postprocessing_settings["structures_for_overlap_correction"]:
results[s] = output_overlap[s]
if return_as_cropped:
results["CROP_IMAGE"] = img_crop
logger.info("Done!")
return results, results_prob
|
[] |
[] |
[
"ATLAS_PATH"
] |
[]
|
["ATLAS_PATH"]
|
python
| 1 | 0 | |
resources/page/pagemeta/pagemeta_test.go
|
// Copyright 2020 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pagemeta
import (
"fmt"
"testing"
"github.com/neohugo/neohugo/htesting/hqt"
"github.com/neohugo/neohugo/config"
qt "github.com/frankban/quicktest"
)
func TestDecodeBuildConfig(t *testing.T) {
t.Parallel()
c := qt.New(t)
configTempl := `
[_build]
render = %s
list = %s
publishResources = true`
for _, test := range []struct {
args []interface{}
expect BuildConfig
}{
{
[]interface{}{"true", "true"},
BuildConfig{
Render: Always,
List: Always,
PublishResources: true,
set: true,
},
},
{[]interface{}{"true", "false"}, BuildConfig{
Render: Always,
List: Never,
PublishResources: true,
set: true,
}},
{[]interface{}{`"always"`, `"always"`}, BuildConfig{
Render: Always,
List: Always,
PublishResources: true,
set: true,
}},
{[]interface{}{`"never"`, `"never"`}, BuildConfig{
Render: Never,
List: Never,
PublishResources: true,
set: true,
}},
{[]interface{}{`"link"`, `"local"`}, BuildConfig{
Render: Link,
List: ListLocally,
PublishResources: true,
set: true,
}},
{[]interface{}{`"always"`, `"asdfadf"`}, BuildConfig{
Render: Always,
List: Always,
PublishResources: true,
set: true,
}},
} {
cfg, err := config.FromConfigString(fmt.Sprintf(configTempl, test.args...), "toml")
c.Assert(err, qt.IsNil)
bcfg, err := DecodeBuildConfig(cfg.Get("_build"))
c.Assert(err, qt.IsNil)
eq := qt.CmpEquals(hqt.DeepAllowUnexported(BuildConfig{}))
c.Assert(bcfg, eq, test.expect)
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
ghibliservice/wsgi.py
|
"""
WSGI config for ghibliservice project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ghibliservice.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/cmd/compile/internal/ssagen/ssa.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssagen
import (
"bufio"
"bytes"
"cmd/compile/internal/abi"
"fmt"
"go/constant"
"html"
"internal/buildcfg"
"os"
"path/filepath"
"sort"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
"cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
)
var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache
var ssaDump string // early copy of $GOSSAFUNC; the func name to dump output for
var ssaDir string // optional destination for ssa dump file
var ssaDumpStdout bool // whether to dump to stdout
var ssaDumpCFG string // generate CFGs for these phases
const ssaDumpFile = "ssa.html"
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
var ssaDumpInlined []*ir.Func
func DumpInline(fn *ir.Func) {
if ssaDump != "" && ssaDump == ir.FuncName(fn) {
ssaDumpInlined = append(ssaDumpInlined, fn)
}
}
func InitEnv() {
ssaDump = os.Getenv("GOSSAFUNC")
ssaDir = os.Getenv("GOSSADIR")
if ssaDump != "" {
if strings.HasSuffix(ssaDump, "+") {
ssaDump = ssaDump[:len(ssaDump)-1]
ssaDumpStdout = true
}
spl := strings.Split(ssaDump, ":")
if len(spl) > 1 {
ssaDump = spl[0]
ssaDumpCFG = spl[1]
}
}
}
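// As parsed above: GOSSAFUNC=Foo dumps SSA for Foo to ssa.html (or under $GOSSADIR when set),
// a trailing "+" (GOSSAFUNC=Foo+) also dumps to stdout, and a ":phase" suffix
// (e.g. GOSSAFUNC=Foo:lower) selects the phases for which CFGs are generated.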
func InitConfig() {
types_ := ssa.NewTypes()
if Arch.SoftFloat {
softfloatInit()
}
// Generate a few pointer types that are uncommon in the frontend but common in the backend.
// Caching is disabled in the backend, so generating these here avoids allocations.
_ = types.NewPtr(types.Types[types.TINTER]) // *interface{}
_ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING])) // **string
_ = types.NewPtr(types.NewSlice(types.Types[types.TINTER])) // *[]interface{}
_ = types.NewPtr(types.NewPtr(types.ByteType)) // **byte
_ = types.NewPtr(types.NewSlice(types.ByteType)) // *[]byte
_ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING])) // *[]string
_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8
_ = types.NewPtr(types.Types[types.TINT16]) // *int16
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
_ = types.NewPtr(types.ErrorType) // *error
types.NewPtrCacheEnabled = false
ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
ssaConfig.Race = base.Flag.Race
ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
// Set up some runtime functions we'll need to call.
ir.Syms.AssertE2I = typecheck.LookupRuntimeFunc("assertE2I")
ir.Syms.AssertE2I2 = typecheck.LookupRuntimeFunc("assertE2I2")
ir.Syms.AssertI2I = typecheck.LookupRuntimeFunc("assertI2I")
ir.Syms.AssertI2I2 = typecheck.LookupRuntimeFunc("assertI2I2")
ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc")
ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack")
ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn")
ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy")
ir.Syms.Duffzero = typecheck.LookupRuntimeFunc("duffzero")
ir.Syms.GCWriteBarrier = typecheck.LookupRuntimeFunc("gcWriteBarrier")
ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove")
ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject")
ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc")
ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide")
ir.Syms.PanicdottypeE = typecheck.LookupRuntimeFunc("panicdottypeE")
ir.Syms.PanicdottypeI = typecheck.LookupRuntimeFunc("panicdottypeI")
ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype")
ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow")
ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift")
ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread")
ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange")
ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite")
ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange")
ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT") // bool
ir.Syms.X86HasSSE41 = typecheck.LookupRuntimeVar("x86HasSSE41") // bool
ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA") // bool
ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4") // bool
ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool
ir.Syms.Staticuint64s = typecheck.LookupRuntimeVar("staticuint64s")
ir.Syms.Typedmemclr = typecheck.LookupRuntimeFunc("typedmemclr")
ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove")
ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv") // asm func with special ABI
ir.Syms.WriteBarrier = typecheck.LookupRuntimeVar("writeBarrier") // struct { bool; ... }
ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase")
// asm funcs with special ABI
if base.Ctxt.Arch.Name == "amd64" {
GCWriteBarrierReg = map[int16]*obj.LSym{
x86.REG_AX: typecheck.LookupRuntimeFunc("gcWriteBarrier"),
x86.REG_CX: typecheck.LookupRuntimeFunc("gcWriteBarrierCX"),
x86.REG_DX: typecheck.LookupRuntimeFunc("gcWriteBarrierDX"),
x86.REG_BX: typecheck.LookupRuntimeFunc("gcWriteBarrierBX"),
x86.REG_BP: typecheck.LookupRuntimeFunc("gcWriteBarrierBP"),
x86.REG_SI: typecheck.LookupRuntimeFunc("gcWriteBarrierSI"),
x86.REG_R8: typecheck.LookupRuntimeFunc("gcWriteBarrierR8"),
x86.REG_R9: typecheck.LookupRuntimeFunc("gcWriteBarrierR9"),
}
}
if Arch.LinkArch.Family == sys.Wasm {
BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen")
BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("goPanicSliceAlenU")
BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("goPanicSliceAcap")
BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("goPanicSliceAcapU")
BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("goPanicSliceB")
BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("goPanicSliceBU")
BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("goPanicSlice3Alen")
BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("goPanicSlice3AlenU")
BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("goPanicSlice3Acap")
BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("goPanicSlice3AcapU")
BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("goPanicSlice3B")
BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("goPanicSlice3BU")
BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU")
BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("goPanicSliceConvert")
} else {
BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen")
BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU")
BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("panicSliceAcap")
BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU")
BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB")
BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU")
BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen")
BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU")
BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap")
BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU")
BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B")
BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU")
BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU")
BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("panicSliceConvert")
}
if Arch.LinkArch.PtrSize == 4 {
ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex")
ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU")
ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen")
ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU")
ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap")
ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU")
ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB")
ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU")
ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen")
ExtendCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeVar("panicExtendSlice3AlenU")
ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap")
ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU")
ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B")
ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU")
ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C")
ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU")
}
// Wasm (all asm funcs with special ABIs)
ir.Syms.WasmMove = typecheck.LookupRuntimeVar("wasmMove")
ir.Syms.WasmZero = typecheck.LookupRuntimeVar("wasmZero")
ir.Syms.WasmDiv = typecheck.LookupRuntimeVar("wasmDiv")
ir.Syms.WasmTruncS = typecheck.LookupRuntimeVar("wasmTruncS")
ir.Syms.WasmTruncU = typecheck.LookupRuntimeVar("wasmTruncU")
ir.Syms.SigPanic = typecheck.LookupRuntimeFunc("sigpanic")
}
// AbiForBodylessFuncStackMap returns the ABI for a bodyless function's stack map.
// This is not necessarily the ABI used to call it.
// Currently (1.17 dev) such a stack map is always ABI0;
// any ABI wrapper that is present is nosplit, hence a precise
// stack map is not needed there (the parameters survive only long
// enough to call the wrapped assembly function).
// This always returns a freshly copied ABI.
func AbiForBodylessFuncStackMap(fn *ir.Func) *abi.ABIConfig {
return ssaConfig.ABI0.Copy() // No idea what races will result, be safe
}
// These are disabled but remain ready for use in case they are needed for the next regabi port.
// TODO if they are not needed for 1.18 / next register abi port, delete them.
const magicNameDotSuffix = ".*disabled*MagicMethodNameForTestingRegisterABI"
const magicLastTypeName = "*disabled*MagicLastTypeNameForTestingRegisterABI"
// abiForFunc implements ABI policy for a function, but does not return a copy of the ABI.
// Passing a nil function returns the default ABI based on experiment configuration.
func abiForFunc(fn *ir.Func, abi0, abi1 *abi.ABIConfig) *abi.ABIConfig {
if buildcfg.Experiment.RegabiArgs {
// Select the ABI based on the function's defining ABI.
if fn == nil {
return abi1
}
switch fn.ABI {
case obj.ABI0:
return abi0
case obj.ABIInternal:
// TODO(austin): Clean up the nomenclature here.
// It's not clear that "abi1" is ABIInternal.
return abi1
}
base.Fatalf("function %v has unknown ABI %v", fn, fn.ABI)
panic("not reachable")
}
a := abi0
if fn != nil {
name := ir.FuncName(fn)
magicName := strings.HasSuffix(name, magicNameDotSuffix)
if fn.Pragma&ir.RegisterParams != 0 { // TODO(register args) remove after register abi is working
if strings.Contains(name, ".") {
if !magicName {
base.ErrorfAt(fn.Pos(), "Calls to //go:registerparams method %s won't work, remove the pragma from the declaration.", name)
}
}
a = abi1
} else if magicName {
if base.FmtPos(fn.Pos()) == "<autogenerated>:1" {
// no way to put a pragma here, and it will error out in the real source code if they did not do it there.
a = abi1
} else {
base.ErrorfAt(fn.Pos(), "Methods with magic name %s (method %s) must also specify //go:registerparams", magicNameDotSuffix[1:], name)
}
}
if regAbiForFuncType(fn.Type().FuncType()) {
// fmt.Printf("Saw magic last type name for function %s\n", name)
a = abi1
}
}
return a
}
func regAbiForFuncType(ft *types.Func) bool {
np := ft.Params.NumFields()
return np > 0 && strings.Contains(ft.Params.FieldType(np-1).String(), magicLastTypeName)
}
// dvarint writes a varint v to the funcdata in symbol x and returns the new offset
func dvarint(x *obj.LSym, off int, v int64) int {
if v < 0 || v > 1e9 {
panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
}
if v < 1<<7 {
return objw.Uint8(x, off, uint8(v))
}
off = objw.Uint8(x, off, uint8((v&127)|128))
if v < 1<<14 {
return objw.Uint8(x, off, uint8(v>>7))
}
off = objw.Uint8(x, off, uint8(((v>>7)&127)|128))
if v < 1<<21 {
return objw.Uint8(x, off, uint8(v>>14))
}
off = objw.Uint8(x, off, uint8(((v>>14)&127)|128))
if v < 1<<28 {
return objw.Uint8(x, off, uint8(v>>21))
}
off = objw.Uint8(x, off, uint8(((v>>21)&127)|128))
return objw.Uint8(x, off, uint8(v>>28))
}
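// For example, dvarint encodes v=300 as the two bytes 0xAC 0x02: the low 7 bits (44) are
// written with the continuation bit set (44|128 = 0xAC), and the remaining bits (300>>7 = 2)
// fit in the final byte.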
// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
// that is using open-coded defers. This funcdata is used to determine the active
// defers in a function and execute those defers during panic processing.
//
// The funcdata is all encoded in varints (since values will almost always be less than
// 128, but stack offsets could potentially be up to 2Gbyte). All "locations" (offsets)
// for stack variables are specified as the number of bytes below varp (pointer to the
// top of the local variables) for their starting address. The format is:
//
// - Offset of the deferBits variable
// - Number of defers in the function
// - Information about each defer call, in reverse order of appearance in the function:
// - Offset of the closure value to call
func (s *state) emitOpenDeferInfo() {
x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
s.curfn.LSym.Func().OpenCodedDeferInfo = x
off := 0
off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
off = dvarint(x, off, int64(len(s.openDefers)))
// Write in reverse-order, for ease of running in that order at runtime
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
off = dvarint(x, off, -r.closureNode.FrameOffset())
}
}
func okOffset(offset int64) int64 {
if offset == types.BOGUS_FUNARG_OFFSET {
panic(fmt.Errorf("Bogus offset %d", offset))
}
return offset
}
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *ir.Func, worker int) *ssa.Func {
name := ir.FuncName(fn)
printssa := false
if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", package.name e.g. "compress/gzip.(*Reader).Reset", or subpackage name "gzip.(*Reader).Reset"
pkgDotName := base.Ctxt.Pkgpath + "." + name
printssa = name == ssaDump ||
strings.HasSuffix(pkgDotName, ssaDump) && (pkgDotName == ssaDump || strings.HasSuffix(pkgDotName, "/"+ssaDump))
}
var astBuf *bytes.Buffer
if printssa {
astBuf = &bytes.Buffer{}
ir.FDumpList(astBuf, "buildssa-enter", fn.Enter)
ir.FDumpList(astBuf, "buildssa-body", fn.Body)
ir.FDumpList(astBuf, "buildssa-exit", fn.Exit)
if ssaDumpStdout {
fmt.Println("generating SSA for", name)
fmt.Print(astBuf.String())
}
}
var s state
s.pushLine(fn.Pos())
defer s.popLine()
s.hasdefer = fn.HasDefer()
if fn.Pragma&ir.CgoUnsafeArgs != 0 {
s.cgoUnsafeArgs = true
}
fe := ssafn{
curfn: fn,
log: printssa && ssaDumpStdout,
}
s.curfn = fn
s.f = ssa.NewFunc(&fe)
s.config = ssaConfig
s.f.Type = fn.Type()
s.f.Config = ssaConfig
s.f.Cache = &ssaCaches[worker]
s.f.Cache.Reset()
s.f.Name = name
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
s.f.PrintOrHtmlSSA = printssa
if fn.Pragma&ir.Nosplit != 0 {
s.f.NoSplit = true
}
s.f.ABI0 = ssaConfig.ABI0.Copy() // Make a copy to avoid racy map operations in type-register-width cache.
s.f.ABI1 = ssaConfig.ABI1.Copy()
s.f.ABIDefault = abiForFunc(nil, s.f.ABI0, s.f.ABI1)
s.f.ABISelf = abiForFunc(fn, s.f.ABI0, s.f.ABI1)
s.panics = map[funcLine]*ssa.Block{}
s.softFloat = s.config.SoftFloat
// Allocate starting block
s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
s.f.Entry.Pos = fn.Pos()
if printssa {
ssaDF := ssaDumpFile
if ssaDir != "" {
ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+name+".html")
ssaD := filepath.Dir(ssaDF)
os.MkdirAll(ssaD, 0755)
}
s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
// TODO: generate and print a mapping from nodes to values and blocks
dumpSourcesColumn(s.f.HTMLWriter, fn)
s.f.HTMLWriter.WriteAST("AST", astBuf)
}
// Allocate starting values
s.labels = map[string]*ssaLabel{}
s.fwdVars = map[ir.Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
switch {
case base.Debug.NoOpenDefer != 0:
s.hasOpenDefers = false
case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386":
// Don't support open-coded defers for 386 ONLY when using shared
// libraries, because there is extra code (added by rewriteToUseGot())
// preceding the deferreturn/ret code that we don't track correctly.
s.hasOpenDefers = false
}
if s.hasOpenDefers && len(s.curfn.Exit) > 0 {
// Skip doing open defers if there is any extra exit code (likely
// race detection), since we will not generate that code in the
// case of the extra deferreturn/ret segment.
s.hasOpenDefers = false
}
if s.hasOpenDefers {
// Similarly, skip if there are any heap-allocated result
// parameters that need to be copied back to their stack slots.
for _, f := range s.curfn.Type().Results().FieldSlice() {
if !f.Nname.(*ir.Name).OnStack() {
s.hasOpenDefers = false
break
}
}
}
if s.hasOpenDefers &&
s.curfn.NumReturns*s.curfn.NumDefers > 15 {
// Since we are generating defer calls at every exit for
// open-coded defers, skip doing open-coded defers if there are
// too many returns (especially if there are multiple defers).
// Open-coded defers are most important for improving performance
// for smaller functions (which don't have many returns).
s.hasOpenDefers = false
}
s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR])
s.startBlock(s.f.Entry)
s.vars[memVar] = s.startmem
if s.hasOpenDefers {
// Create the deferBits variable and stack slot. deferBits is a
// bitmask showing which of the open-coded defers in this function
// have been activated.
deferBitsTemp := typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
deferBitsTemp.SetAddrtaken(true)
s.deferBitsTemp = deferBitsTemp
// For this value, AuxInt is initialized to zero by default
startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
s.vars[deferBitsVar] = startDeferBits
s.deferBitsAddr = s.addr(deferBitsTemp)
s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits)
// Make sure that the deferBits stack slot is kept alive (for use
// by panics) and stores to deferBits are not eliminated, even if
// all checking code on deferBits in the function exit can be
// eliminated, because the defer statements were all
// unconditional.
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
}
var params *abi.ABIParamResultInfo
params = s.f.ABISelf.ABIAnalyze(fn.Type(), true)
// Generate addresses of local declarations
s.decladdrs = map[*ir.Name]*ssa.Value{}
for _, n := range fn.Dcl {
switch n.Class {
case ir.PPARAM:
// Be aware that blank and unnamed input parameters will not appear here, but do appear in the type
s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
case ir.PPARAMOUT:
s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
case ir.PAUTO:
// processed at each use, to prevent Addr coming
// before the decl.
default:
s.Fatalf("local variable with class %v unimplemented", n.Class)
}
}
s.f.OwnAux = ssa.OwnAuxCall(fn.LSym, params)
// Populate SSAable arguments.
for _, n := range fn.Dcl {
if n.Class == ir.PPARAM {
if s.canSSA(n) {
v := s.newValue0A(ssa.OpArg, n.Type(), n)
s.vars[n] = v
s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
} else { // address was taken AND/OR too large for SSA
paramAssignment := ssa.ParamAssignmentForArgName(s.f, n)
if len(paramAssignment.Registers) > 0 {
if TypeOK(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory.
v := s.newValue0A(ssa.OpArg, n.Type(), n)
s.store(n.Type(), s.decladdrs[n], v)
} else { // Too big for SSA.
// Brute force, and early, do a bunch of stores from registers
// TODO fix the nasty storeArgOrLoad recursion in ssa/expand_calls.go so this Just Works with store of a big Arg.
s.storeParameterRegsToStack(s.f.ABISelf, paramAssignment, n, s.decladdrs[n], false)
}
}
}
}
}
// Populate closure variables.
if fn.Needctxt() {
clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field
for _, n := range fn.ClosureVars {
typ := n.Type()
if !n.Byval() {
typ = types.NewPtr(typ)
}
offset = types.Rnd(offset, typ.Alignment())
ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
offset += typ.Size()
// If n is a small variable captured by value, promote
// it to PAUTO so it can be converted to SSA.
//
// Note: While we never capture a variable by value if
// the user took its address, we may have generated
// runtime calls that did (#43701). Since we don't
// convert Addrtaken variables to SSA anyway, no point
// in promoting them either.
if n.Byval() && !n.Addrtaken() && TypeOK(n.Type()) {
n.Class = ir.PAUTO
fn.Dcl = append(fn.Dcl, n)
s.assign(n, s.load(n.Type(), ptr), false, 0)
continue
}
if !n.Byval() {
ptr = s.load(typ, ptr)
}
s.setHeapaddr(fn.Pos(), n, ptr)
}
}
// Convert the AST-based IR to the SSA-based IR
s.stmtList(fn.Enter)
s.zeroResults()
s.paramsToHeap()
s.stmtList(fn.Body)
// fallthrough to exit
if s.curBlock != nil {
s.pushLine(fn.Endlineno)
s.exit()
s.popLine()
}
for _, b := range s.f.Blocks {
if b.Pos != src.NoXPos {
s.updateUnsetPredPos(b)
}
}
s.f.HTMLWriter.WritePhase("before insert phis", "before insert phis")
s.insertPhis()
// Main call to ssa package to compile function
ssa.Compile(s.f)
if s.hasOpenDefers {
s.emitOpenDeferInfo()
}
// Record incoming parameter spill information for morestack calls emitted in the assembler.
// This is done here, using all the parameters (used, partially used, and unused) because
// it mimics the behavior of the former ABI (everything stored) and because it's not 100%
// clear if naming conventions are respected in autogenerated code.
// TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also.
for _, p := range params.InParams() {
typs, offs := p.RegisterTypesAndOffsets()
for i, t := range typs {
o := offs[i] // offset within parameter
fo := p.FrameOffset(params) // offset of parameter in frame
reg := ssa.ObjRegForAbiReg(p.Registers[i], s.f.Config)
s.f.RegArgs = append(s.f.RegArgs, ssa.Spill{Reg: reg, Offset: fo + o, Type: t})
}
}
return s.f
}
func (s *state) storeParameterRegsToStack(abi *abi.ABIConfig, paramAssignment *abi.ABIParamAssignment, n *ir.Name, addr *ssa.Value, pointersOnly bool) {
typs, offs := paramAssignment.RegisterTypesAndOffsets()
for i, t := range typs {
if pointersOnly && !t.IsPtrShaped() {
continue
}
r := paramAssignment.Registers[i]
o := offs[i]
op, reg := ssa.ArgOpAndRegisterFor(r, abi)
aux := &ssa.AuxNameOffset{Name: n, Offset: o}
v := s.newValue0I(op, t, reg)
v.Aux = aux
p := s.newValue1I(ssa.OpOffPtr, types.NewPtr(t), o, addr)
s.store(t, p, v)
}
}
// zeroResults zeros the return values at the start of the function.
// We need to do this very early in the function. Defer might stop a
// panic and show the return values as they exist at the time of
// panic. For precise stacks, the garbage collector assumes results
// are always live, so we need to zero them before any allocations,
// even allocations to move params/results to the heap.
func (s *state) zeroResults() {
for _, f := range s.curfn.Type().Results().FieldSlice() {
n := f.Nname.(*ir.Name)
if !n.OnStack() {
// The local which points to the return value is the
// thing that needs zeroing. This is already handled
// by a Needzero annotation in plive.go:(*liveness).epilogue.
continue
}
// Zero the stack location containing f.
if typ := n.Type(); TypeOK(typ) {
s.assign(n, s.zeroVal(typ), false, 0)
} else {
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
s.zero(n.Type(), s.decladdrs[n])
}
}
}
// paramsToHeap produces code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
func (s *state) paramsToHeap() {
do := func(params *types.Type) {
for _, f := range params.FieldSlice() {
if f.Nname == nil {
continue // anonymous or blank parameter
}
n := f.Nname.(*ir.Name)
if ir.IsBlank(n) || n.OnStack() {
continue
}
s.newHeapaddr(n)
if n.Class == ir.PPARAM {
s.move(n.Type(), s.expr(n.Heapaddr), s.decladdrs[n])
}
}
}
typ := s.curfn.Type()
do(typ.Recvs())
do(typ.Params())
do(typ.Results())
}
// newHeapaddr allocates heap memory for n and sets its heap address.
func (s *state) newHeapaddr(n *ir.Name) {
s.setHeapaddr(n.Pos(), n, s.newObject(n.Type()))
}
// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
// and then sets it as n's heap address.
func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
if !ptr.Type.IsPtr() || !types.Identical(n.Type(), ptr.Type.Elem()) {
base.FatalfAt(n.Pos(), "setHeapaddr %L with type %v", n, ptr.Type)
}
// Declare variable to hold address.
addr := ir.NewNameAt(pos, &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg})
addr.SetType(types.NewPtr(n.Type()))
addr.Class = ir.PAUTO
addr.SetUsed(true)
addr.Curfn = s.curfn
s.curfn.Dcl = append(s.curfn.Dcl, addr)
types.CalcSize(addr.Type())
if n.Class == ir.PPARAMOUT {
addr.SetIsOutputParamHeapAddr(true)
}
n.Heapaddr = addr
s.assign(addr, ptr, false, 0)
}
// newObject returns an SSA value denoting new(typ).
func (s *state) newObject(typ *types.Type) *ssa.Value {
if typ.Size() == 0 {
return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
}
return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, s.reflectType(typ))[0]
}
// reflectType returns an SSA value representing a pointer to typ's
// reflection type descriptor.
func (s *state) reflectType(typ *types.Type) *ssa.Value {
lsym := reflectdata.TypeLinksym(typ)
return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb)
}
func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
// Read sources of target function fn.
fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Endlineno.Line())
if err != nil {
writer.Logf("cannot read sources for function %v: %v", fn, err)
}
// Read sources of inlined functions.
var inlFns []*ssa.FuncLines
for _, fi := range ssaDumpInlined {
elno := fi.Endlineno
fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
if err != nil {
writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
continue
}
inlFns = append(inlFns, fnLines)
}
sort.Sort(ssa.ByTopo(inlFns))
if targetFn != nil {
inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
}
writer.WriteSources("sources", inlFns)
}
func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
f, err := os.Open(os.ExpandEnv(file))
if err != nil {
return nil, err
}
defer f.Close()
var lines []string
ln := uint(1)
scanner := bufio.NewScanner(f)
for scanner.Scan() && ln <= end {
if ln >= start {
lines = append(lines, scanner.Text())
}
ln++
}
return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
}
// updateUnsetPredPos propagates the earliest-value position information for b
// towards all of b's predecessors that need a position, and recurs on that
// predecessor if its position is updated. B should have a non-empty position.
func (s *state) updateUnsetPredPos(b *ssa.Block) {
if b.Pos == src.NoXPos {
s.Fatalf("Block %s should have a position", b)
}
bestPos := src.NoXPos
for _, e := range b.Preds {
p := e.Block()
if !p.LackingPos() {
continue
}
if bestPos == src.NoXPos {
bestPos = b.Pos
for _, v := range b.Values {
if v.LackingPos() {
continue
}
if v.Pos != src.NoXPos {
// Assume values are still in roughly textual order;
// TODO: could also seek minimum position?
bestPos = v.Pos
break
}
}
}
p.Pos = bestPos
s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
}
}
// Information about each open-coded defer.
type openDeferInfo struct {
// The node representing the call of the defer
n *ir.CallExpr
// If defer call is closure call, the address of the argtmp where the
// closure is stored.
closure *ssa.Value
// The node representing the argtmp where the closure is stored - used for
// function, method, or interface call, to store a closure that panic
// processing can use for this defer.
closureNode *ir.Name
}
type state struct {
// configuration (arch) information
config *ssa.Config
// function we're building
f *ssa.Func
// Node for function
curfn *ir.Func
// labels in f
labels map[string]*ssaLabel
// unlabeled break and continue statement tracking
breakTo *ssa.Block // current target for plain break statement
continueTo *ssa.Block // current target for plain continue statement
// current location where we're interpreting the AST
curBlock *ssa.Block
// variable assignments in the current block (map from variable symbol to ssa value)
// *Node is the unique identifier (an ONAME Node) for the variable.
// TODO: keep a single varnum map, then make all of these maps slices instead?
vars map[ir.Node]*ssa.Value
// fwdVars are variables that are used before they are defined in the current block.
// This map exists just to coalesce multiple references into a single FwdRef op.
// *Node is the unique identifier (an ONAME Node) for the variable.
fwdVars map[ir.Node]*ssa.Value
// all defined variables at the end of each block. Indexed by block ID.
defvars []map[ir.Node]*ssa.Value
// addresses of PPARAM and PPARAMOUT variables on the stack.
decladdrs map[*ir.Name]*ssa.Value
// starting values. Memory, stack pointer, and globals pointer
startmem *ssa.Value
sp *ssa.Value
sb *ssa.Value
// value representing address of where deferBits autotmp is stored
deferBitsAddr *ssa.Value
deferBitsTemp *ir.Name
// line number stack. The current line number is top of stack
line []src.XPos
// the last line number processed; it may have been popped
lastPos src.XPos
// list of panic calls by function name and line number.
// Used to deduplicate panic calls.
panics map[funcLine]*ssa.Block
cgoUnsafeArgs bool
hasdefer bool // whether the function contains a defer statement
softFloat bool
hasOpenDefers bool // whether we are doing open-coded defers
// If doing open-coded defers, list of info about the defer calls in
// scanning order. Hence, at exit we should run these defers in reverse
// order of this list
openDefers []*openDeferInfo
// For open-coded defers, this is the beginning and end blocks of the last
// defer exit code that we have generated so far. We use these to share
// code between exits if the shareDeferExits option (disabled by default)
// is on.
lastDeferExit *ssa.Block // Entry block of last defer exit code we generated
lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
lastDeferCount int // Number of defers encountered at that point
prevCall *ssa.Value // the previous call; use this to tie results to the call op.
}
type funcLine struct {
f *obj.LSym
base *src.PosBase
line uint
}
type ssaLabel struct {
target *ssa.Block // block identified by this label
breakTarget *ssa.Block // block to break to in control flow node identified by this label
continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}
// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
lab := s.labels[sym.Name]
if lab == nil {
lab = new(ssaLabel)
s.labels[sym.Name] = lab
}
return lab
}
func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
func ssaMarker(name string) *ir.Name {
return typecheck.NewName(&types.Sym{Name: name})
}
var (
// marker node for the memory variable
memVar = ssaMarker("mem")
// marker nodes for temporary variables
ptrVar = ssaMarker("ptr")
lenVar = ssaMarker("len")
newlenVar = ssaMarker("newlen")
capVar = ssaMarker("cap")
typVar = ssaMarker("typ")
okVar = ssaMarker("ok")
deferBitsVar = ssaMarker("deferBits")
)
// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
if s.curBlock != nil {
s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
}
s.curBlock = b
s.vars = map[ir.Node]*ssa.Value{}
for n := range s.fwdVars {
delete(s.fwdVars, n)
}
}
// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
b := s.curBlock
if b == nil {
return nil
}
for len(s.defvars) <= int(b.ID) {
s.defvars = append(s.defvars, nil)
}
s.defvars[b.ID] = s.vars
s.curBlock = nil
s.vars = nil
if b.LackingPos() {
// Empty plain blocks get the line of their successor (handled after all blocks created),
// except for increment blocks in For statements (handled in ssa conversion of OFOR),
// and for blocks ending in GOTO/BREAK/CONTINUE.
b.Pos = src.NoXPos
} else {
b.Pos = s.lastPos
}
return b
}
// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
if !line.IsKnown() {
// the frontend may emit nodes with a missing line number;
// use the parent line number in this case.
line = s.peekPos()
if base.Flag.K != 0 {
base.Warn("buildssa: unknown position (line 0)")
}
} else {
s.lastPos = line
}
s.line = append(s.line, line)
}
// popLine pops the top of the line number stack.
func (s *state) popLine() {
s.line = s.line[:len(s.line)-1]
}
// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
return s.line[len(s.line)-1]
}
// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
return s.curBlock.NewValue0(s.peekPos(), op, t)
}
// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}
// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}
// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}
// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}
// newValue1Apos adds a new value with one argument and an aux value to the current block.
// isStmt determines whether the created value may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value, isStmt bool) *ssa.Value {
if isStmt {
return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}
return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
}
// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}
// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}
// newValue2A adds a new value with two arguments and an aux value to the current block.
func (s *state) newValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
}
// newValue2Apos adds a new value with two arguments and an aux value to the current block.
// isStmt determines whether the created value may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
if isStmt {
return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
}
return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
}
// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}
// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}
// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue3Apos adds a new value with three arguments and an aux value to the current block.
// isStmt determines whether the created value may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
if isStmt {
return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
}
// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}
// newValue4I adds a new value with four arguments and an auxint value to the current block.
func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3)
}
func (s *state) entryBlock() *ssa.Block {
b := s.f.Entry
if base.Flag.N > 0 && s.curBlock != nil {
// If optimizations are off, allocate in current block instead. Since with -N
// we're not doing the CSE or tighten passes, putting lots of stuff in the
// entry block leads to O(n^2) entries in the live value map during regalloc.
// See issue 45897.
b = s.curBlock
}
return b
}
// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
return s.entryBlock().NewValue0(src.NoXPos, op, t)
}
// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
return s.entryBlock().NewValue0A(src.NoXPos, op, t, aux)
}
// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue1(src.NoXPos, op, t, arg)
}
// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue1I(src.NoXPos, op, t, auxint, arg)
}
// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue1A(src.NoXPos, op, t, aux, arg)
}
// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue2(src.NoXPos, op, t, arg0, arg1)
}
// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
}
// const* routines add a new const value to the entry block.
func (s *state) constSlice(t *types.Type) *ssa.Value {
return s.f.ConstSlice(t)
}
func (s *state) constInterface(t *types.Type) *ssa.Value {
return s.f.ConstInterface(t)
}
func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
func (s *state) constEmptyString(t *types.Type) *ssa.Value {
return s.f.ConstEmptyString(t)
}
func (s *state) constBool(c bool) *ssa.Value {
return s.f.ConstBool(types.Types[types.TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
return s.f.ConstInt8(t, c)
}
func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
return s.f.ConstInt16(t, c)
}
func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
return s.f.ConstInt32(t, c)
}
func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
return s.f.ConstInt64(t, c)
}
func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
return s.f.ConstFloat32(t, c)
}
func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
return s.f.ConstFloat64(t, c)
}
func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
if s.config.PtrSize == 8 {
return s.constInt64(t, c)
}
if int64(int32(c)) != c {
s.Fatalf("integer constant too big %d", c)
}
return s.constInt32(t, int32(c))
}
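// Sketch of the intent (not a change in behavior): on a 64-bit target,
// constInt simply defers to constInt64, while on a 32-bit target the constant
// must fit in an int32; e.g. constInt(t, 1<<31) would Fatalf there because
// int64(int32(1<<31)) != 1<<31.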
func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
return s.f.ConstOffPtrSP(t, c, s.sp)
}
// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
// soft-float runtime function instead (when emitting soft-float code).
func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
if s.softFloat {
if c, ok := s.sfcall(op, arg); ok {
return c
}
}
return s.newValue1(op, t, arg)
}
func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
if s.softFloat {
if c, ok := s.sfcall(op, arg0, arg1); ok {
return c
}
}
return s.newValue2(op, t, arg0, arg1)
}
type instrumentKind uint8
const (
instrumentRead = iota
instrumentWrite
instrumentMove
)
func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) {
s.instrument2(t, addr, nil, kind)
}
// instrumentFields instruments a read/write operation on addr.
// If it is instrumenting for MSAN and t is a struct type, it instruments
// operation for each field, instead of for the whole struct.
func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
if !base.Flag.MSan || !t.IsStruct() {
s.instrument(t, addr, kind)
return
}
for _, f := range t.Fields().Slice() {
if f.Sym.IsBlank() {
continue
}
offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), f.Offset, addr)
s.instrumentFields(f.Type, offptr, kind)
}
}
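// Example (illustrative): under -msan, a read of a struct value such as
//
//	struct { a int64; _ [8]byte; b string }
//
// is reported as separate reads of a and b at their field offsets (the blank
// field is skipped), whereas under the race detector or for non-struct types
// the whole object is instrumented with a single call via instrument.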
func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
if base.Flag.MSan {
s.instrument2(t, dst, src, instrumentMove)
} else {
s.instrument(t, src, instrumentRead)
s.instrument(t, dst, instrumentWrite)
}
}
func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
if !s.curfn.InstrumentBody() {
return
}
w := t.Size()
if w == 0 {
return // can't race on zero-sized things
}
if ssa.IsSanitizerSafeAddr(addr) {
return
}
var fn *obj.LSym
needWidth := false
if addr2 != nil && kind != instrumentMove {
panic("instrument2: non-nil addr2 for non-move instrumentation")
}
if base.Flag.MSan {
switch kind {
case instrumentRead:
fn = ir.Syms.Msanread
case instrumentWrite:
fn = ir.Syms.Msanwrite
case instrumentMove:
fn = ir.Syms.Msanmove
default:
panic("unreachable")
}
needWidth = true
} else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 {
// for composite objects we have to write every address
// because a write might happen to any subobject.
// composites with only one element don't have subobjects, though.
switch kind {
case instrumentRead:
fn = ir.Syms.Racereadrange
case instrumentWrite:
fn = ir.Syms.Racewriterange
default:
panic("unreachable")
}
needWidth = true
} else if base.Flag.Race {
// for non-composite objects we can write just the start
// address, as any write must write the first byte.
switch kind {
case instrumentRead:
fn = ir.Syms.Raceread
case instrumentWrite:
fn = ir.Syms.Racewrite
default:
panic("unreachable")
}
} else {
panic("unreachable")
}
args := []*ssa.Value{addr}
if addr2 != nil {
args = append(args, addr2)
}
if needWidth {
args = append(args, s.constInt(types.Types[types.TUINTPTR], w))
}
s.rtcall(fn, true, nil, args...)
}
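// For example (a sketch of the dispatch above, not additional behavior): a
// race-instrumented write of a struct with more than one component becomes a
// racewriterange(addr, size) call, a write of a plain scalar becomes
// racewrite(addr), and under -msan a move additionally passes the source
// address, e.g. msanmove(dst, src, size).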
func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
s.instrumentFields(t, src, instrumentRead)
return s.rawLoad(t, src)
}
func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpLoad, t, src, s.mem())
}
func (s *state) store(t *types.Type, dst, val *ssa.Value) {
s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
}
func (s *state) zero(t *types.Type, dst *ssa.Value) {
s.instrument(t, dst, instrumentWrite)
store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
store.Aux = t
s.vars[memVar] = store
}
func (s *state) move(t *types.Type, dst, src *ssa.Value) {
s.instrumentMove(t, dst, src)
store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
store.Aux = t
s.vars[memVar] = store
}
// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l ir.Nodes) {
for _, n := range l {
s.stmt(n)
}
}
// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n ir.Node) {
if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) {
// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
s.pushLine(n.Pos())
defer s.popLine()
}
// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
// then this code is dead. Stop here.
if s.curBlock == nil && n.Op() != ir.OLABEL {
return
}
s.stmtList(n.Init())
switch n.Op() {
case ir.OBLOCK:
n := n.(*ir.BlockStmt)
s.stmtList(n.List)
// No-ops
case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:
// Expression statements
case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
if ir.IsIntrinsicCall(n) {
s.intrinsicCall(n)
return
}
fallthrough
case ir.OCALLINTER:
n := n.(*ir.CallExpr)
s.callResult(n, callNormal)
if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PFUNC {
if fn := n.X.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
n.X.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(m)
// TODO: never rewrite OPANIC to OCALLFUNC in the
// first place. Need to wait until all backends
// go through SSA.
}
}
case ir.ODEFER:
n := n.(*ir.GoDeferStmt)
if base.Debug.Defer > 0 {
var defertype string
if s.hasOpenDefers {
defertype = "open-coded"
} else if n.Esc() == ir.EscNever {
defertype = "stack-allocated"
} else {
defertype = "heap-allocated"
}
base.WarnfAt(n.Pos(), "%s defer", defertype)
}
if s.hasOpenDefers {
s.openDeferRecord(n.Call.(*ir.CallExpr))
} else {
d := callDefer
if n.Esc() == ir.EscNever {
d = callDeferStack
}
s.callResult(n.Call.(*ir.CallExpr), d)
}
case ir.OGO:
n := n.(*ir.GoDeferStmt)
s.callResult(n.Call.(*ir.CallExpr), callGo)
case ir.OAS2DOTTYPE:
n := n.(*ir.AssignListStmt)
var res, resok *ssa.Value
if n.Rhs[0].Op() == ir.ODOTTYPE2 {
res, resok = s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
} else {
res, resok = s.dynamicDottype(n.Rhs[0].(*ir.DynamicTypeAssertExpr), true)
}
deref := false
if !TypeOK(n.Rhs[0].Type()) {
if res.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
mem := s.mem()
if mem.Op == ssa.OpVarKill {
mem = mem.Args[0]
}
if res.Args[1] != mem {
s.Fatalf("memory no longer live from 2-result dottype load")
}
deref = true
res = res.Args[0]
}
s.assign(n.Lhs[0], res, deref, 0)
s.assign(n.Lhs[1], resok, false, 0)
return
case ir.OAS2FUNC:
// We come here only when it is an intrinsic call returning two values.
n := n.(*ir.AssignListStmt)
call := n.Rhs[0].(*ir.CallExpr)
if !ir.IsIntrinsicCall(call) {
s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call)
}
v := s.intrinsicCall(call)
v1 := s.newValue1(ssa.OpSelect0, n.Lhs[0].Type(), v)
v2 := s.newValue1(ssa.OpSelect1, n.Lhs[1].Type(), v)
s.assign(n.Lhs[0], v1, false, 0)
s.assign(n.Lhs[1], v2, false, 0)
return
case ir.ODCL:
n := n.(*ir.Decl)
if v := n.X; v.Esc() == ir.EscHeap {
s.newHeapaddr(v)
}
case ir.OLABEL:
n := n.(*ir.LabelStmt)
sym := n.Label
lab := s.label(sym)
// The label might already have a target block via a goto.
if lab.target == nil {
lab.target = s.f.NewBlock(ssa.BlockPlain)
}
// Go to that label.
// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
if s.curBlock != nil {
b := s.endBlock()
b.AddEdgeTo(lab.target)
}
s.startBlock(lab.target)
case ir.OGOTO:
n := n.(*ir.BranchStmt)
sym := n.Label
lab := s.label(sym)
if lab.target == nil {
lab.target = s.f.NewBlock(ssa.BlockPlain)
}
b := s.endBlock()
b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
b.AddEdgeTo(lab.target)
case ir.OAS:
n := n.(*ir.AssignStmt)
if n.X == n.Y && n.X.Op() == ir.ONAME {
// An x=x assignment. No point in doing anything
// here. In addition, skipping this assignment
// prevents generating:
// VARDEF x
// COPY x -> x
// which is bad because x is incorrectly considered
// dead before the vardef. See issue #14904.
return
}
// Evaluate RHS.
rhs := n.Y
if rhs != nil {
switch rhs.Op() {
case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !ir.IsZero(rhs) {
s.Fatalf("literal with nonzero value in SSA: %v", rhs)
}
rhs = nil
case ir.OAPPEND:
rhs := rhs.(*ir.CallExpr)
// Check whether we're writing the result of an append back to the same slice.
// If so, we handle it specially to avoid write barriers on the fast
// (non-growth) path.
if !ir.SameSafeExpr(n.X, rhs.Args[0]) || base.Flag.N != 0 {
break
}
// If the slice can be SSA'd, it'll be on the stack,
// so there will be no write barriers,
// so there's no need to attempt to prevent them.
if s.canSSA(n.X) {
if base.Debug.Append > 0 { // replicating old diagnostic message
base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
}
break
}
if base.Debug.Append > 0 {
base.WarnfAt(n.Pos(), "append: len-only update")
}
s.append(rhs, true)
return
}
}
if ir.IsBlank(n.X) {
// _ = rhs
// Just evaluate rhs for side-effects.
if rhs != nil {
s.expr(rhs)
}
return
}
var t *types.Type
if n.Y != nil {
t = n.Y.Type()
} else {
t = n.X.Type()
}
var r *ssa.Value
deref := !TypeOK(t)
if deref {
if rhs == nil {
r = nil // Signal assign to use OpZero.
} else {
r = s.addr(rhs)
}
} else {
if rhs == nil {
r = s.zeroVal(t)
} else {
r = s.expr(rhs)
}
}
var skip skipMask
if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && ir.SameSafeExpr(rhs.(*ir.SliceExpr).X, n.X) {
// We're assigning a slicing operation back to its source.
// Don't write back fields we aren't changing. See issue #14855.
rhs := rhs.(*ir.SliceExpr)
i, j, k := rhs.Low, rhs.High, rhs.Max
if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) {
// [0:...] is the same as [:...]
i = nil
}
// TODO: detect defaults for len/cap also.
// Currently doesn't really work because (*p)[:len(*p)] appears here as:
// tmp = len(*p)
// (*p)[:tmp]
//if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) {
// j = nil
//}
//if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) {
// k = nil
//}
if i == nil {
skip |= skipPtr
if j == nil {
skip |= skipLen
}
if k == nil {
skip |= skipCap
}
}
}
s.assign(n.X, r, deref, skip)
case ir.OIF:
n := n.(*ir.IfStmt)
if ir.IsConst(n.Cond, constant.Bool) {
s.stmtList(n.Cond.Init())
if ir.BoolVal(n.Cond) {
s.stmtList(n.Body)
} else {
s.stmtList(n.Else)
}
break
}
bEnd := s.f.NewBlock(ssa.BlockPlain)
var likely int8
if n.Likely {
likely = 1
}
var bThen *ssa.Block
if len(n.Body) != 0 {
bThen = s.f.NewBlock(ssa.BlockPlain)
} else {
bThen = bEnd
}
var bElse *ssa.Block
if len(n.Else) != 0 {
bElse = s.f.NewBlock(ssa.BlockPlain)
} else {
bElse = bEnd
}
s.condBranch(n.Cond, bThen, bElse, likely)
if len(n.Body) != 0 {
s.startBlock(bThen)
s.stmtList(n.Body)
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
if len(n.Else) != 0 {
s.startBlock(bElse)
s.stmtList(n.Else)
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
s.startBlock(bEnd)
case ir.ORETURN:
n := n.(*ir.ReturnStmt)
s.stmtList(n.Results)
b := s.exit()
b.Pos = s.lastPos.WithIsStmt()
case ir.OTAILCALL:
n := n.(*ir.TailCallStmt)
b := s.exit()
b.Kind = ssa.BlockRetJmp // override BlockRet
b.Aux = callTargetLSym(n.Target)
case ir.OCONTINUE, ir.OBREAK:
n := n.(*ir.BranchStmt)
var to *ssa.Block
if n.Label == nil {
// plain break/continue
switch n.Op() {
case ir.OCONTINUE:
to = s.continueTo
case ir.OBREAK:
to = s.breakTo
}
} else {
// labeled break/continue; look up the target
sym := n.Label
lab := s.label(sym)
switch n.Op() {
case ir.OCONTINUE:
to = lab.continueTarget
case ir.OBREAK:
to = lab.breakTarget
}
}
b := s.endBlock()
b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
b.AddEdgeTo(to)
case ir.OFOR, ir.OFORUNTIL:
// OFOR: for Ninit; Left; Right { Nbody }
// cond (Left); body (Nbody); incr (Right)
//
// OFORUNTIL: for Ninit; Left; Right; List { Nbody }
// => body: { Nbody }; incr: Right; if Left { lateincr: List; goto body }; end:
n := n.(*ir.ForStmt)
bCond := s.f.NewBlock(ssa.BlockPlain)
bBody := s.f.NewBlock(ssa.BlockPlain)
bIncr := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
// ensure empty for loops have correct position; issue #30167
bBody.Pos = n.Pos()
// first, jump to condition test (OFOR) or body (OFORUNTIL)
b := s.endBlock()
if n.Op() == ir.OFOR {
b.AddEdgeTo(bCond)
// generate code to test condition
s.startBlock(bCond)
if n.Cond != nil {
s.condBranch(n.Cond, bBody, bEnd, 1)
} else {
b := s.endBlock()
b.Kind = ssa.BlockPlain
b.AddEdgeTo(bBody)
}
} else {
b.AddEdgeTo(bBody)
}
// set up for continue/break in body
prevContinue := s.continueTo
prevBreak := s.breakTo
s.continueTo = bIncr
s.breakTo = bEnd
var lab *ssaLabel
if sym := n.Label; sym != nil {
// labeled for loop
lab = s.label(sym)
lab.continueTarget = bIncr
lab.breakTarget = bEnd
}
// generate body
s.startBlock(bBody)
s.stmtList(n.Body)
// tear down continue/break
s.continueTo = prevContinue
s.breakTo = prevBreak
if lab != nil {
lab.continueTarget = nil
lab.breakTarget = nil
}
// done with body, goto incr
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bIncr)
}
// generate incr (and, for OFORUNTIL, condition)
s.startBlock(bIncr)
if n.Post != nil {
s.stmt(n.Post)
}
if n.Op() == ir.OFOR {
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bCond)
// It can happen that bIncr ends in a block containing only VARKILL,
// and that muddles the debugging experience.
if b.Pos == src.NoXPos {
b.Pos = bCond.Pos
}
}
} else {
// bCond is unused in OFORUNTIL, so repurpose it.
bLateIncr := bCond
// test condition
s.condBranch(n.Cond, bLateIncr, bEnd, 1)
// generate late increment
s.startBlock(bLateIncr)
s.stmtList(n.Late)
s.endBlock().AddEdgeTo(bBody)
}
s.startBlock(bEnd)
case ir.OSWITCH, ir.OSELECT:
// These have been mostly rewritten by the front end into their Nbody fields.
// Our main task is to correctly hook up any break statements.
bEnd := s.f.NewBlock(ssa.BlockPlain)
prevBreak := s.breakTo
s.breakTo = bEnd
var sym *types.Sym
var body ir.Nodes
if n.Op() == ir.OSWITCH {
n := n.(*ir.SwitchStmt)
sym = n.Label
body = n.Compiled
} else {
n := n.(*ir.SelectStmt)
sym = n.Label
body = n.Compiled
}
var lab *ssaLabel
if sym != nil {
// labeled
lab = s.label(sym)
lab.breakTarget = bEnd
}
// generate body code
s.stmtList(body)
s.breakTo = prevBreak
if lab != nil {
lab.breakTarget = nil
}
// walk adds explicit OBREAK nodes to the end of all reachable code paths.
// If we still have a current block here, then mark it unreachable.
if s.curBlock != nil {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(m)
}
s.startBlock(bEnd)
case ir.OVARDEF:
n := n.(*ir.UnaryExpr)
if !s.canSSA(n.X) {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
}
case ir.OVARKILL:
// Insert a varkill op to record that a variable is no longer live.
// We only care about liveness info at call sites, so putting the
// varkill in the store chain is enough to keep it correctly ordered
// with respect to call ops.
n := n.(*ir.UnaryExpr)
if !s.canSSA(n.X) {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
}
case ir.OVARLIVE:
// Insert a varlive op to record that a variable is still live.
n := n.(*ir.UnaryExpr)
v := n.X.(*ir.Name)
if !v.Addrtaken() {
s.Fatalf("VARLIVE variable %v must have Addrtaken set", v)
}
switch v.Class {
case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
default:
s.Fatalf("VARLIVE variable %v must be Auto or Arg", v)
}
s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, v, s.mem())
case ir.OCHECKNIL:
n := n.(*ir.UnaryExpr)
p := s.expr(n.X)
s.nilCheck(p)
case ir.OINLMARK:
n := n.(*ir.InlineMarkStmt)
s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Index, s.mem())
default:
s.Fatalf("unhandled stmt %v", n.Op())
}
}
// If true, share as many open-coded defer exits as possible (with the downside of
// worse line-number information)
const shareDeferExits = false
// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
if s.hasdefer {
if s.hasOpenDefers {
if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
if s.curBlock.Kind != ssa.BlockPlain {
panic("Block for an exit should be BlockPlain")
}
s.curBlock.AddEdgeTo(s.lastDeferExit)
s.endBlock()
return s.lastDeferFinalBlock
}
s.openDeferExit()
} else {
s.rtcall(ir.Syms.Deferreturn, true, nil)
}
}
var b *ssa.Block
var m *ssa.Value
// Do actual return.
// These currently turn into self-copies (in many cases).
resultFields := s.curfn.Type().Results().FieldSlice()
results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1)
m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
// Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
for i, f := range resultFields {
n := f.Nname.(*ir.Name)
if s.canSSA(n) { // result is in some SSA variable
if !n.IsOutputParamInRegisters() {
// We are about to store to the result slot.
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
}
results[i] = s.variable(n, n.Type())
} else if !n.OnStack() { // result is actually heap allocated
// We are about to copy the in-heap result to the result slot.
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
ha := s.expr(n.Heapaddr)
s.instrumentFields(n.Type(), ha, instrumentRead)
results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem())
} else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA.
// Before register ABI this ought to be a self-move, home=dest,
// With register ABI, it's still a self-move if parameter is on stack (i.e., too big or overflowed)
// No VarDef, as the result slot is already holding live value.
results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem())
}
}
// Run exit code. Today, this is just racefuncexit, in -race mode.
// TODO(register args) this seems risky here with a register-ABI, but not clear it is right to do it earlier either.
// Spills in register allocation might just fix it.
s.stmtList(s.curfn.Exit)
results[len(results)-1] = s.mem()
m.AddArgs(results...)
b = s.endBlock()
b.Kind = ssa.BlockRet
b.SetControl(m)
if s.hasdefer && s.hasOpenDefers {
s.lastDeferFinalBlock = b
}
return b
}
type opAndType struct {
op ir.Op
etype types.Kind
}
var opToSSA = map[opAndType]ssa.Op{
opAndType{ir.OADD, types.TINT8}: ssa.OpAdd8,
opAndType{ir.OADD, types.TUINT8}: ssa.OpAdd8,
opAndType{ir.OADD, types.TINT16}: ssa.OpAdd16,
opAndType{ir.OADD, types.TUINT16}: ssa.OpAdd16,
opAndType{ir.OADD, types.TINT32}: ssa.OpAdd32,
opAndType{ir.OADD, types.TUINT32}: ssa.OpAdd32,
opAndType{ir.OADD, types.TINT64}: ssa.OpAdd64,
opAndType{ir.OADD, types.TUINT64}: ssa.OpAdd64,
opAndType{ir.OADD, types.TFLOAT32}: ssa.OpAdd32F,
opAndType{ir.OADD, types.TFLOAT64}: ssa.OpAdd64F,
opAndType{ir.OSUB, types.TINT8}: ssa.OpSub8,
opAndType{ir.OSUB, types.TUINT8}: ssa.OpSub8,
opAndType{ir.OSUB, types.TINT16}: ssa.OpSub16,
opAndType{ir.OSUB, types.TUINT16}: ssa.OpSub16,
opAndType{ir.OSUB, types.TINT32}: ssa.OpSub32,
opAndType{ir.OSUB, types.TUINT32}: ssa.OpSub32,
opAndType{ir.OSUB, types.TINT64}: ssa.OpSub64,
opAndType{ir.OSUB, types.TUINT64}: ssa.OpSub64,
opAndType{ir.OSUB, types.TFLOAT32}: ssa.OpSub32F,
opAndType{ir.OSUB, types.TFLOAT64}: ssa.OpSub64F,
opAndType{ir.ONOT, types.TBOOL}: ssa.OpNot,
opAndType{ir.ONEG, types.TINT8}: ssa.OpNeg8,
opAndType{ir.ONEG, types.TUINT8}: ssa.OpNeg8,
opAndType{ir.ONEG, types.TINT16}: ssa.OpNeg16,
opAndType{ir.ONEG, types.TUINT16}: ssa.OpNeg16,
opAndType{ir.ONEG, types.TINT32}: ssa.OpNeg32,
opAndType{ir.ONEG, types.TUINT32}: ssa.OpNeg32,
opAndType{ir.ONEG, types.TINT64}: ssa.OpNeg64,
opAndType{ir.ONEG, types.TUINT64}: ssa.OpNeg64,
opAndType{ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F,
opAndType{ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F,
opAndType{ir.OBITNOT, types.TINT8}: ssa.OpCom8,
opAndType{ir.OBITNOT, types.TUINT8}: ssa.OpCom8,
opAndType{ir.OBITNOT, types.TINT16}: ssa.OpCom16,
opAndType{ir.OBITNOT, types.TUINT16}: ssa.OpCom16,
opAndType{ir.OBITNOT, types.TINT32}: ssa.OpCom32,
opAndType{ir.OBITNOT, types.TUINT32}: ssa.OpCom32,
opAndType{ir.OBITNOT, types.TINT64}: ssa.OpCom64,
opAndType{ir.OBITNOT, types.TUINT64}: ssa.OpCom64,
opAndType{ir.OIMAG, types.TCOMPLEX64}: ssa.OpComplexImag,
opAndType{ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag,
opAndType{ir.OREAL, types.TCOMPLEX64}: ssa.OpComplexReal,
opAndType{ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal,
opAndType{ir.OMUL, types.TINT8}: ssa.OpMul8,
opAndType{ir.OMUL, types.TUINT8}: ssa.OpMul8,
opAndType{ir.OMUL, types.TINT16}: ssa.OpMul16,
opAndType{ir.OMUL, types.TUINT16}: ssa.OpMul16,
opAndType{ir.OMUL, types.TINT32}: ssa.OpMul32,
opAndType{ir.OMUL, types.TUINT32}: ssa.OpMul32,
opAndType{ir.OMUL, types.TINT64}: ssa.OpMul64,
opAndType{ir.OMUL, types.TUINT64}: ssa.OpMul64,
opAndType{ir.OMUL, types.TFLOAT32}: ssa.OpMul32F,
opAndType{ir.OMUL, types.TFLOAT64}: ssa.OpMul64F,
opAndType{ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F,
opAndType{ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F,
opAndType{ir.ODIV, types.TINT8}: ssa.OpDiv8,
opAndType{ir.ODIV, types.TUINT8}: ssa.OpDiv8u,
opAndType{ir.ODIV, types.TINT16}: ssa.OpDiv16,
opAndType{ir.ODIV, types.TUINT16}: ssa.OpDiv16u,
opAndType{ir.ODIV, types.TINT32}: ssa.OpDiv32,
opAndType{ir.ODIV, types.TUINT32}: ssa.OpDiv32u,
opAndType{ir.ODIV, types.TINT64}: ssa.OpDiv64,
opAndType{ir.ODIV, types.TUINT64}: ssa.OpDiv64u,
opAndType{ir.OMOD, types.TINT8}: ssa.OpMod8,
opAndType{ir.OMOD, types.TUINT8}: ssa.OpMod8u,
opAndType{ir.OMOD, types.TINT16}: ssa.OpMod16,
opAndType{ir.OMOD, types.TUINT16}: ssa.OpMod16u,
opAndType{ir.OMOD, types.TINT32}: ssa.OpMod32,
opAndType{ir.OMOD, types.TUINT32}: ssa.OpMod32u,
opAndType{ir.OMOD, types.TINT64}: ssa.OpMod64,
opAndType{ir.OMOD, types.TUINT64}: ssa.OpMod64u,
opAndType{ir.OAND, types.TINT8}: ssa.OpAnd8,
opAndType{ir.OAND, types.TUINT8}: ssa.OpAnd8,
opAndType{ir.OAND, types.TINT16}: ssa.OpAnd16,
opAndType{ir.OAND, types.TUINT16}: ssa.OpAnd16,
opAndType{ir.OAND, types.TINT32}: ssa.OpAnd32,
opAndType{ir.OAND, types.TUINT32}: ssa.OpAnd32,
opAndType{ir.OAND, types.TINT64}: ssa.OpAnd64,
opAndType{ir.OAND, types.TUINT64}: ssa.OpAnd64,
opAndType{ir.OOR, types.TINT8}: ssa.OpOr8,
opAndType{ir.OOR, types.TUINT8}: ssa.OpOr8,
opAndType{ir.OOR, types.TINT16}: ssa.OpOr16,
opAndType{ir.OOR, types.TUINT16}: ssa.OpOr16,
opAndType{ir.OOR, types.TINT32}: ssa.OpOr32,
opAndType{ir.OOR, types.TUINT32}: ssa.OpOr32,
opAndType{ir.OOR, types.TINT64}: ssa.OpOr64,
opAndType{ir.OOR, types.TUINT64}: ssa.OpOr64,
opAndType{ir.OXOR, types.TINT8}: ssa.OpXor8,
opAndType{ir.OXOR, types.TUINT8}: ssa.OpXor8,
opAndType{ir.OXOR, types.TINT16}: ssa.OpXor16,
opAndType{ir.OXOR, types.TUINT16}: ssa.OpXor16,
opAndType{ir.OXOR, types.TINT32}: ssa.OpXor32,
opAndType{ir.OXOR, types.TUINT32}: ssa.OpXor32,
opAndType{ir.OXOR, types.TINT64}: ssa.OpXor64,
opAndType{ir.OXOR, types.TUINT64}: ssa.OpXor64,
opAndType{ir.OEQ, types.TBOOL}: ssa.OpEqB,
opAndType{ir.OEQ, types.TINT8}: ssa.OpEq8,
opAndType{ir.OEQ, types.TUINT8}: ssa.OpEq8,
opAndType{ir.OEQ, types.TINT16}: ssa.OpEq16,
opAndType{ir.OEQ, types.TUINT16}: ssa.OpEq16,
opAndType{ir.OEQ, types.TINT32}: ssa.OpEq32,
opAndType{ir.OEQ, types.TUINT32}: ssa.OpEq32,
opAndType{ir.OEQ, types.TINT64}: ssa.OpEq64,
opAndType{ir.OEQ, types.TUINT64}: ssa.OpEq64,
opAndType{ir.OEQ, types.TINTER}: ssa.OpEqInter,
opAndType{ir.OEQ, types.TSLICE}: ssa.OpEqSlice,
opAndType{ir.OEQ, types.TFUNC}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TMAP}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TCHAN}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TPTR}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TUINTPTR}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TFLOAT64}: ssa.OpEq64F,
opAndType{ir.OEQ, types.TFLOAT32}: ssa.OpEq32F,
opAndType{ir.ONE, types.TBOOL}: ssa.OpNeqB,
opAndType{ir.ONE, types.TINT8}: ssa.OpNeq8,
opAndType{ir.ONE, types.TUINT8}: ssa.OpNeq8,
opAndType{ir.ONE, types.TINT16}: ssa.OpNeq16,
opAndType{ir.ONE, types.TUINT16}: ssa.OpNeq16,
opAndType{ir.ONE, types.TINT32}: ssa.OpNeq32,
opAndType{ir.ONE, types.TUINT32}: ssa.OpNeq32,
opAndType{ir.ONE, types.TINT64}: ssa.OpNeq64,
opAndType{ir.ONE, types.TUINT64}: ssa.OpNeq64,
opAndType{ir.ONE, types.TINTER}: ssa.OpNeqInter,
opAndType{ir.ONE, types.TSLICE}: ssa.OpNeqSlice,
opAndType{ir.ONE, types.TFUNC}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TMAP}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TCHAN}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TPTR}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TUINTPTR}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TFLOAT64}: ssa.OpNeq64F,
opAndType{ir.ONE, types.TFLOAT32}: ssa.OpNeq32F,
opAndType{ir.OLT, types.TINT8}: ssa.OpLess8,
opAndType{ir.OLT, types.TUINT8}: ssa.OpLess8U,
opAndType{ir.OLT, types.TINT16}: ssa.OpLess16,
opAndType{ir.OLT, types.TUINT16}: ssa.OpLess16U,
opAndType{ir.OLT, types.TINT32}: ssa.OpLess32,
opAndType{ir.OLT, types.TUINT32}: ssa.OpLess32U,
opAndType{ir.OLT, types.TINT64}: ssa.OpLess64,
opAndType{ir.OLT, types.TUINT64}: ssa.OpLess64U,
opAndType{ir.OLT, types.TFLOAT64}: ssa.OpLess64F,
opAndType{ir.OLT, types.TFLOAT32}: ssa.OpLess32F,
opAndType{ir.OLE, types.TINT8}: ssa.OpLeq8,
opAndType{ir.OLE, types.TUINT8}: ssa.OpLeq8U,
opAndType{ir.OLE, types.TINT16}: ssa.OpLeq16,
opAndType{ir.OLE, types.TUINT16}: ssa.OpLeq16U,
opAndType{ir.OLE, types.TINT32}: ssa.OpLeq32,
opAndType{ir.OLE, types.TUINT32}: ssa.OpLeq32U,
opAndType{ir.OLE, types.TINT64}: ssa.OpLeq64,
opAndType{ir.OLE, types.TUINT64}: ssa.OpLeq64U,
opAndType{ir.OLE, types.TFLOAT64}: ssa.OpLeq64F,
opAndType{ir.OLE, types.TFLOAT32}: ssa.OpLeq32F,
}
func (s *state) concreteEtype(t *types.Type) types.Kind {
e := t.Kind()
switch e {
default:
return e
case types.TINT:
if s.config.PtrSize == 8 {
return types.TINT64
}
return types.TINT32
case types.TUINT:
if s.config.PtrSize == 8 {
return types.TUINT64
}
return types.TUINT32
case types.TUINTPTR:
if s.config.PtrSize == 8 {
return types.TUINT64
}
return types.TUINT32
}
}
func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op {
etype := s.concreteEtype(t)
x, ok := opToSSA[opAndType{op, etype}]
if !ok {
s.Fatalf("unhandled binary op %v %s", op, etype)
}
return x
}
type opAndTwoTypes struct {
op ir.Op
etype1 types.Kind
etype2 types.Kind
}
type twoTypes struct {
etype1 types.Kind
etype2 types.Kind
}
type twoOpsAndType struct {
op1 ssa.Op
op2 ssa.Op
intermediateType types.Kind
}
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
twoTypes{types.TINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64},
twoTypes{types.TINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64},
twoTypes{types.TFLOAT32, types.TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
twoTypes{types.TFLOAT32, types.TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
twoTypes{types.TFLOAT32, types.TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32},
twoTypes{types.TFLOAT32, types.TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64},
twoTypes{types.TFLOAT64, types.TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
twoTypes{types.TFLOAT64, types.TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
twoTypes{types.TFLOAT64, types.TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32},
twoTypes{types.TFLOAT64, types.TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64},
// unsigned
twoTypes{types.TUINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TUINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned
twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto32F, branchy code expansion instead
twoTypes{types.TUINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TUINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned
twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto64F, branchy code expansion instead
twoTypes{types.TFLOAT32, types.TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
twoTypes{types.TFLOAT32, types.TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead
twoTypes{types.TFLOAT64, types.TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
twoTypes{types.TFLOAT64, types.TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead
// float
twoTypes{types.TFLOAT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32},
twoTypes{types.TFLOAT64, types.TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64},
twoTypes{types.TFLOAT32, types.TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32},
twoTypes{types.TFLOAT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64},
}
// fpConvOpToSSA32 is used only on 32-bit archs and includes only the entries
// that differ from fpConvOpToSSA: on 32-bit archs, don't use int64<->float
// conversions for uint32.
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32},
twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32},
twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32},
twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32},
}
// uint64<->float conversions, only on machines that have instructions for that
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64},
twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64},
twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64},
twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64},
}
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT8}: ssa.OpLsh8x8,
opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT8}: ssa.OpLsh8x8,
opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT16}: ssa.OpLsh8x16,
opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16,
opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT32}: ssa.OpLsh8x32,
opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32,
opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT64}: ssa.OpLsh8x64,
opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64,
opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT8}: ssa.OpLsh16x8,
opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT8}: ssa.OpLsh16x8,
opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT16}: ssa.OpLsh16x16,
opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16,
opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT32}: ssa.OpLsh16x32,
opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32,
opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT64}: ssa.OpLsh16x64,
opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64,
opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT8}: ssa.OpLsh32x8,
opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT8}: ssa.OpLsh32x8,
opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT16}: ssa.OpLsh32x16,
opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16,
opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT32}: ssa.OpLsh32x32,
opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32,
opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT64}: ssa.OpLsh32x64,
opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64,
opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT8}: ssa.OpLsh64x8,
opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT8}: ssa.OpLsh64x8,
opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT16}: ssa.OpLsh64x16,
opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16,
opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT32}: ssa.OpLsh64x32,
opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32,
opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT64}: ssa.OpLsh64x64,
opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64,
opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT8}: ssa.OpRsh8x8,
opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT8}: ssa.OpRsh8Ux8,
opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT16}: ssa.OpRsh8x16,
opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16,
opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT32}: ssa.OpRsh8x32,
opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32,
opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT64}: ssa.OpRsh8x64,
opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64,
opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT8}: ssa.OpRsh16x8,
opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT8}: ssa.OpRsh16Ux8,
opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT16}: ssa.OpRsh16x16,
opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16,
opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT32}: ssa.OpRsh16x32,
opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32,
opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT64}: ssa.OpRsh16x64,
opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64,
opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT8}: ssa.OpRsh32x8,
opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT8}: ssa.OpRsh32Ux8,
opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT16}: ssa.OpRsh32x16,
opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16,
opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT32}: ssa.OpRsh32x32,
opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32,
opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT64}: ssa.OpRsh32x64,
opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64,
opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT8}: ssa.OpRsh64x8,
opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT8}: ssa.OpRsh64Ux8,
opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT16}: ssa.OpRsh64x16,
opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16,
opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT32}: ssa.OpRsh64x32,
opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32,
opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT64}: ssa.OpRsh64x64,
opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64,
}
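// ssaShiftOp returns the SSA opcode for the shift op where the shifted
// operand has type t and the shift count has type u.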
func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
etype1 := s.concreteEtype(t)
etype2 := s.concreteEtype(u)
x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
if !ok {
s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
}
return x
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n ir.Node) *ssa.Value {
if ir.HasUniquePos(n) {
// ONAMEs and named OLITERALs have the line number
// of the decl, not the use. See issue 14742.
s.pushLine(n.Pos())
defer s.popLine()
}
s.stmtList(n.Init())
switch n.Op() {
case ir.OBYTES2STRTMP:
n := n.(*ir.ConvExpr)
slice := s.expr(n.X)
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
case ir.OSTR2BYTESTMP:
n := n.(*ir.ConvExpr)
str := s.expr(n.X)
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
case ir.OCFUNC:
n := n.(*ir.UnaryExpr)
aux := n.X.(*ir.Name).Linksym()
// OCFUNC is used to build function values, which must
// always reference ABIInternal entry points.
if aux.ABI() != obj.ABIInternal {
s.Fatalf("expected ABIInternal: %v", aux.ABI())
}
return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)
case ir.ONAME:
n := n.(*ir.Name)
if n.Class == ir.PFUNC {
// "value" of a function is the address of the function's closure
sym := staticdata.FuncLinksym(n)
return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
}
if s.canSSA(n) {
return s.variable(n, n.Type())
}
return s.load(n.Type(), s.addr(n))
case ir.OLINKSYMOFFSET:
n := n.(*ir.LinksymOffsetExpr)
return s.load(n.Type(), s.addr(n))
case ir.ONIL:
n := n.(*ir.NilExpr)
t := n.Type()
switch {
case t.IsSlice():
return s.constSlice(t)
case t.IsInterface():
return s.constInterface(t)
default:
return s.constNil(t)
}
case ir.OLITERAL:
switch u := n.Val(); u.Kind() {
case constant.Int:
i := ir.IntVal(n.Type(), u)
switch n.Type().Size() {
case 1:
return s.constInt8(n.Type(), int8(i))
case 2:
return s.constInt16(n.Type(), int16(i))
case 4:
return s.constInt32(n.Type(), int32(i))
case 8:
return s.constInt64(n.Type(), i)
default:
s.Fatalf("bad integer size %d", n.Type().Size())
return nil
}
case constant.String:
i := constant.StringVal(u)
if i == "" {
return s.constEmptyString(n.Type())
}
return s.entryNewValue0A(ssa.OpConstString, n.Type(), ssa.StringToAux(i))
case constant.Bool:
return s.constBool(constant.BoolVal(u))
case constant.Float:
f, _ := constant.Float64Val(u)
switch n.Type().Size() {
case 4:
return s.constFloat32(n.Type(), f)
case 8:
return s.constFloat64(n.Type(), f)
default:
s.Fatalf("bad float size %d", n.Type().Size())
return nil
}
case constant.Complex:
re, _ := constant.Float64Val(constant.Real(u))
im, _ := constant.Float64Val(constant.Imag(u))
switch n.Type().Size() {
case 8:
pt := types.Types[types.TFLOAT32]
return s.newValue2(ssa.OpComplexMake, n.Type(),
s.constFloat32(pt, re),
s.constFloat32(pt, im))
case 16:
pt := types.Types[types.TFLOAT64]
return s.newValue2(ssa.OpComplexMake, n.Type(),
s.constFloat64(pt, re),
s.constFloat64(pt, im))
default:
s.Fatalf("bad complex size %d", n.Type().Size())
return nil
}
default:
s.Fatalf("unhandled OLITERAL %v", u.Kind())
return nil
}
case ir.OCONVNOP:
n := n.(*ir.ConvExpr)
to := n.Type()
from := n.X.Type()
// Assume everything will work out, so set up our return value.
// Anything interesting that happens from here is a fatal.
x := s.expr(n.X)
if to == from {
return x
}
// Special case for not confusing GC and liveness.
// We don't want pointers accidentally classified
// as not-pointers or vice-versa because of copy
// elision.
if to.IsPtrShaped() != from.IsPtrShaped() {
return s.newValue2(ssa.OpConvert, to, x, s.mem())
}
v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
// CONVNOP closure
if to.Kind() == types.TFUNC && from.IsPtrShaped() {
return v
}
// named <--> unnamed type or typed <--> untyped const
if from.Kind() == to.Kind() {
return v
}
// unsafe.Pointer <--> *T
if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
return v
}
// map <--> *hmap
if to.Kind() == types.TMAP && from.IsPtr() &&
to.MapType().Hmap == from.Elem() {
return v
}
types.CalcSize(from)
types.CalcSize(to)
if from.Size() != to.Size() {
s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Size(), to, to.Size())
return nil
}
if etypesign(from.Kind()) != etypesign(to.Kind()) {
s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Kind(), to, to.Kind())
return nil
}
if base.Flag.Cfg.Instrumenting {
// These appear to be fine, but they fail the
// integer constraint below, so okay them here.
// Sample non-integer conversion: map[string]string -> *uint8
return v
}
if etypesign(from.Kind()) == 0 {
s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
return nil
}
// integer, same width, same sign
return v
case ir.OCONV:
n := n.(*ir.ConvExpr)
x := s.expr(n.X)
ft := n.X.Type() // from type
tt := n.Type() // to type
if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
return s.newValue1(ssa.OpCopy, n.Type(), x)
}
if ft.IsInteger() && tt.IsInteger() {
var op ssa.Op
if tt.Size() == ft.Size() {
op = ssa.OpCopy
} else if tt.Size() < ft.Size() {
// truncation
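// The switch key packs the operand sizes as from*10+to,
// e.g. int64 -> int32 gives 84 (OpTrunc64to32).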
switch 10*ft.Size() + tt.Size() {
case 21:
op = ssa.OpTrunc16to8
case 41:
op = ssa.OpTrunc32to8
case 42:
op = ssa.OpTrunc32to16
case 81:
op = ssa.OpTrunc64to8
case 82:
op = ssa.OpTrunc64to16
case 84:
op = ssa.OpTrunc64to32
default:
s.Fatalf("weird integer truncation %v -> %v", ft, tt)
}
} else if ft.IsSigned() {
// sign extension
switch 10*ft.Size() + tt.Size() {
case 12:
op = ssa.OpSignExt8to16
case 14:
op = ssa.OpSignExt8to32
case 18:
op = ssa.OpSignExt8to64
case 24:
op = ssa.OpSignExt16to32
case 28:
op = ssa.OpSignExt16to64
case 48:
op = ssa.OpSignExt32to64
default:
s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
}
} else {
// zero extension
switch 10*ft.Size() + tt.Size() {
case 12:
op = ssa.OpZeroExt8to16
case 14:
op = ssa.OpZeroExt8to32
case 18:
op = ssa.OpZeroExt8to64
case 24:
op = ssa.OpZeroExt16to32
case 28:
op = ssa.OpZeroExt16to64
case 48:
op = ssa.OpZeroExt32to64
default:
s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
}
}
return s.newValue1(op, n.Type(), x)
}
if ft.IsFloat() || tt.IsFloat() {
conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
// tt is float32 or float64, and ft is also unsigned
if tt.Size() == 4 {
return s.uint32Tofloat32(n, x, ft, tt)
}
if tt.Size() == 8 {
return s.uint32Tofloat64(n, x, ft, tt)
}
} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
// ft is float32 or float64, and tt is unsigned integer
if ft.Size() == 4 {
return s.float32ToUint32(n, x, ft, tt)
}
if ft.Size() == 8 {
return s.float64ToUint32(n, x, ft, tt)
}
}
}
if !ok {
s.Fatalf("weird float conversion %v -> %v", ft, tt)
}
op1, op2, it := conv.op1, conv.op2, conv.intermediateType
if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
// normal case, not tripping over unsigned 64
if op1 == ssa.OpCopy {
if op2 == ssa.OpCopy {
return x
}
return s.newValueOrSfCall1(op2, n.Type(), x)
}
if op2 == ssa.OpCopy {
return s.newValueOrSfCall1(op1, n.Type(), x)
}
return s.newValueOrSfCall1(op2, n.Type(), s.newValueOrSfCall1(op1, types.Types[it], x))
}
// Tricky 64-bit unsigned cases.
if ft.IsInteger() {
// tt is float32 or float64, and ft is also unsigned
if tt.Size() == 4 {
return s.uint64Tofloat32(n, x, ft, tt)
}
if tt.Size() == 8 {
return s.uint64Tofloat64(n, x, ft, tt)
}
s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
}
// ft is float32 or float64, and tt is unsigned integer
if ft.Size() == 4 {
return s.float32ToUint64(n, x, ft, tt)
}
if ft.Size() == 8 {
return s.float64ToUint64(n, x, ft, tt)
}
s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
return nil
}
if ft.IsComplex() && tt.IsComplex() {
var op ssa.Op
if ft.Size() == tt.Size() {
switch ft.Size() {
case 8:
op = ssa.OpRound32F
case 16:
op = ssa.OpRound64F
default:
s.Fatalf("weird complex conversion %v -> %v", ft, tt)
}
} else if ft.Size() == 8 && tt.Size() == 16 {
op = ssa.OpCvt32Fto64F
} else if ft.Size() == 16 && tt.Size() == 8 {
op = ssa.OpCvt64Fto32F
} else {
s.Fatalf("weird complex conversion %v -> %v", ft, tt)
}
ftp := types.FloatForComplex(ft)
ttp := types.FloatForComplex(tt)
return s.newValue2(ssa.OpComplexMake, tt,
s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
}
s.Fatalf("unhandled OCONV %s -> %s", n.X.Type().Kind(), n.Type().Kind())
return nil
case ir.ODOTTYPE:
n := n.(*ir.TypeAssertExpr)
res, _ := s.dottype(n, false)
return res
case ir.ODYNAMICDOTTYPE:
n := n.(*ir.DynamicTypeAssertExpr)
res, _ := s.dynamicDottype(n, false)
return res
// binary ops
case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
if n.X.Type().IsComplex() {
pt := types.FloatForComplex(n.X.Type())
op := s.ssaOp(ir.OEQ, pt)
r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i)
switch n.Op() {
case ir.OEQ:
return c
case ir.ONE:
return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c)
default:
s.Fatalf("ordered complex compare %v", n.Op())
}
}
// Convert OGE and OGT into OLE and OLT.
op := n.Op()
switch op {
case ir.OGE:
op, a, b = ir.OLE, b, a
case ir.OGT:
op, a, b = ir.OLT, b, a
}
if n.X.Type().IsFloat() {
// float comparison
return s.newValueOrSfCall2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
}
// integer comparison
return s.newValue2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
case ir.OMUL:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
if n.Type().IsComplex() {
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
aimag := s.newValue1(ssa.OpComplexImag, pt, a)
bimag := s.newValue1(ssa.OpComplexImag, pt, b)
if pt != wt { // Widen for calculation
areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
}
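// (a+bi)*(c+di) = (ac - bd) + (ad + bc)i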
xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))
if pt != wt { // Narrow to store back
xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
}
if n.Type().IsFloat() {
return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.ODIV:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
if n.Type().IsComplex() {
// TODO this is not executed because the front-end substitutes a runtime call.
// That probably ought to change; with modest optimization the widen/narrow
// conversions could all be elided in larger expression trees.
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
divop := ssa.OpDiv64F
pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
aimag := s.newValue1(ssa.OpComplexImag, pt, a)
bimag := s.newValue1(ssa.OpComplexImag, pt, b)
if pt != wt { // Widen for calculation
areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
}
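// (a+bi)/(c+di) = ((ac + bd) + (bc - ad)i) / (c*c + d*d)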
denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))
// TODO not sure if this is best done in wide precision or narrow
// Double-rounding might be an issue.
// Note that the pre-SSA implementation does the entire calculation
// in wide format, so wide is compatible.
xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)
if pt != wt { // Narrow to store back
xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
}
if n.Type().IsFloat() {
return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
return s.intDivide(n, a, b)
case ir.OMOD:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
return s.intDivide(n, a, b)
case ir.OADD, ir.OSUB:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
if n.Type().IsComplex() {
pt := types.FloatForComplex(n.Type())
op := s.ssaOp(n.Op(), pt)
return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
}
if n.Type().IsFloat() {
return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.OAND, ir.OOR, ir.OXOR:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.OANDNOT:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)
case ir.OLSH, ir.ORSH:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
bt := b.Type
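// A signed shift count must be checked for negativity (panicking via
// Panicshift if it is negative) and is then treated as unsigned.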
if bt.IsSigned() {
cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
s.check(cmp, ir.Syms.Panicshift)
bt = bt.ToUnsigned()
}
return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)
case ir.OANDAND, ir.OOROR:
// To implement OANDAND (and OOROR), we introduce a
// new temporary variable to hold the result. The
// variable is associated with the OANDAND node in the
// s.vars table (normally variables are only
// associated with ONAME nodes). We convert
// A && B
// to
// var = A
// if var {
// var = B
// }
// Using var in the subsequent block introduces the
// necessary phi variable.
n := n.(*ir.LogicalExpr)
el := s.expr(n.X)
s.vars[n] = el
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(el)
// In theory, we should set b.Likely here based on context.
// However, gc only gives us likeliness hints
// in a single place, for plain OIF statements,
// and passing around context is finicky, so don't bother for now.
bRight := s.f.NewBlock(ssa.BlockPlain)
bResult := s.f.NewBlock(ssa.BlockPlain)
if n.Op() == ir.OANDAND {
b.AddEdgeTo(bRight)
b.AddEdgeTo(bResult)
} else if n.Op() == ir.OOROR {
b.AddEdgeTo(bResult)
b.AddEdgeTo(bRight)
}
s.startBlock(bRight)
er := s.expr(n.Y)
s.vars[n] = er
b = s.endBlock()
b.AddEdgeTo(bResult)
s.startBlock(bResult)
return s.variable(n, types.Types[types.TBOOL])
case ir.OCOMPLEX:
n := n.(*ir.BinaryExpr)
r := s.expr(n.X)
i := s.expr(n.Y)
return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)
// unary ops
case ir.ONEG:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
if n.Type().IsComplex() {
tp := types.FloatForComplex(n.Type())
negop := s.ssaOp(n.Op(), tp)
return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
}
return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
case ir.ONOT, ir.OBITNOT:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
case ir.OIMAG, ir.OREAL:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
return s.newValue1(s.ssaOp(n.Op(), n.X.Type()), n.Type(), a)
case ir.OPLUS:
n := n.(*ir.UnaryExpr)
return s.expr(n.X)
case ir.OADDR:
n := n.(*ir.AddrExpr)
return s.addr(n.X)
case ir.ORESULT:
n := n.(*ir.ResultExpr)
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
panic("Expected to see a previous call")
}
which := n.Index
if which == -1 {
panic(fmt.Errorf("ORESULT %v does not match call %s", n, s.prevCall))
}
return s.resultOfCall(s.prevCall, which, n.Type())
case ir.ODEREF:
n := n.(*ir.StarExpr)
p := s.exprPtr(n.X, n.Bounded(), n.Pos())
return s.load(n.Type(), p)
case ir.ODOT:
n := n.(*ir.SelectorExpr)
if n.X.Op() == ir.OSTRUCTLIT {
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !ir.IsZero(n.X) {
s.Fatalf("literal with nonzero value in SSA: %v", n.X)
}
return s.zeroVal(n.Type())
}
// If n is addressable and can't be represented in
// SSA, then load just the selected field. This
// prevents false memory dependencies in race/msan
// instrumentation.
if ir.IsAddressable(n) && !s.canSSA(n) {
p := s.addr(n)
return s.load(n.Type(), p)
}
v := s.expr(n.X)
return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)
case ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
p := s.exprPtr(n.X, n.Bounded(), n.Pos())
p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
return s.load(n.Type(), p)
case ir.OINDEX:
n := n.(*ir.IndexExpr)
switch {
case n.X.Type().IsString():
if n.Bounded() && ir.IsConst(n.X, constant.String) && ir.IsConst(n.Index, constant.Int) {
// Replace "abc"[1] with 'b'.
// Delayed until now because "abc"[1] is not an ideal constant.
// See test/fixedbugs/issue11370.go.
return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.X)[ir.Int64Val(n.Index)])))
}
a := s.expr(n.X)
i := s.expr(n.Index)
len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
ptrtyp := s.f.Config.Types.BytePtr
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
if ir.IsConst(n.Index, constant.Int) {
ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Index), ptr)
} else {
ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
}
return s.load(types.Types[types.TUINT8], ptr)
case n.X.Type().IsSlice():
p := s.addr(n)
return s.load(n.X.Type().Elem(), p)
case n.X.Type().IsArray():
if TypeOK(n.X.Type()) {
// SSA can handle arrays of length at most 1.
bound := n.X.Type().NumElem()
a := s.expr(n.X)
i := s.expr(n.Index)
if bound == 0 {
// Bounds check will never succeed. Might as well
// use constants for the bounds check.
z := s.constInt(types.Types[types.TINT], 0)
s.boundsCheck(z, z, ssa.BoundsIndex, false)
// The return value won't be live, return junk.
// But not quite junk, in case bounds checks are turned off. See issue 48092.
return s.zeroVal(n.Type())
}
len := s.constInt(types.Types[types.TINT], bound)
s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
}
p := s.addr(n)
return s.load(n.X.Type().Elem(), p)
default:
s.Fatalf("bad type for index %v", n.X.Type())
return nil
}
case ir.OLEN, ir.OCAP:
n := n.(*ir.UnaryExpr)
switch {
case n.X.Type().IsSlice():
op := ssa.OpSliceLen
if n.Op() == ir.OCAP {
op = ssa.OpSliceCap
}
return s.newValue1(op, types.Types[types.TINT], s.expr(n.X))
case n.X.Type().IsString(): // string; not reachable for OCAP
return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.X))
case n.X.Type().IsMap(), n.X.Type().IsChan():
return s.referenceTypeBuiltin(n, s.expr(n.X))
default: // array
return s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
}
case ir.OSPTR:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
if n.X.Type().IsSlice() {
return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
} else {
return s.newValue1(ssa.OpStringPtr, n.Type(), a)
}
case ir.OITAB:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
return s.newValue1(ssa.OpITab, n.Type(), a)
case ir.OIDATA:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
return s.newValue1(ssa.OpIData, n.Type(), a)
case ir.OEFACE:
n := n.(*ir.BinaryExpr)
tab := s.expr(n.X)
data := s.expr(n.Y)
return s.newValue2(ssa.OpIMake, n.Type(), tab, data)
case ir.OSLICEHEADER:
n := n.(*ir.SliceHeaderExpr)
p := s.expr(n.Ptr)
l := s.expr(n.Len)
c := s.expr(n.Cap)
return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
n := n.(*ir.SliceExpr)
v := s.expr(n.X)
var i, j, k *ssa.Value
if n.Low != nil {
i = s.expr(n.Low)
}
if n.High != nil {
j = s.expr(n.High)
}
if n.Max != nil {
k = s.expr(n.Max)
}
p, l, c := s.slice(v, i, j, k, n.Bounded())
if n.CheckPtrCall != nil {
s.stmt(n.CheckPtrCall)
}
return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
case ir.OSLICESTR:
n := n.(*ir.SliceExpr)
v := s.expr(n.X)
var i, j *ssa.Value
if n.Low != nil {
i = s.expr(n.Low)
}
if n.High != nil {
j = s.expr(n.High)
}
p, l, _ := s.slice(v, i, j, nil, n.Bounded())
return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
case ir.OSLICE2ARRPTR:
// if arrlen > slice.len {
// panic(...)
// }
// slice.ptr
n := n.(*ir.ConvExpr)
v := s.expr(n.X)
arrlen := s.constInt(types.Types[types.TINT], n.Type().Elem().NumElem())
cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false)
return s.newValue1(ssa.OpSlicePtrUnchecked, n.Type(), v)
case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
if ir.IsIntrinsicCall(n) {
return s.intrinsicCall(n)
}
fallthrough
case ir.OCALLINTER:
n := n.(*ir.CallExpr)
return s.callResult(n, callNormal)
case ir.OGETG:
n := n.(*ir.CallExpr)
return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
case ir.OGETCALLERPC:
n := n.(*ir.CallExpr)
return s.newValue0(ssa.OpGetCallerPC, n.Type())
case ir.OGETCALLERSP:
n := n.(*ir.CallExpr)
return s.newValue0(ssa.OpGetCallerSP, n.Type())
case ir.OAPPEND:
return s.append(n.(*ir.CallExpr), false)
case ir.OSTRUCTLIT, ir.OARRAYLIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
n := n.(*ir.CompLitExpr)
if !ir.IsZero(n) {
s.Fatalf("literal with nonzero value in SSA: %v", n)
}
return s.zeroVal(n.Type())
case ir.ONEW:
n := n.(*ir.UnaryExpr)
return s.newObject(n.Type().Elem())
case ir.OUNSAFEADD:
n := n.(*ir.BinaryExpr)
ptr := s.expr(n.X)
len := s.expr(n.Y)
return s.newValue2(ssa.OpAddPtr, n.Type(), ptr, len)
default:
s.Fatalf("unhandled expr %v", n.Op())
return nil
}
}
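// resultOfCall extracts result number which of call c. Register results and
// SSA-able memory results come out via SelectN; memory results of
// non-SSA-able type are loaded through SelectNAddr.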
func (s *state) resultOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
aux := c.Aux.(*ssa.AuxCall)
pa := aux.ParamAssignmentForResult(which)
// TODO(register args) determine if in-memory TypeOK is better loaded early from SelectNAddr or later when SelectN is expanded.
// SelectN is better for pattern-matching and possible call-aware analysis we might want to do in the future.
if len(pa.Registers) == 0 && !TypeOK(t) {
addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
return s.rawLoad(t, addr)
}
return s.newValue1I(ssa.OpSelectN, t, which, c)
}
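// resultAddrOfCall returns the address of result number which of call c.
// A result returned in registers is first stored to a stack temporary so
// that it has an address.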
func (s *state) resultAddrOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
aux := c.Aux.(*ssa.AuxCall)
pa := aux.ParamAssignmentForResult(which)
if len(pa.Registers) == 0 {
return s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
}
_, addr := s.temp(c.Pos, t)
rval := s.newValue1I(ssa.OpSelectN, t, which, c)
s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, addr, rval, s.mem(), false)
return addr
}
// append converts an OAPPEND node to SSA.
// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
// adds it to s, and returns the Value.
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
// If inplace is false, process as expression "append(s, e1, e2, e3)":
//
// ptr, len, cap := s
// newlen := len + 3
// if newlen > cap {
// ptr, len, cap = growslice(s, newlen)
// newlen = len + 3 // recalculate to avoid a spill
// }
// // with write barriers, if needed:
// *(ptr+len) = e1
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
// return makeslice(ptr, newlen, cap)
//
//
// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
//
// a := &s
// ptr, len, cap := s
// newlen := len + 3
// if uint(newlen) > uint(cap) {
// newptr, len, newcap = growslice(ptr, len, cap, newlen)
// vardef(a) // if necessary, advise liveness we are writing a new a
// *a.cap = newcap // write before ptr to avoid a spill
// *a.ptr = newptr // with write barrier
// }
// newlen = len + 3 // recalculate to avoid a spill
// *a.len = newlen
// // with write barriers, if needed:
// *(ptr+len) = e1
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
et := n.Type().Elem()
pt := types.NewPtr(et)
// Evaluate slice
sn := n.Args[0] // the slice node is the first in the list
var slice, addr *ssa.Value
if inplace {
addr = s.addr(sn)
slice = s.load(n.Type(), addr)
} else {
slice = s.expr(sn)
}
// Allocate new blocks
grow := s.f.NewBlock(ssa.BlockPlain)
assign := s.f.NewBlock(ssa.BlockPlain)
// Decide if we need to grow
nargs := int64(len(n.Args) - 1)
p := s.newValue1(ssa.OpSlicePtr, pt, slice)
l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
nl := s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, nl)
s.vars[ptrVar] = p
if !inplace {
s.vars[newlenVar] = nl
s.vars[capVar] = c
} else {
s.vars[lenVar] = l
}
b := s.endBlock()
b.Kind = ssa.BlockIf
b.Likely = ssa.BranchUnlikely
b.SetControl(cmp)
b.AddEdgeTo(grow)
b.AddEdgeTo(assign)
// Call growslice
s.startBlock(grow)
taddr := s.expr(n.X)
r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl)
if inplace {
if sn.Op() == ir.ONAME {
sn := sn.(*ir.Name)
if sn.Class != ir.PEXTERN {
// Tell liveness we're about to build a new slice
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
}
}
capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceCapOffset, addr)
s.store(types.Types[types.TINT], capaddr, r[2])
s.store(pt, addr, r[0])
// load the value we just stored to avoid having to spill it
s.vars[ptrVar] = s.load(pt, addr)
s.vars[lenVar] = r[1] // avoid a spill in the fast path
} else {
s.vars[ptrVar] = r[0]
s.vars[newlenVar] = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], r[1], s.constInt(types.Types[types.TINT], nargs))
s.vars[capVar] = r[2]
}
b = s.endBlock()
b.AddEdgeTo(assign)
// assign new elements to slots
s.startBlock(assign)
if inplace {
l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len
nl = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceLenOffset, addr)
s.store(types.Types[types.TINT], lenaddr, nl)
}
// Evaluate args
type argRec struct {
// if store is true, we're appending the value v. If false, we're appending the
// value at *v.
v *ssa.Value
store bool
}
args := make([]argRec, 0, nargs)
for _, n := range n.Args[1:] {
if TypeOK(n.Type()) {
args = append(args, argRec{v: s.expr(n), store: true})
} else {
v := s.addr(n)
args = append(args, argRec{v: v})
}
}
p = s.variable(ptrVar, pt) // generates phi for ptr
if !inplace {
nl = s.variable(newlenVar, types.Types[types.TINT]) // generates phi for nl
c = s.variable(capVar, types.Types[types.TINT]) // generates phi for cap
}
p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
for i, arg := range args {
addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[types.TINT], int64(i)))
if arg.store {
s.storeType(et, addr, arg.v, 0, true)
} else {
s.move(et, addr, arg.v)
}
}
delete(s.vars, ptrVar)
if inplace {
delete(s.vars, lenVar)
return nil
}
delete(s.vars, newlenVar)
delete(s.vars, capVar)
// make result
return s.newValue3(ssa.OpSliceMake, n.Type(), p, nl, c)
}
// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
switch cond.Op() {
case ir.OANDAND:
cond := cond.(*ir.LogicalExpr)
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Init())
s.condBranch(cond.X, mid, no, max8(likely, 0))
s.startBlock(mid)
s.condBranch(cond.Y, yes, no, likely)
return
// Note: if likely==1, then both recursive calls pass 1.
// If likely==-1, then we don't have enough information to decide
// whether the first branch is likely or not. So we pass 0 for
// the likeliness of the first branch.
// TODO: have the frontend give us branch prediction hints for
// OANDAND and OOROR nodes (if it ever has such info).
case ir.OOROR:
cond := cond.(*ir.LogicalExpr)
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Init())
s.condBranch(cond.X, yes, mid, min8(likely, 0))
s.startBlock(mid)
s.condBranch(cond.Y, yes, no, likely)
return
// Note: if likely==-1, then both recursive calls pass -1.
// If likely==1, then we don't have enough info to decide
// the likelihood of the first branch.
case ir.ONOT:
cond := cond.(*ir.UnaryExpr)
s.stmtList(cond.Init())
s.condBranch(cond.X, no, yes, -likely)
return
case ir.OCONVNOP:
cond := cond.(*ir.ConvExpr)
s.stmtList(cond.Init())
s.condBranch(cond.X, yes, no, likely)
return
}
c := s.expr(cond)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(c)
b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
b.AddEdgeTo(yes)
b.AddEdgeTo(no)
}
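// skipMask records which top-level parts of an assignment (the slice ptr,
// len, and cap fields) the caller has determined do not need to be stored.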
type skipMask uint8
const (
skipPtr skipMask = 1 << iota
skipLen
skipCap
)
// assign does left = right.
// Right has already been evaluated to ssa, left has not.
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
if left.Op() == ir.ONAME && ir.IsBlank(left) {
return
}
t := left.Type()
types.CalcSize(t)
if s.canSSA(left) {
if deref {
s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
}
if left.Op() == ir.ODOT {
// We're assigning to a field of an ssa-able value.
// We need to build a new structure with the new value for the
// field we're assigning and the old values for the other fields.
// For instance:
// type T struct {a, b, c int}
// var x T
// x.b = 5
// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
// Grab information about the structure type.
left := left.(*ir.SelectorExpr)
t := left.X.Type()
nf := t.NumFields()
idx := fieldIdx(left)
// Grab old value of structure.
old := s.expr(left.X)
// Make new structure.
new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
// Add fields as args.
for i := 0; i < nf; i++ {
if i == idx {
new.AddArg(right)
} else {
new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
}
}
// Recursively assign the new value we've made to the base of the dot op.
s.assign(left.X, new, false, 0)
// TODO: do we need to update named values here?
return
}
if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).X.Type().IsArray() {
left := left.(*ir.IndexExpr)
s.pushLine(left.Pos())
defer s.popLine()
// We're assigning to an element of an ssa-able array.
// a[i] = v
t := left.X.Type()
n := t.NumElem()
i := s.expr(left.Index) // index
if n == 0 {
// The bounds check must fail. Might as well
// ignore the actual index and just use zeros.
z := s.constInt(types.Types[types.TINT], 0)
s.boundsCheck(z, z, ssa.BoundsIndex, false)
return
}
if n != 1 {
s.Fatalf("assigning to non-1-length array")
}
// Rewrite to a = [1]{v}
len := s.constInt(types.Types[types.TINT], 1)
s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
v := s.newValue1(ssa.OpArrayMake1, t, right)
s.assign(left.X, v, false, 0)
return
}
left := left.(*ir.Name)
// Update variable assignment.
s.vars[left] = right
s.addNamedValue(left, right)
return
}
// If this assignment clobbers an entire local variable, then emit
// OpVarDef so liveness analysis knows the variable is redefined.
if base, ok := clobberBase(left).(*ir.Name); ok && base.OnStack() && skip == 0 {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
}
// Left is not ssa-able. Compute its address.
addr := s.addr(left)
if ir.IsReflectHeaderDataField(left) {
// Package unsafe's documentation says storing pointers into
// reflect.SliceHeader and reflect.StringHeader's Data fields
// is valid, even though they have type uintptr (#19168).
// Mark it pointer type to signal the writebarrier pass to
// insert a write barrier.
t = types.Types[types.TUNSAFEPTR]
}
if deref {
// Treat as a mem->mem move.
if right == nil {
s.zero(t, addr)
} else {
s.move(t, addr, right)
}
return
}
// Treat as a store.
s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left))
}
// zeroVal returns the zero value for type t.
func (s *state) zeroVal(t *types.Type) *ssa.Value {
switch {
case t.IsInteger():
switch t.Size() {
case 1:
return s.constInt8(t, 0)
case 2:
return s.constInt16(t, 0)
case 4:
return s.constInt32(t, 0)
case 8:
return s.constInt64(t, 0)
default:
s.Fatalf("bad sized integer type %v", t)
}
case t.IsFloat():
switch t.Size() {
case 4:
return s.constFloat32(t, 0)
case 8:
return s.constFloat64(t, 0)
default:
s.Fatalf("bad sized float type %v", t)
}
case t.IsComplex():
switch t.Size() {
case 8:
z := s.constFloat32(types.Types[types.TFLOAT32], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
case 16:
z := s.constFloat64(types.Types[types.TFLOAT64], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
default:
s.Fatalf("bad sized complex type %v", t)
}
case t.IsString():
return s.constEmptyString(t)
case t.IsPtrShaped():
return s.constNil(t)
case t.IsBoolean():
return s.constBool(false)
case t.IsInterface():
return s.constInterface(t)
case t.IsSlice():
return s.constSlice(t)
case t.IsStruct():
n := t.NumFields()
v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
for i := 0; i < n; i++ {
v.AddArg(s.zeroVal(t.FieldType(i)))
}
return v
case t.IsArray():
switch t.NumElem() {
case 0:
return s.entryNewValue0(ssa.OpArrayMake0, t)
case 1:
return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
}
}
s.Fatalf("zero for type %v not implemented", t)
return nil
}
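// callKind describes how a call is lowered: a plain call, a deferred call,
// a deferred call with a stack-allocated defer record, or a go statement.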
type callKind int8
const (
callNormal callKind = iota
callDefer
callDeferStack
callGo
)
type sfRtCallDef struct {
rtfn *obj.LSym
rtype types.Kind
}
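// softFloatOps maps floating-point SSA ops to the runtime helper (and its
// result kind) that implements them when compiling with soft float.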
var softFloatOps map[ssa.Op]sfRtCallDef
func softfloatInit() {
// Some of these operations get transformed by sfcall.
softFloatOps = map[ssa.Op]sfRtCallDef{
ssa.OpAdd32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
ssa.OpAdd64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
ssa.OpSub32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
ssa.OpSub64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
ssa.OpMul32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul32"), types.TFLOAT32},
ssa.OpMul64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul64"), types.TFLOAT64},
ssa.OpDiv32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv32"), types.TFLOAT32},
ssa.OpDiv64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv64"), types.TFLOAT64},
ssa.OpEq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
ssa.OpEq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
ssa.OpNeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
ssa.OpNeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
ssa.OpLess64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt64"), types.TBOOL},
ssa.OpLess32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt32"), types.TBOOL},
ssa.OpLeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge64"), types.TBOOL},
ssa.OpLeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge32"), types.TBOOL},
ssa.OpCvt32to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to32"), types.TFLOAT32},
ssa.OpCvt32Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint32"), types.TINT32},
ssa.OpCvt64to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to32"), types.TFLOAT32},
ssa.OpCvt32Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint64"), types.TINT64},
ssa.OpCvt64Uto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to32"), types.TFLOAT32},
ssa.OpCvt32Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f32touint64"), types.TUINT64},
ssa.OpCvt32to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to64"), types.TFLOAT64},
ssa.OpCvt64Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint32"), types.TINT32},
ssa.OpCvt64to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to64"), types.TFLOAT64},
ssa.OpCvt64Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint64"), types.TINT64},
ssa.OpCvt64Uto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to64"), types.TFLOAT64},
ssa.OpCvt64Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f64touint64"), types.TUINT64},
ssa.OpCvt32Fto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("f32to64"), types.TFLOAT64},
ssa.OpCvt64Fto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("f64to32"), types.TFLOAT32},
}
}
// TODO: do not emit sfcall if operation can be optimized to constant in later
// opt phase
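// sfcall lowers the floating-point operation op to a call of the matching
// soft-float runtime helper and returns (result, true), or (nil, false) if
// op has no soft-float form.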
func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
f2i := func(t *types.Type) *types.Type {
switch t.Kind() {
case types.TFLOAT32:
return types.Types[types.TUINT32]
case types.TFLOAT64:
return types.Types[types.TUINT64]
}
return t
}
if callDef, ok := softFloatOps[op]; ok {
switch op {
case ssa.OpLess32F,
ssa.OpLess64F,
ssa.OpLeq32F,
ssa.OpLeq64F:
args[0], args[1] = args[1], args[0]
case ssa.OpSub32F,
ssa.OpSub64F:
args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
}
// runtime functions take uints for floats and return uints.
// Convert to uints so we use the right calling convention.
for i, a := range args {
if a.Type.IsFloat() {
args[i] = s.newValue1(ssa.OpCopy, f2i(a.Type), a)
}
}
rt := types.Types[callDef.rtype]
result := s.rtcall(callDef.rtfn, true, []*types.Type{f2i(rt)}, args...)[0]
if rt.IsFloat() {
result = s.newValue1(ssa.OpCopy, rt, result)
}
if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
result = s.newValue1(ssa.OpNot, result.Type, result)
}
return result, true
}
return nil, false
}
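// intrinsics maps (architecture, package, function name) to a builder that
// expands the call inline instead of emitting a normal call.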
var intrinsics map[intrinsicKey]intrinsicBuilder
// An intrinsicBuilder converts a call node n into an ssa value that
// implements that call as an intrinsic. args is a list of arguments to the func.
type intrinsicBuilder func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value
type intrinsicKey struct {
arch *sys.Arch
pkg string
fn string
}
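// InitTables populates the intrinsics table for all architectures.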
func InitTables() {
intrinsics = map[intrinsicKey]intrinsicBuilder{}
var all []*sys.Arch
var p4 []*sys.Arch
var p8 []*sys.Arch
var lwatomics []*sys.Arch
for _, a := range &sys.Archs {
all = append(all, a)
if a.PtrSize == 4 {
p4 = append(p4, a)
} else {
p8 = append(p8, a)
}
if a.Family != sys.PPC64 {
lwatomics = append(lwatomics, a)
}
}
// add adds the intrinsic b for pkg.fn for the given list of architectures.
add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
for _, a := range archs {
intrinsics[intrinsicKey{a, pkg, fn}] = b
}
}
// addF does the same as add but operates on architecture families.
addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
m := 0
for _, f := range archFamilies {
if f >= 32 {
panic("too many architecture families")
}
m |= 1 << uint(f)
}
for _, a := range all {
if m>>uint(a.Family)&1 != 0 {
intrinsics[intrinsicKey{a, pkg, fn}] = b
}
}
}
// alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
aliased := false
for _, a := range archs {
if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
intrinsics[intrinsicKey{a, pkg, fn}] = b
aliased = true
}
}
if !aliased {
panic(fmt.Sprintf("attempted to alias undefined intrinsic: %s.%s", pkg, fn))
}
}
/******** runtime ********/
if !base.Flag.Cfg.Instrumenting {
add("runtime", "slicebytetostringtmp",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// Compiler frontend optimizations emit OBYTES2STRTMP nodes
// for the backend instead of slicebytetostringtmp calls
// when not instrumenting.
return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1])
},
all...)
}
addF("runtime/internal/math", "MulUintptr",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
}
return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
},
sys.AMD64, sys.I386, sys.MIPS64, sys.RISCV64)
add("runtime", "KeepAlive",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
return nil
},
all...)
add("runtime", "getclosureptr",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallerpc",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallersp",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
},
all...)
/******** runtime/internal/sys ********/
addF("runtime/internal/sys", "Ctz32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Ctz64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Bswap32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
addF("runtime/internal/sys", "Bswap64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
/****** Prefetch ******/
makePrefetchFunc := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue2(op, types.TypeMem, args[0], s.mem())
return nil
}
}
// Make Prefetch intrinsics for supported platforms
// On unsupported platforms the stub function will be eliminated.
addF("runtime/internal/sys", "Prefetch", makePrefetchFunc(ssa.OpPrefetchCache),
sys.AMD64, sys.ARM64)
addF("runtime/internal/sys", "PrefetchStreamed", makePrefetchFunc(ssa.OpPrefetchCacheStreamed),
sys.AMD64, sys.ARM64)
/******** runtime/internal/atomic ********/
addF("runtime/internal/atomic", "Load",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StorepNoWB",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64)
addF("runtime/internal/atomic", "Xchg",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xchg64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind)
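// makeAtomicGuardedIntrinsicARM64 builds an intrinsic that branches on the
// runtime ARM64HasATOMICS flag: when the CPU provides the atomic instructions
// the op1 ("Variant") form is emitted, otherwise the op0 (LL/SC) form is used.
// The result, of kind rtyp, is merged from the two branches; rtyp == TNIL
// means the operation produces no result.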
makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// Whether the target CPU has the atomic instructions is detected dynamically at run time.
addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARM64HasATOMICS, s.sb)
v := s.load(types.Types[types.TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely
// We have atomic instructions - use them directly.
s.startBlock(bTrue)
emit(s, n, args, op1, typ)
s.endBlock().AddEdgeTo(bEnd)
// Use original instruction sequence.
s.startBlock(bFalse)
emit(s, n, args, op0, typ)
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
if rtyp == types.TNIL {
return nil
} else {
return s.variable(n, types.Types[rtyp])
}
}
}
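// atomicXchgXaddEmitterARM64 is the emit callback used by the guarded
// exchange/add intrinsics below: it issues a single atomic op and records
// the returned value and the new memory state.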
atomicXchgXaddEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("runtime/internal/atomic", "Xchg",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xchg64",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xadd",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xadd64",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Cas",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Cas64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "CasRel",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.PPC64)
atomicCasEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("runtime/internal/atomic", "Cas",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Cas64",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "And8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "And",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Or8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Or",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
}
addF("runtime/internal/atomic", "And8",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "And",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Or8",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Or",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
// Aliases for atomic load operations
alias("runtime/internal/atomic", "Loadint32", "runtime/internal/atomic", "Load", all...)
alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
// Aliases for atomic store operations
alias("runtime/internal/atomic", "Storeint32", "runtime/internal/atomic", "Store", all...)
alias("runtime/internal/atomic", "Storeint64", "runtime/internal/atomic", "Store64", all...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
// Aliases for atomic swap operations
alias("runtime/internal/atomic", "Xchgint32", "runtime/internal/atomic", "Xchg", all...)
alias("runtime/internal/atomic", "Xchgint64", "runtime/internal/atomic", "Xchg64", all...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
// Aliases for atomic add operations
alias("runtime/internal/atomic", "Xaddint32", "runtime/internal/atomic", "Xadd", all...)
alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
// Aliases for atomic CAS operations
alias("runtime/internal/atomic", "Casint32", "runtime/internal/atomic", "Cas", all...)
alias("runtime/internal/atomic", "Casint64", "runtime/internal/atomic", "Cas64", all...)
alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)
/******** math ********/
addF("math", "Sqrt",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
},
sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
addF("math", "Trunc",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Ceil",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Floor",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Round",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X)
addF("math", "RoundToEven",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.S390X, sys.Wasm)
addF("math", "Abs",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm)
addF("math", "Copysign",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
},
sys.PPC64, sys.Wasm)
addF("math", "FMA",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
},
sys.ARM64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("math", "FMA",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
return s.variable(n, types.Types[types.TFLOAT64])
}
v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasFMA)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely // >= haswell cpus are common
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[types.TFLOAT64])
},
sys.AMD64)
addF("math", "FMA",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
return s.variable(n, types.Types[types.TFLOAT64])
}
addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARMHasVFPv4, s.sb)
v := s.load(types.Types[types.TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[types.TFLOAT64])
},
sys.ARM)
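// makeRoundAMD64 guards a rounding op behind a runtime SSE4.1 check
// (X86HasSSE41); when the feature is missing, the pure Go implementation is
// called instead.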
makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasSSE41)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[types.TFLOAT64])
}
}
addF("math", "RoundToEven",
makeRoundAMD64(ssa.OpRoundToEven),
sys.AMD64)
addF("math", "Floor",
makeRoundAMD64(ssa.OpFloor),
sys.AMD64)
addF("math", "Ceil",
makeRoundAMD64(ssa.OpCeil),
sys.AMD64)
addF("math", "Trunc",
makeRoundAMD64(ssa.OpTrunc),
sys.AMD64)
/******** math/bits ********/
addF("math/bits", "TrailingZeros64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
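// OR in 1<<16 so that a zero 16-bit argument yields Ctz32 == 16 rather than 32.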
x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
c := s.constInt32(types.Types[types.TUINT32], 1<<16)
y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
},
sys.MIPS)
addF("math/bits", "TrailingZeros16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
c := s.constInt64(types.Types[types.TUINT64], 1<<16)
y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
},
sys.S390X, sys.PPC64)
addF("math/bits", "TrailingZeros8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
c := s.constInt32(types.Types[types.TUINT32], 1<<8)
y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
},
sys.MIPS)
addF("math/bits", "TrailingZeros8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
c := s.constInt64(types.Types[types.TUINT64], 1<<8)
y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
},
sys.S390X)
alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
// ReverseBytes inlines correctly, no need to intrinsify it.
// ReverseBytes16 lowers to a rotate, no need for anything special here.
addF("math/bits", "Len64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64)
addF("math/bits", "Len32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
}
x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
}
x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
// LeadingZeros is handled because it trivially calls Len.
addF("math/bits", "Reverse64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
}
return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "RotateLeft8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "RotateLeft64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasPOPCNT)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely // most machines have popcnt nowadays
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
op := op64
if s.config.PtrSize == 4 {
op = op32
}
s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TINT]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[types.TINT])
}
}
addF("math/bits", "OnesCount64",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
sys.AMD64)
addF("math/bits", "OnesCount64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
addF("math/bits", "OnesCount32",
makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "OnesCount32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
addF("math/bits", "OnesCount16",
makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
sys.AMD64)
addF("math/bits", "OnesCount16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
},
sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "OnesCount8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
},
sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "OnesCount",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "Mul64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64, sys.RISCV64)
alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE, sys.ArchRISCV64)
alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE, sys.ArchRISCV64)
addF("math/bits", "Add64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X)
addF("math/bits", "Sub64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.S390X)
alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
addF("math/bits", "Div64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// check for divide-by-zero/overflow and panic with appropriate message
cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
s.check(cmpZero, ir.Syms.Panicdivide)
cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2])
s.check(cmpOverflow, ir.Syms.Panicoverflow)
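// Div128u yields the quotient and remainder of the 128-bit value
// args[0]<<64 | args[1] divided by args[2].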
return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64)
alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
alias("runtime/internal/sys", "Ctz8", "math/bits", "TrailingZeros8", all...)
alias("runtime/internal/sys", "TrailingZeros8", "math/bits", "TrailingZeros8", all...)
alias("runtime/internal/sys", "TrailingZeros64", "math/bits", "TrailingZeros64", all...)
alias("runtime/internal/sys", "Len8", "math/bits", "Len8", all...)
alias("runtime/internal/sys", "Len64", "math/bits", "Len64", all...)
alias("runtime/internal/sys", "OnesCount64", "math/bits", "OnesCount64", all...)
/******** sync/atomic ********/
// Note: these are disabled when the race detector is in use; see findIntrinsic below.
alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
// Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap.
alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
/******** math/big ********/
add("math/big", "mulWW",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X)
}
// findIntrinsic returns a function which builds the SSA equivalent of the
// function identified by the symbol sym. If sym is not an intrinsic call, returns nil.
func findIntrinsic(sym *types.Sym) intrinsicBuilder {
if sym == nil || sym.Pkg == nil {
return nil
}
pkg := sym.Pkg.Path
if sym.Pkg == types.LocalPkg {
pkg = base.Ctxt.Pkgpath
}
if sym.Pkg == ir.Pkgs.Runtime {
pkg = "runtime"
}
if base.Flag.Race && pkg == "sync/atomic" {
// The race detector needs to be able to intercept these calls.
// We can't intrinsify them.
return nil
}
// Skip intrinsifying math functions (which may contain hard-float
// instructions) when soft-float
if Arch.SoftFloat && pkg == "math" {
return nil
}
fn := sym.Name
if ssa.IntrinsicsDisable {
if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") {
// These runtime functions don't have definitions, must be intrinsics.
} else {
return nil
}
}
return intrinsics[intrinsicKey{Arch.LinkArch.Arch, pkg, fn}]
}
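// IsIntrinsicCall reports whether n is a call that will be replaced by an
// architecture-specific intrinsic.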
func IsIntrinsicCall(n *ir.CallExpr) bool {
if n == nil {
return false
}
name, ok := n.X.(*ir.Name)
if !ok {
return false
}
return findIntrinsic(name.Sym()) != nil
}
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
v := findIntrinsic(n.X.Sym())(s, n, s.intrinsicArgs(n))
if ssa.IntrinsicsDebug > 0 {
x := v
if x == nil {
x = s.mem()
}
if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
x = x.Args[0]
}
base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.X.Sym().Name, x.LongString())
}
return v
}
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
args := make([]*ssa.Value, len(n.Args))
for i, n := range n.Args {
args[i] = s.expr(n)
}
return args
}
// openDeferRecord adds code to evaluate and store the function for an open-coded defer
// call, and records info about the defer, so we can generate proper code on the
// exit paths. n is the sub-node of the defer node that is the actual function
// call. We will also record funcdata information on where the function is stored
// (as well as the deferBits variable), and this will enable us to run the proper
// defer calls during panics.
func (s *state) openDeferRecord(n *ir.CallExpr) {
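// By this point every deferred call must have been rewritten into a direct
// call of a no-argument, no-result function; anything else is a compiler bug.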
if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.X.Type().NumResults() != 0 {
s.Fatalf("defer call with arguments or results: %v", n)
}
opendefer := &openDeferInfo{
n: n,
}
fn := n.X
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
// call the function directly if it is a static function.
closureVal := s.expr(fn)
closure := s.openDeferSave(fn.Type(), closureVal)
opendefer.closureNode = closure.Aux.(*ir.Name)
if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
opendefer.closure = closure
}
index := len(s.openDefers)
s.openDefers = append(s.openDefers, opendefer)
// Update deferBits only after the function value has been evaluated and
// stored to the stack.
bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
s.vars[deferBitsVar] = newDeferBits
s.store(types.Types[types.TUINT8], s.deferBitsAddr, newDeferBits)
}
// openDeferSave generates SSA nodes to store a value (with type t) for an
// open-coded defer at an explicit autotmp location on the stack, so it can be
// reloaded and used for the appropriate call on exit. Type t must be a function type
// (therefore SSAable). val is the value to be stored. The function returns an SSA
// value representing a pointer to the autotmp location.
func (s *state) openDeferSave(t *types.Type, val *ssa.Value) *ssa.Value {
if !TypeOK(t) {
s.Fatalf("openDeferSave of non-SSA-able type %v val=%v", t, val)
}
if !t.HasPointers() {
s.Fatalf("openDeferSave of pointerless type %v val=%v", t, val)
}
pos := val.Pos
temp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
temp.SetOpenDeferSlot(true)
var addrTemp *ssa.Value
// Use OpVarLive to make sure the stack slot for the closure is not removed
// by dead-store elimination.
if s.curBlock.ID != s.f.Entry.ID {
// Force the tmp storing this defer function to be declared in the entry
// block, so that it will be live for the defer exit code (which will
// actually access it only if the associated defer call has been activated).
s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
addrTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.defvars[s.f.Entry.ID][memVar])
} else {
// Special case if we're still in the entry block. We can't use
// the above code, since s.defvars[s.f.Entry.ID] isn't defined
// until we end the entry block with s.endBlock().
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, temp, s.mem(), false)
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, temp, s.mem(), false)
addrTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.mem(), false)
}
// Since we may use this temp during exit depending on the
// deferBits, we must define it unconditionally on entry.
// Therefore, we must make sure it is zeroed out in the entry
// block if it contains pointers, else GC may wrongly follow an
// uninitialized pointer value.
temp.SetNeedzero(true)
// We are storing to the stack, hence we can avoid the full checks in
// storeType() (no write barrier) and do a simple store().
s.store(t, addrTemp, val)
return addrTemp
}
// openDeferExit generates SSA for processing all the open-coded defers at exit.
// The code involves loading deferBits, and checking each of the bits to see if
// the corresponding defer statement was executed. For each bit that is turned
// on, the associated defer call is made.
func (s *state) openDeferExit() {
deferExit := s.f.NewBlock(ssa.BlockPlain)
s.endBlock().AddEdgeTo(deferExit)
s.startBlock(deferExit)
s.lastDeferExit = deferExit
s.lastDeferCount = len(s.openDefers)
zeroval := s.constInt8(types.Types[types.TUINT8], 0)
// Test for and run defers in reverse order
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
bCond := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8])
// Generate code to check if the bit associated with the current
// defer is set.
bitval := s.constInt8(types.Types[types.TUINT8], 1<<uint(i))
andval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, bitval)
eqVal := s.newValue2(ssa.OpEq8, types.Types[types.TBOOL], andval, zeroval)
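// eqVal is true when this defer's bit is clear, i.e. the defer statement was
// never reached; in that case control flows straight to bEnd.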
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(eqVal)
b.AddEdgeTo(bEnd)
b.AddEdgeTo(bCond)
bCond.AddEdgeTo(bEnd)
s.startBlock(bCond)
// Clear this bit in deferBits and force store back to stack, so
// we will not try to re-run this defer call if this defer call panics.
nbitval := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], bitval)
maskedval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, nbitval)
s.store(types.Types[types.TUINT8], s.deferBitsAddr, maskedval)
// Use this value for following tests, so we keep previous
// bits cleared.
s.vars[deferBitsVar] = maskedval
// Generate code to call the deferred function, using the closure that was
// stored at the point of the defer statement.
fn := r.n.X
stksize := fn.Type().ArgWidth()
var callArgs []*ssa.Value
var call *ssa.Value
if r.closure != nil {
v := s.load(r.closure.Type.Elem(), r.closure)
s.maybeNilCheckClosure(v, callDefer)
codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
} else {
aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
}
callArgs = append(callArgs, s.mem())
call.AddArgs(callArgs...)
call.AuxInt = stksize
s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, 0, call)
// Make sure that the stack slots with pointers are kept live
// through the call (which is a pre-emption point). Also, we will
// use the first call of the last defer exit to compute liveness
// for the deferreturn, so we want all stack slots to be live.
if r.closureNode != nil {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
}
s.endBlock()
s.startBlock(bEnd)
}
}
func (s *state) callResult(n *ir.CallExpr, k callKind) *ssa.Value {
return s.call(n, k, false)
}
func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
return s.call(n, k, true)
}
// Calls the function n using the specified call type.
// Returns the address of the return value (or nil if none).
func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value {
s.prevCall = nil
var callee *ir.Name // target function (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
fn := n.X
var ACArgs []*types.Type // AuxCall args
var ACResults []*types.Type // AuxCall results
var callArgs []*ssa.Value // For late-expansion, the args themselves (not stored, args to the call instead).
callABI := s.f.ABIDefault
if !buildcfg.Experiment.RegabiArgs {
var magicFnNameSym *types.Sym
if fn.Name() != nil {
magicFnNameSym = fn.Name().Sym()
ss := magicFnNameSym.Name
if strings.HasSuffix(ss, magicNameDotSuffix) {
callABI = s.f.ABI1
}
}
if magicFnNameSym == nil && n.Op() == ir.OCALLINTER {
magicFnNameSym = fn.(*ir.SelectorExpr).Sym()
ss := magicFnNameSym.Name
if strings.HasSuffix(ss, magicNameDotSuffix[1:]) {
callABI = s.f.ABI1
}
}
}
if k != callNormal && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
s.Fatalf("go/defer call with arguments: %v", n)
}
switch n.Op() {
case ir.OCALLFUNC:
if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
fn := fn.(*ir.Name)
callee = fn
if buildcfg.Experiment.RegabiArgs {
// This is a static call, so it may be
// a direct call to a non-ABIInternal
// function. fn.Func may be nil for
// some compiler-generated functions,
// but those are all ABIInternal.
if fn.Func != nil {
callABI = abiForFunc(fn.Func, s.f.ABI0, s.f.ABI1)
}
} else {
// TODO(register args) remove after register abi is working
inRegistersImported := fn.Pragma()&ir.RegisterParams != 0
inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0
if inRegistersImported || inRegistersSamePackage {
callABI = s.f.ABI1
}
}
break
}
closure = s.expr(fn)
if k != callDefer && k != callDeferStack {
// A deferred nil function needs to panic when the function is invoked,
// not at the point of the defer statement.
s.maybeNilCheckClosure(closure, k)
}
case ir.OCALLINTER:
if fn.Op() != ir.ODOTINTER {
s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
}
fn := fn.(*ir.SelectorExpr)
var iclosure *ssa.Value
iclosure, rcvr = s.getClosureAndRcvr(fn)
if k == callNormal {
codeptr = s.load(types.Types[types.TUINTPTR], iclosure)
} else {
closure = iclosure
}
}
if !buildcfg.Experiment.RegabiArgs {
if regAbiForFuncType(n.X.Type().FuncType()) {
// Magic last type in input args to call
callABI = s.f.ABI1
}
}
params := callABI.ABIAnalyze(n.X.Type(), false /* Do not set (register) nNames from caller side -- can cause races. */)
types.CalcSize(fn.Type())
stksize := params.ArgWidth() // includes receiver, args, and results
res := n.X.Type().Results()
if k == callNormal {
for _, p := range params.OutParams() {
ACResults = append(ACResults, p.Type)
}
}
var call *ssa.Value
if k == callDeferStack {
// Make a defer struct d on the stack.
if stksize != 0 {
s.Fatalf("deferprocStack with non-zero stack size %d: %v", stksize, n)
}
t := deferstruct()
d := typecheck.TempAt(n.Pos(), s.curfn, t)
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
addr := s.addr(d)
// Must match deferstruct() below and src/runtime/runtime2.go:_defer.
// 0: started, set in deferprocStack
// 1: heap, set in deferprocStack
// 2: openDefer
// 3: sp, set in deferprocStack
// 4: pc, set in deferprocStack
// 5: fn
s.store(closure.Type,
s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(5), addr),
closure)
// 6: panic, set in deferprocStack
// 7: link, set in deferprocStack
// 8: fd
// 9: varp
// 10: framepc
// Call runtime.deferprocStack with pointer to _defer record.
ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
callArgs = append(callArgs, addr, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
call.AuxInt = int64(types.PtrSize) // deferprocStack takes a *_defer arg
} else {
// Store arguments to stack, including defer/go arguments and receiver for method calls.
// These are written in SP-offset order.
argStart := base.Ctxt.FixedFrameSize()
// Defer/go args.
if k != callNormal {
// Write closure (arg to newproc/deferproc).
ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) // not argExtra
callArgs = append(callArgs, closure)
stksize += int64(types.PtrSize)
argStart += int64(types.PtrSize)
}
// Set receiver (for interface calls).
if rcvr != nil {
callArgs = append(callArgs, rcvr)
}
// Write args.
t := n.X.Type()
args := n.Args
for _, p := range params.InParams() { // includes receiver for interface calls
ACArgs = append(ACArgs, p.Type)
}
for i, n := range args {
callArgs = append(callArgs, s.putArg(n, t.Params().Field(i).Type))
}
callArgs = append(callArgs, s.mem())
// call target
switch {
case k == callDefer:
aux := ssa.StaticAuxCall(ir.Syms.Deferproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults)) // TODO paramResultInfo for DeferProc
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
case k == callGo:
aux := ssa.StaticAuxCall(ir.Syms.Newproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for NewProc
case closure != nil:
// rawLoad because loading the code pointer from a
// closure is always safe, but IsSanitizerSafeAddr
// can't always figure that out currently, and it's
// critical that we not clobber any arguments already
// stored onto the stack.
codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
aux := ssa.ClosureAuxCall(callABI.ABIAnalyzeTypes(nil, ACArgs, ACResults))
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
case codeptr != nil:
// Note that the "receiver" parameter is nil because the actual receiver is the first input parameter.
aux := ssa.InterfaceAuxCall(params)
call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
case callee != nil:
aux := ssa.StaticAuxCall(callTargetLSym(callee), params)
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
default:
s.Fatalf("bad call type %v %v", n.Op(), n)
}
call.AddArgs(callArgs...)
call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
}
s.prevCall = call
s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
// Insert OVARLIVE nodes
for _, name := range n.KeepAlive {
s.stmt(ir.NewUnaryExpr(n.Pos(), ir.OVARLIVE, name))
}
// Finish block for defers
if k == callDefer || k == callDeferStack {
b := s.endBlock()
b.Kind = ssa.BlockDefer
b.SetControl(call)
bNext := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bNext)
// Add recover edge to exit code.
r := s.f.NewBlock(ssa.BlockPlain)
s.startBlock(r)
s.exit()
b.AddEdgeTo(r)
b.Likely = ssa.BranchLikely
s.startBlock(bNext)
}
if res.NumFields() == 0 || k != callNormal {
// call has no return value. Continue with the next statement.
return nil
}
fp := res.Field(0)
if returnResultAddr {
return s.resultAddrOfCall(call, 0, fp.Type)
}
return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
}
// maybeNilCheckClosure checks if a nil check of a closure is needed in some
// architecture-dependent situations and, if so, emits the nil check.
func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
if Arch.LinkArch.Family == sys.Wasm || buildcfg.GOOS == "aix" && k != callGo {
// On AIX, the closure needs to be verified as fn can be nil, except for a go call. That case must be handled by the runtime so it can produce the "go of nil func value" error.
// TODO(neelance): On other architectures this should be eliminated by the optimization steps
s.nilCheck(closure)
}
}
// getClosureAndRcvr returns values for the appropriate closure and receiver of an
// interface call
func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) {
i := s.expr(fn.X)
itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
s.nilCheck(itab)
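// The method's code pointer lives in the itab's fun array: 2*PtrSize+8 skips
// the inter, _type, and hash fields that precede fun, and fn.Offset() selects
// this method's slot within it.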
itabidx := fn.Offset() + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab
closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
return closure, rcvr
}
// etypesign returns the signed-ness of e, for integer/pointer etypes.
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
func etypesign(e types.Kind) int8 {
switch e {
case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT:
return -1
case types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINT, types.TUINTPTR, types.TUNSAFEPTR:
return +1
}
return 0
}
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// The value that the returned Value represents is guaranteed to be non-nil.
func (s *state) addr(n ir.Node) *ssa.Value {
if n.Op() != ir.ONAME {
s.pushLine(n.Pos())
defer s.popLine()
}
if s.canSSA(n) {
s.Fatalf("addr of canSSA expression: %+v", n)
}
t := types.NewPtr(n.Type())
linksymOffset := func(lsym *obj.LSym, offset int64) *ssa.Value {
v := s.entryNewValue1A(ssa.OpAddr, t, lsym, s.sb)
// TODO: Make OpAddr use AuxInt as well as Aux.
if offset != 0 {
v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
}
return v
}
switch n.Op() {
case ir.OLINKSYMOFFSET:
no := n.(*ir.LinksymOffsetExpr)
return linksymOffset(no.Linksym, no.Offset_)
case ir.ONAME:
n := n.(*ir.Name)
if n.Heapaddr != nil {
return s.expr(n.Heapaddr)
}
switch n.Class {
case ir.PEXTERN:
// global variable
return linksymOffset(n.Linksym(), 0)
case ir.PPARAM:
// parameter slot
v := s.decladdrs[n]
if v != nil {
return v
}
s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
return nil
case ir.PAUTO:
return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n))
case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
// ensure that we reuse symbols for out parameters so
// that cse works on their addresses
return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
default:
s.Fatalf("variable address class %v not implemented", n.Class)
return nil
}
case ir.ORESULT:
// load return from callee
n := n.(*ir.ResultExpr)
return s.resultAddrOfCall(s.prevCall, n.Index, n.Type())
case ir.OINDEX:
n := n.(*ir.IndexExpr)
if n.X.Type().IsSlice() {
a := s.expr(n.X)
i := s.expr(n.Index)
len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
p := s.newValue1(ssa.OpSlicePtr, t, a)
return s.newValue2(ssa.OpPtrIndex, t, p, i)
} else { // array
a := s.addr(n.X)
i := s.expr(n.Index)
len := s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.X.Type().Elem()), a, i)
}
case ir.ODEREF:
n := n.(*ir.StarExpr)
return s.exprPtr(n.X, n.Bounded(), n.Pos())
case ir.ODOT:
n := n.(*ir.SelectorExpr)
p := s.addr(n.X)
return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
case ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
p := s.exprPtr(n.X, n.Bounded(), n.Pos())
return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
case ir.OCONVNOP:
n := n.(*ir.ConvExpr)
if n.Type() == n.X.Type() {
return s.addr(n.X)
}
addr := s.addr(n.X)
return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
case ir.OCALLFUNC, ir.OCALLINTER:
n := n.(*ir.CallExpr)
return s.callAddr(n, callNormal)
case ir.ODOTTYPE, ir.ODYNAMICDOTTYPE:
var v *ssa.Value
if n.Op() == ir.ODOTTYPE {
v, _ = s.dottype(n.(*ir.TypeAssertExpr), false)
} else {
v, _ = s.dynamicDottype(n.(*ir.DynamicTypeAssertExpr), false)
}
if v.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
if v.Args[1] != s.mem() {
s.Fatalf("memory no longer live from dottype load")
}
return v.Args[0]
default:
s.Fatalf("unhandled addr %v", n.Op())
return nil
}
}
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
func (s *state) canSSA(n ir.Node) bool {
if base.Flag.N != 0 {
return false
}
for {
nn := n
if nn.Op() == ir.ODOT {
nn := nn.(*ir.SelectorExpr)
n = nn.X
continue
}
if nn.Op() == ir.OINDEX {
nn := nn.(*ir.IndexExpr)
if nn.X.Type().IsArray() {
n = nn.X
continue
}
}
break
}
if n.Op() != ir.ONAME {
return false
}
return s.canSSAName(n.(*ir.Name)) && TypeOK(n.Type())
}
func (s *state) canSSAName(name *ir.Name) bool {
if name.Addrtaken() || !name.OnStack() {
return false
}
switch name.Class {
case ir.PPARAMOUT:
if s.hasdefer {
// TODO: handle this case? Named return values must be
// in memory so that the deferred function can see them.
// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
// Or maybe not, see issue 18860. Even unnamed return values
// must be written back so if a defer recovers, the caller can see them.
return false
}
if s.cgoUnsafeArgs {
// Cgo effectively takes the address of all result args,
// but the compiler can't see that.
return false
}
}
if name.Class == ir.PPARAM && name.Sym() != nil && name.Sym().Name == ".this" {
// wrappers generated by genwrapper need to update
// the .this pointer in place.
// TODO: treat as a PPARAMOUT?
return false
}
return true
// TODO: try to make more variables SSAable?
}
// TypeOK reports whether variables of type t are SSA-able.
func TypeOK(t *types.Type) bool {
types.CalcSize(t)
if t.Size() > int64(4*types.PtrSize) {
// 4*PtrSize is an arbitrary constant. We want it
// to be at least 3*PtrSize so slices can be registerized.
// Too big and we'll introduce too much register pressure.
return false
}
switch t.Kind() {
case types.TARRAY:
// We can't do larger arrays because dynamic indexing is
// not supported on SSA variables.
// TODO: allow if all indexes are constant.
if t.NumElem() <= 1 {
return TypeOK(t.Elem())
}
return false
case types.TSTRUCT:
if t.NumFields() > ssa.MaxStruct {
return false
}
for _, t1 := range t.Fields().Slice() {
if !TypeOK(t1.Type) {
return false
}
}
return true
default:
return true
}
}
// exprPtr evaluates n to a pointer and nil-checks it.
func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
p := s.expr(n)
if bounded || n.NonNil() {
if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
s.f.Warnl(lineno, "removed nil check")
}
return p
}
s.nilCheck(p)
return p
}
// nilCheck generates nil pointer checking code.
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
func (s *state) nilCheck(ptr *ssa.Value) {
if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
return
}
s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
}
// boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
// Starts a new block on return.
// On input, len must be converted to full int width and be nonnegative.
// Returns idx converted to full int width.
// If bounded is true then caller guarantees the index is not out of bounds
// (but boundsCheck will still extend the index to full int width).
func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
idx = s.extendIndex(idx, len, kind, bounded)
if bounded || base.Flag.B != 0 {
// If bounded or bounds checking is flag-disabled, then no check necessary,
// just return the extended index.
//
// Here, bounded == true if the compiler generated the index itself,
// such as in the expansion of a slice initializer. These indexes are
// compiler-generated, not Go program variables, so they cannot be
// attacker-controlled, so we can omit Spectre masking as well.
//
// Note that we do not want to omit Spectre masking in code like:
//
// if 0 <= i && i < len(x) {
// use(x[i])
// }
//
// Lucky for us, bounded==false for that code.
// In that case (handled below), we emit a bound check (and Spectre mask)
// and then the prove pass will remove the bounds check.
// In theory the prove pass could potentially remove certain
// Spectre masks, but it's very delicate and probably better
// to be conservative and leave them all in.
return idx
}
bNext := s.f.NewBlock(ssa.BlockPlain)
bPanic := s.f.NewBlock(ssa.BlockExit)
if !idx.Type.IsSigned() {
switch kind {
case ssa.BoundsIndex:
kind = ssa.BoundsIndexU
case ssa.BoundsSliceAlen:
kind = ssa.BoundsSliceAlenU
case ssa.BoundsSliceAcap:
kind = ssa.BoundsSliceAcapU
case ssa.BoundsSliceB:
kind = ssa.BoundsSliceBU
case ssa.BoundsSlice3Alen:
kind = ssa.BoundsSlice3AlenU
case ssa.BoundsSlice3Acap:
kind = ssa.BoundsSlice3AcapU
case ssa.BoundsSlice3B:
kind = ssa.BoundsSlice3BU
case ssa.BoundsSlice3C:
kind = ssa.BoundsSlice3CU
}
}
var cmp *ssa.Value
if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len)
} else {
cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len)
}
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
b.AddEdgeTo(bNext)
b.AddEdgeTo(bPanic)
s.startBlock(bPanic)
if Arch.LinkArch.Family == sys.Wasm {
// TODO(khr): figure out how to do "register" based calling convention for bounds checks.
// Should be similar to gcWriteBarrier, but I can't make it work.
s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
} else {
mem := s.newValue3I(ssa.OpPanicBounds, types.TypeMem, int64(kind), idx, len, s.mem())
s.endBlock().SetControl(mem)
}
s.startBlock(bNext)
// In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
if base.Flag.Cfg.SpectreIndex {
op := ssa.OpSpectreIndex
if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
op = ssa.OpSpectreSliceIndex
}
idx = s.newValue2(op, types.Types[types.TINT], idx, len)
}
return idx
}
// If cmp (a bool) is false, panic using the given function.
func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bNext := s.f.NewBlock(ssa.BlockPlain)
line := s.peekPos()
pos := base.Ctxt.PosTable.Pos(line)
fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
bPanic := s.panics[fl]
if bPanic == nil {
bPanic = s.f.NewBlock(ssa.BlockPlain)
s.panics[fl] = bPanic
s.startBlock(bPanic)
// The panic call takes/returns memory to ensure that the right
// memory state is observed if the panic happens.
s.rtcall(fn, false, nil)
}
b.AddEdgeTo(bNext)
b.AddEdgeTo(bPanic)
s.startBlock(bNext)
}
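// intDivide generates code for the integer division or modulus n = a op b,
// inserting a runtime divide-by-zero check unless b is a nonzero constant.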
func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
needcheck := true
switch b.Op {
case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
if b.AuxInt != 0 {
needcheck = false
}
}
if needcheck {
// do a size-appropriate check for zero
cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
s.check(cmp, ir.Syms.Panicdivide)
}
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
// rtcall issues a call to the given runtime function fn with the listed args.
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
s.prevCall = nil
// Write args to the stack
off := base.Ctxt.FixedFrameSize()
var callArgs []*ssa.Value
var callArgTypes []*types.Type
for _, arg := range args {
t := arg.Type
off = types.Rnd(off, t.Alignment())
size := t.Size()
callArgs = append(callArgs, arg)
callArgTypes = append(callArgTypes, t)
off += size
}
off = types.Rnd(off, int64(types.RegSize))
// Accumulate results types and offsets
offR := off
for _, t := range results {
offR = types.Rnd(offR, t.Alignment())
offR += t.Size()
}
// Issue call
var call *ssa.Value
aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(nil, callArgTypes, results))
callArgs = append(callArgs, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(results)), call)
if !returns {
// Finish block
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(call)
call.AuxInt = off - base.Ctxt.FixedFrameSize()
if len(results) > 0 {
s.Fatalf("panic call can't have results")
}
return nil
}
// Load results
res := make([]*ssa.Value, len(results))
for i, t := range results {
off = types.Rnd(off, t.Alignment())
res[i] = s.resultOfCall(call, int64(i), t)
off += t.Size()
}
off = types.Rnd(off, int64(types.PtrSize))
// Remember how much callee stack space we needed.
call.AuxInt = off
return res
}
// do *left = right for type t.
func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
s.instrument(t, left, instrumentWrite)
if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
// Known to not have write barrier. Store the whole type.
s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
return
}
// store scalar fields first, so write barrier stores for
// pointer fields can be grouped together, and scalar values
// don't need to be live across the write barrier call.
// TODO: if the writebarrier pass knows how to reorder stores,
// we can do a single store here as long as skip==0.
s.storeTypeScalars(t, left, right, skip)
if skip&skipPtr == 0 && t.HasPointers() {
s.storeTypePtrs(t, left, right)
}
}
// do *left = right for all scalar (non-pointer) parts of t.
func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
switch {
case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
s.store(t, left, right)
case t.IsPtrShaped():
if t.IsPtr() && t.Elem().NotInHeap() {
s.store(t, left, right) // see issue 42032
}
// otherwise, no scalar fields.
case t.IsString():
if skip&skipLen != 0 {
return
}
len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
s.store(types.Types[types.TINT], lenAddr, len)
case t.IsSlice():
if skip&skipLen == 0 {
len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
s.store(types.Types[types.TINT], lenAddr, len)
}
if skip&skipCap == 0 {
cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right)
capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
s.store(types.Types[types.TINT], capAddr, cap)
}
case t.IsInterface():
// itab field doesn't need a write barrier (even though it is a pointer).
itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
s.store(types.Types[types.TUINTPTR], left, itab)
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
s.storeTypeScalars(ft, addr, val, 0)
}
case t.IsArray() && t.NumElem() == 0:
// nothing
case t.IsArray() && t.NumElem() == 1:
s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
default:
s.Fatalf("bad write barrier type %v", t)
}
}
// do *left = right for all pointer parts of t.
func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
switch {
case t.IsPtrShaped():
if t.IsPtr() && t.Elem().NotInHeap() {
break // see issue 42032
}
s.store(t, left, right)
case t.IsString():
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
s.store(s.f.Config.Types.BytePtr, left, ptr)
case t.IsSlice():
elType := types.NewPtr(t.Elem())
ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
s.store(elType, left, ptr)
case t.IsInterface():
// itab field is treated as a scalar.
idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
s.store(s.f.Config.Types.BytePtr, idataAddr, idata)
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
if !ft.HasPointers() {
continue
}
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
s.storeTypePtrs(ft, addr, val)
}
case t.IsArray() && t.NumElem() == 0:
// nothing
case t.IsArray() && t.NumElem() == 1:
s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
default:
s.Fatalf("bad write barrier type %v", t)
}
}
// putArg evaluates n for the purpose of passing it as an argument to a function and returns the value for the call.
func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value {
var a *ssa.Value
if !TypeOK(t) {
a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
} else {
a = s.expr(n)
}
return a
}
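// storeArgWithBase evaluates n (of type t) and stores it at offset off from base,
// using a memory-to-memory move when t is not SSA-able.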
func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
pt := types.NewPtr(t)
var addr *ssa.Value
if base == s.sp {
// Use special routine that avoids allocation on duplicate offsets.
addr = s.constOffPtrSP(pt, off)
} else {
addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
}
if !TypeOK(t) {
a := s.addr(n)
s.move(t, addr, a)
return
}
a := s.expr(n)
s.storeType(t, addr, a, 0, false)
}
// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i,j,k may be nil, in which case they are set to their default value.
// v may be a slice, string or pointer to an array.
func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) {
t := v.Type
var ptr, len, cap *ssa.Value
switch {
case t.IsSlice():
ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v)
case t.IsString():
ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v)
len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v)
cap = len
case t.IsPtr():
if !t.Elem().IsArray() {
s.Fatalf("bad ptr to array in slice %v\n", t)
}
s.nilCheck(v)
ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
cap = len
default:
s.Fatalf("bad type in slice %v\n", t)
}
// Set default values
if i == nil {
i = s.constInt(types.Types[types.TINT], 0)
}
if j == nil {
j = len
}
three := true
if k == nil {
three = false
k = cap
}
// Panic if slice indices are not in bounds.
// Make sure we check these in reverse order so that we're always
// comparing against a value known to be nonnegative. See issue 28797.
if three {
if k != cap {
kind := ssa.BoundsSlice3Alen
if t.IsSlice() {
kind = ssa.BoundsSlice3Acap
}
k = s.boundsCheck(k, cap, kind, bounded)
}
if j != k {
j = s.boundsCheck(j, k, ssa.BoundsSlice3B, bounded)
}
i = s.boundsCheck(i, j, ssa.BoundsSlice3C, bounded)
} else {
if j != k {
kind := ssa.BoundsSliceAlen
if t.IsSlice() {
kind = ssa.BoundsSliceAcap
}
j = s.boundsCheck(j, k, kind, bounded)
}
i = s.boundsCheck(i, j, ssa.BoundsSliceB, bounded)
}
// Word-sized integer operations.
subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT])
mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT])
andOp := s.ssaOp(ir.OAND, types.Types[types.TINT])
// Calculate the length (rlen) and capacity (rcap) of the new slice.
// For strings the capacity of the result is unimportant. However,
// we use rcap to test if we've generated a zero-length slice.
// Use length of strings for that.
rlen := s.newValue2(subOp, types.Types[types.TINT], j, i)
rcap := rlen
if j != k && !t.IsString() {
rcap = s.newValue2(subOp, types.Types[types.TINT], k, i)
}
if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
// No pointer arithmetic necessary.
return ptr, rlen, rcap
}
// Calculate the base pointer (rptr) for the new slice.
//
// Generate the following code assuming that indexes are in bounds.
// The masking is to make sure that we don't generate a slice
// that points to the next object in memory. We cannot just set
// the pointer to nil because then we would create a nil slice or
// string.
//
// rcap = k - i
// rlen = j - i
// rptr = ptr + (mask(rcap) & (i * stride))
//
// Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
// of the element type.
stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Size())
// The delta is the number of bytes to offset ptr by.
delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride)
// If we're slicing to the point where the capacity is zero,
// zero out the delta.
mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap)
delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask)
// Compute rptr = ptr + delta.
rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
return rptr, rlen, rcap
}
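// u642fcvtTab holds the SSA opcodes used by uint64Tofloat to convert
// an unsigned 64-bit integer to a float.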
type u642fcvtTab struct {
leq, cvt2F, and, rsh, or, add ssa.Op
one func(*state, *types.Type, int64) *ssa.Value
}
var u64_f64 = u642fcvtTab{
leq: ssa.OpLeq64,
cvt2F: ssa.OpCvt64to64F,
and: ssa.OpAnd64,
rsh: ssa.OpRsh64Ux64,
or: ssa.OpOr64,
add: ssa.OpAdd64F,
one: (*state).constInt64,
}
var u64_f32 = u642fcvtTab{
leq: ssa.OpLeq64,
cvt2F: ssa.OpCvt64to32F,
and: ssa.OpAnd64,
rsh: ssa.OpRsh64Ux64,
or: ssa.OpOr64,
add: ssa.OpAdd32F,
one: (*state).constInt64,
}
func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
}
func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
}
func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = (floatY) x
// } else {
// y = uintX(x) ; y = x & 1
// z = uintX(x) ; z = z >> 1
// z = z | y
// result = floatY(z)
// result = result + result
// }
//
// Code borrowed from old code generator.
// What's going on: large 64-bit "unsigned" looks like
// negative number to hardware's integer-to-float
// conversion. However, because the mantissa is only
// 63 bits, we don't need the LSB, so instead we do an
// unsigned right shift (divide by two), convert, and
// double. However, before we do that, we need to be
// sure that we do not lose a "1" if that made the
// difference in the resulting rounding. Therefore, we
// preserve it, and OR (not ADD) it back in. The case
// that matters is when the eleven discarded bits are
// equal to 10000000001; that rounds up, and the 1 cannot
// be lost else it would round down if the LSB of the
// candidate mantissa is 0.
cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvt2F, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
one := cvttab.one(s, ft, 1)
y := s.newValue2(cvttab.and, ft, x, one)
z := s.newValue2(cvttab.rsh, ft, x, one)
z = s.newValue2(cvttab.or, ft, z, y)
a := s.newValue1(cvttab.cvt2F, tt, z)
a1 := s.newValue2(cvttab.add, tt, a, a)
s.vars[n] = a1
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type())
}
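// u322fcvtTab holds the SSA opcodes used by uint32Tofloat to convert
// an unsigned 32-bit integer to a float.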
type u322fcvtTab struct {
cvtI2F, cvtF2F ssa.Op
}
var u32_f64 = u322fcvtTab{
cvtI2F: ssa.OpCvt32to64F,
cvtF2F: ssa.OpCopy,
}
var u32_f32 = u322fcvtTab{
cvtI2F: ssa.OpCvt32to32F,
cvtF2F: ssa.OpCvt64Fto32F,
}
func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
}
func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
}
func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = floatY(x)
// } else {
// result = floatY(float64(x) + (1<<32))
// }
cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvtI2F, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x)
twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32))
a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32)
a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
s.vars[n] = a3
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type())
}
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
if !n.X.Type().IsMap() && !n.X.Type().IsChan() {
s.Fatalf("node must be a map or a channel")
}
// if n == nil {
// return 0
// } else {
// // len
// return *((*int)n)
// // cap
// return *(((*int)n)+1)
// }
lenType := n.Type()
nilValue := s.constNil(types.Types[types.TUINTPTR])
cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchUnlikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
// length/capacity of a nil map/chan is zero
b.AddEdgeTo(bThen)
s.startBlock(bThen)
s.vars[n] = s.zeroVal(lenType)
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
switch n.Op() {
case ir.OLEN:
// length is stored in the first word for map/chan
s.vars[n] = s.load(lenType, x)
case ir.OCAP:
// capacity is stored in the second word for chan
sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Size(), x)
s.vars[n] = s.load(lenType, sw)
default:
s.Fatalf("op must be OLEN or OCAP")
}
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, lenType)
}
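// f2uCvtTab holds the SSA opcodes, constant constructors, and cutoff value
// used by floatToUint to convert a float to an unsigned integer.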
type f2uCvtTab struct {
ltf, cvt2U, subf, or ssa.Op
floatValue func(*state, *types.Type, float64) *ssa.Value
intValue func(*state, *types.Type, int64) *ssa.Value
cutoff uint64
}
var f32_u64 = f2uCvtTab{
ltf: ssa.OpLess32F,
cvt2U: ssa.OpCvt32Fto64,
subf: ssa.OpSub32F,
or: ssa.OpOr64,
floatValue: (*state).constFloat32,
intValue: (*state).constInt64,
cutoff: 1 << 63,
}
var f64_u64 = f2uCvtTab{
ltf: ssa.OpLess64F,
cvt2U: ssa.OpCvt64Fto64,
subf: ssa.OpSub64F,
or: ssa.OpOr64,
floatValue: (*state).constFloat64,
intValue: (*state).constInt64,
cutoff: 1 << 63,
}
var f32_u32 = f2uCvtTab{
ltf: ssa.OpLess32F,
cvt2U: ssa.OpCvt32Fto32,
subf: ssa.OpSub32F,
or: ssa.OpOr32,
floatValue: (*state).constFloat32,
intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
cutoff: 1 << 31,
}
var f64_u32 = f2uCvtTab{
ltf: ssa.OpLess64F,
cvt2U: ssa.OpCvt64Fto32,
subf: ssa.OpSub64F,
or: ssa.OpOr32,
floatValue: (*state).constFloat64,
intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
cutoff: 1 << 31,
}
func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u64, n, x, ft, tt)
}
func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u64, n, x, ft, tt)
}
func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u32, n, x, ft, tt)
}
func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u32, n, x, ft, tt)
}
func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// cutoff:=1<<(intY_Size-1)
// if x < floatX(cutoff) {
// result = uintY(x)
// } else {
// y = x - floatX(cutoff)
// z = uintY(y)
// result = z | -(cutoff)
// }
cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvt2U, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
y := s.newValue2(cvttab.subf, ft, x, cutoff)
y = s.newValue1(cvttab.cvt2U, tt, y)
z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
a1 := s.newValue2(cvttab.or, tt, y, z)
s.vars[n] = a1
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type())
}
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.X) // input interface
target := s.reflectType(n.Type()) // target type
var targetItab *ssa.Value
if n.Itab != nil {
targetItab = s.expr(n.Itab)
}
return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, target, targetItab, commaok)
}
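// dynamicDottype is like dottype, but for type assertions whose target type
// is only known at run time (an *ir.DynamicTypeAssertExpr).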
func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.X)
target := s.expr(n.T)
var itab *ssa.Value
if !n.X.Type().IsEmptyInterface() && !n.Type().IsInterface() {
byteptr := s.f.Config.Types.BytePtr
itab = target
target = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)) // itab.typ
}
return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, target, itab, commaok)
}
// dottype1 implements a x.(T) operation. iface is the argument (x), dst is the type we're asserting to (T)
// and src is the type we're asserting from.
// target is the *runtime._type of dst.
// If src is a nonempty interface and dst is not an interface, targetItab is an itab representing (dst, src). Otherwise it is nil.
// commaok is true if the caller wants a boolean success value. Otherwise, the generated code panics if the conversion fails.
func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, target, targetItab *ssa.Value, commaok bool) (res, resok *ssa.Value) {
byteptr := s.f.Config.Types.BytePtr
if dst.IsInterface() {
if dst.IsEmptyInterface() {
// Converting to an empty interface.
// Input could be an empty or nonempty interface.
if base.Debug.TypeAssert > 0 {
base.WarnfAt(pos, "type assertion inlined")
}
// Get itab/type field from input.
itab := s.newValue1(ssa.OpITab, byteptr, iface)
// Conversion succeeds iff that field is not nil.
cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
if src.IsEmptyInterface() && commaok {
// Converting empty interface to empty interface with ,ok is just a nil check.
return iface, cond
}
// Branch on nilness.
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
b.Likely = ssa.BranchLikely
bOk := s.f.NewBlock(ssa.BlockPlain)
bFail := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bOk)
b.AddEdgeTo(bFail)
if !commaok {
// On failure, panic by calling panicnildottype.
s.startBlock(bFail)
s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
// On success, return (perhaps modified) input interface.
s.startBlock(bOk)
if src.IsEmptyInterface() {
res = iface // Use input interface unchanged.
return
}
// Load type out of itab, build interface with existing idata.
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
typ := s.load(byteptr, off)
idata := s.newValue1(ssa.OpIData, byteptr, iface)
res = s.newValue2(ssa.OpIMake, dst, typ, idata)
return
}
s.startBlock(bOk)
// nonempty -> empty
// Need to load type from itab
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
s.vars[typVar] = s.load(byteptr, off)
s.endBlock()
// itab is nil, might as well use that as the nil result.
s.startBlock(bFail)
s.vars[typVar] = itab
s.endBlock()
// Merge point.
bEnd := s.f.NewBlock(ssa.BlockPlain)
bOk.AddEdgeTo(bEnd)
bFail.AddEdgeTo(bEnd)
s.startBlock(bEnd)
idata := s.newValue1(ssa.OpIData, byteptr, iface)
res = s.newValue2(ssa.OpIMake, dst, s.variable(typVar, byteptr), idata)
resok = cond
delete(s.vars, typVar)
return
}
// converting to a nonempty interface needs a runtime call.
if base.Debug.TypeAssert > 0 {
base.WarnfAt(pos, "type assertion not inlined")
}
if !commaok {
fn := ir.Syms.AssertI2I
if src.IsEmptyInterface() {
fn = ir.Syms.AssertE2I
}
data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
tab := s.newValue1(ssa.OpITab, byteptr, iface)
tab = s.rtcall(fn, true, []*types.Type{byteptr}, target, tab)[0]
return s.newValue2(ssa.OpIMake, dst, tab, data), nil
}
fn := ir.Syms.AssertI2I2
if src.IsEmptyInterface() {
fn = ir.Syms.AssertE2I2
}
res = s.rtcall(fn, true, []*types.Type{dst}, target, iface)[0]
resok = s.newValue2(ssa.OpNeqInter, types.Types[types.TBOOL], res, s.constInterface(dst))
return
}
if base.Debug.TypeAssert > 0 {
base.WarnfAt(pos, "type assertion inlined")
}
// Converting to a concrete type.
direct := types.IsDirectIface(dst)
itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
var wantedFirstWord *ssa.Value
if src.IsEmptyInterface() {
// Looking for pointer to target type.
wantedFirstWord = target
} else {
// Looking for pointer to itab for target type and source interface.
wantedFirstWord = targetItab
}
var tmp ir.Node // temporary for use with large types
var addr *ssa.Value // address of tmp
if commaok && !TypeOK(dst) {
// unSSAable type, use temporary.
// TODO: get rid of some of these temporaries.
tmp, addr = s.temp(pos, dst)
}
cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, wantedFirstWord)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
b.Likely = ssa.BranchLikely
bOk := s.f.NewBlock(ssa.BlockPlain)
bFail := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bOk)
b.AddEdgeTo(bFail)
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
taddr := s.reflectType(src)
if src.IsEmptyInterface() {
s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
} else {
s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr)
}
// on success, return data from interface
s.startBlock(bOk)
if direct {
return s.newValue1(ssa.OpIData, dst, iface), nil
}
p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
return s.load(dst, p), nil
}
// commaok is the more complicated case because we have
// a control flow merge point.
bEnd := s.f.NewBlock(ssa.BlockPlain)
// Note that we need a new valVar each time (unlike okVar where we can
// reuse the variable) because it might have a different type every time.
valVar := ssaMarker("val")
// type assertion succeeded
s.startBlock(bOk)
if tmp == nil {
if direct {
s.vars[valVar] = s.newValue1(ssa.OpIData, dst, iface)
} else {
p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
s.vars[valVar] = s.load(dst, p)
}
} else {
p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
s.move(dst, addr, p)
}
s.vars[okVar] = s.constBool(true)
s.endBlock()
bOk.AddEdgeTo(bEnd)
// type assertion failed
s.startBlock(bFail)
if tmp == nil {
s.vars[valVar] = s.zeroVal(dst)
} else {
s.zero(dst, addr)
}
s.vars[okVar] = s.constBool(false)
s.endBlock()
bFail.AddEdgeTo(bEnd)
// merge point
s.startBlock(bEnd)
if tmp == nil {
res = s.variable(valVar, dst)
delete(s.vars, valVar)
} else {
res = s.load(dst, addr)
s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp.(*ir.Name), s.mem())
}
resok = s.variable(okVar, types.Types[types.TBOOL])
delete(s.vars, okVar)
return res, resok
}
// temp allocates a temp of type t at position pos
func (s *state) temp(pos src.XPos, t *types.Type) (*ir.Name, *ssa.Value) {
tmp := typecheck.TempAt(pos, s.curfn, t)
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
addr := s.addr(tmp)
return tmp, addr
}
// variable returns the value of a variable at the current location.
func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value {
v := s.vars[n]
if v != nil {
return v
}
v = s.fwdVars[n]
if v != nil {
return v
}
if s.curBlock == s.f.Entry {
// No variable should be live at entry.
s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, n, v)
}
// Make a FwdRef, which records a value that's live on block input.
// We'll find the matching definition as part of insertPhis.
v = s.newValue0A(ssa.OpFwdRef, t, fwdRefAux{N: n})
s.fwdVars[n] = v
if n.Op() == ir.ONAME {
s.addNamedValue(n.(*ir.Name), v)
}
return v
}
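// mem returns the current memory state as an SSA value.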
func (s *state) mem() *ssa.Value {
return s.variable(memVar, types.TypeMem)
}
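// addNamedValue records that v holds the value of the user variable n, for use
// by debug information generation. Marker nodes, temporaries, and named output
// parameters are not tracked.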
func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) {
if n.Class == ir.Pxxx {
// Don't track our marker nodes (memVar etc.).
return
}
if ir.IsAutoTmp(n) {
// Don't track temporary variables.
return
}
if n.Class == ir.PPARAMOUT {
// Don't track named output values. This prevents return values
// from being assigned too early. See #14591 and #14762. TODO: allow this.
return
}
loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0}
values, ok := s.f.NamedValues[loc]
if !ok {
s.f.Names = append(s.f.Names, &loc)
s.f.CanonicalLocalSlots[loc] = &loc
}
s.f.NamedValues[loc] = append(values, v)
}
// Branch is an unresolved branch.
type Branch struct {
P *obj.Prog // branch instruction
B *ssa.Block // target
}
// State contains state needed during Prog generation.
type State struct {
ABI obj.ABI
pp *objw.Progs
// Branches remembers all the branch instructions we've seen
// and where they would like to go.
Branches []Branch
// bstart remembers where each block starts (indexed by block ID)
bstart []*obj.Prog
maxarg int64 // largest frame size for arguments to calls made by the function
// Map from GC safe points to liveness index, generated by
// liveness analysis.
livenessMap liveness.Map
// partLiveArgs includes arguments that may be partially live, for which we
// need to generate instructions that spill the argument registers.
partLiveArgs map[*ir.Name]bool
// lineRunStart records the beginning of the current run of instructions
// within a single block sharing the same line number
// Used to move statement marks to the beginning of such runs.
lineRunStart *obj.Prog
// wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
OnWasmStackSkipped int
}
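// FuncInfo returns the obj.FuncInfo of the function being compiled.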
func (s *State) FuncInfo() *obj.FuncInfo {
return s.pp.CurFunc.LSym.Func()
}
// Prog appends a new Prog.
func (s *State) Prog(as obj.As) *obj.Prog {
p := s.pp.Prog(as)
if objw.LosesStmtMark(as) {
return p
}
// Float a statement start to the beginning of any same-line run.
// lineRunStart is reset at block boundaries, which appears to work well.
if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() {
s.lineRunStart = p
} else if p.Pos.IsStmt() == src.PosIsStmt {
s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt()
p.Pos = p.Pos.WithNotStmt()
}
return p
}
// Pc returns the current Prog.
func (s *State) Pc() *obj.Prog {
return s.pp.Next
}
// SetPos sets the current source position.
func (s *State) SetPos(pos src.XPos) {
s.pp.Pos = pos
}
// Br emits a single branch instruction and returns the instruction.
// Not all architectures need the returned instruction, but otherwise
// the boilerplate is common to all.
func (s *State) Br(op obj.As, target *ssa.Block) *obj.Prog {
p := s.Prog(op)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, Branch{P: p, B: target})
return p
}
// DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics
// that reduce "jumpy" line number churn when debugging.
// Spill/fill/copy instructions from the register allocator,
// phi functions, and instructions with a no-pos position
// are examples of instructions that can cause churn.
func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
switch v.Op {
case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
// These are not statements
s.SetPos(v.Pos.WithNotStmt())
default:
p := v.Pos
if p != src.NoXPos {
// If the position is defined, update the position.
// Also convert default IsStmt to NotStmt; only
// explicit statement boundaries should appear
// in the generated code.
if p.IsStmt() != src.PosIsStmt {
p = p.WithNotStmt()
// Calls use the pos attached to v, but copy the statement mark from State
}
s.SetPos(p)
} else {
s.SetPos(s.pp.Pos.WithNotStmt())
}
}
}
// emit argument info (locations on stack) for traceback.
func emitArgInfo(e *ssafn, f *ssa.Func, pp *objw.Progs) {
ft := e.curfn.Type()
if ft.NumRecvs() == 0 && ft.NumParams() == 0 {
return
}
x := EmitArgInfo(e.curfn, f.OwnAux.ABIInfo())
e.curfn.LSym.Func().ArgInfo = x
// Emit a funcdata pointing at the arg info data.
p := pp.Prog(obj.AFUNCDATA)
p.From.SetConst(objabi.FUNCDATA_ArgInfo)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = x
}
// emit argument info (locations on stack) of f for traceback.
func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym {
x := base.Ctxt.Lookup(fmt.Sprintf("%s.arginfo%d", f.LSym.Name, f.ABI))
PtrSize := int64(types.PtrSize)
uintptrTyp := types.Types[types.TUINTPTR]
isAggregate := func(t *types.Type) bool {
return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice()
}
// Populate the data.
// The data is a stream of bytes, which contains the offsets and sizes of the
// non-aggregate arguments or non-aggregate fields/elements of aggregate-typed
// arguments, along with special "operators". Specifically,
// - for each non-aggregate arg/field/element, its offset from FP (1 byte) and
// size (1 byte)
// - special operators:
// - 0xff - end of sequence
// - 0xfe - print { (at the start of an aggregate-typed argument)
// - 0xfd - print } (at the end of an aggregate-typed argument)
// - 0xfc - print ... (more args/fields/elements)
// - 0xfb - print _ (offset too large)
// These constants need to be in sync with runtime.traceback.go:printArgs.
const (
_endSeq = 0xff
_startAgg = 0xfe
_endAgg = 0xfd
_dotdotdot = 0xfc
_offsetTooLarge = 0xfb
_special = 0xf0 // above this are operators, below this are ordinary offsets
)
const (
limit = 10 // print no more than 10 args/components
maxDepth = 5 // no more than 5 layers of nesting
// maxLen is a (conservative) upper bound of the byte stream length. For
// each arg/component, it has no more than 2 bytes of data (size, offset),
// and no more than one {, }, ... at each level (it cannot have both the
// data and ... unless it is the last one, just be conservative). Plus 1
// for _endSeq.
maxLen = (maxDepth*3+2)*limit + 1
)
wOff := 0
n := 0
writebyte := func(o uint8) { wOff = objw.Uint8(x, wOff, o) }
// Write one non-aggregate arg/field/element.
write1 := func(sz, offset int64) {
if offset >= _special {
writebyte(_offsetTooLarge)
} else {
writebyte(uint8(offset))
writebyte(uint8(sz))
}
n++
}
// Visit t recursively and write it out.
// Returns whether to continue visiting.
var visitType func(baseOffset int64, t *types.Type, depth int) bool
visitType = func(baseOffset int64, t *types.Type, depth int) bool {
if n >= limit {
writebyte(_dotdotdot)
return false
}
if !isAggregate(t) {
write1(t.Size(), baseOffset)
return true
}
writebyte(_startAgg)
depth++
if depth >= maxDepth {
writebyte(_dotdotdot)
writebyte(_endAgg)
n++
return true
}
switch {
case t.IsInterface(), t.IsString():
_ = visitType(baseOffset, uintptrTyp, depth) &&
visitType(baseOffset+PtrSize, uintptrTyp, depth)
case t.IsSlice():
_ = visitType(baseOffset, uintptrTyp, depth) &&
visitType(baseOffset+PtrSize, uintptrTyp, depth) &&
visitType(baseOffset+PtrSize*2, uintptrTyp, depth)
case t.IsComplex():
_ = visitType(baseOffset, types.FloatForComplex(t), depth) &&
visitType(baseOffset+t.Size()/2, types.FloatForComplex(t), depth)
case t.IsArray():
if t.NumElem() == 0 {
n++ // {} counts as a component
break
}
for i := int64(0); i < t.NumElem(); i++ {
if !visitType(baseOffset, t.Elem(), depth) {
break
}
baseOffset += t.Elem().Size()
}
case t.IsStruct():
if t.NumFields() == 0 {
n++ // {} counts as a component
break
}
for _, field := range t.Fields().Slice() {
if !visitType(baseOffset+field.Offset, field.Type, depth) {
break
}
}
}
writebyte(_endAgg)
return true
}
for _, a := range abiInfo.InParams() {
if !visitType(a.FrameOffset(abiInfo), a.Type, 0) {
break
}
}
writebyte(_endSeq)
if wOff > maxLen {
base.Fatalf("ArgInfo too large")
}
return x
}
// genssa appends entries to pp for each instruction in f.
func genssa(f *ssa.Func, pp *objw.Progs) {
var s State
s.ABI = f.OwnAux.Fn.ABI()
e := f.Frontend().(*ssafn)
s.livenessMap, s.partLiveArgs = liveness.Compute(e.curfn, f, e.stkptrsize, pp)
emitArgInfo(e, f, pp)
openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
if openDeferInfo != nil {
// This function uses open-coded defers -- write out the funcdata
// info that we computed at the end of genssa.
p := pp.Prog(obj.AFUNCDATA)
p.From.SetConst(objabi.FUNCDATA_OpenCodedDeferInfo)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = openDeferInfo
}
// Remember where each block starts.
s.bstart = make([]*obj.Prog, f.NumBlocks())
s.pp = pp
var progToValue map[*obj.Prog]*ssa.Value
var progToBlock map[*obj.Prog]*ssa.Block
var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
if f.PrintOrHtmlSSA {
progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
f.Logf("genssa %s\n", f.Name)
progToBlock[s.pp.Next] = f.Blocks[0]
}
if base.Ctxt.Flag_locationlists {
if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
}
valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()]
for i := range valueToProgAfter {
valueToProgAfter[i] = nil
}
}
// If the very first instruction is not tagged as a statement,
// debuggers may attribute it to the previous function in the program.
firstPos := src.NoXPos
for _, v := range f.Entry.Values {
if v.Pos.IsStmt() == src.PosIsStmt {
firstPos = v.Pos
v.Pos = firstPos.WithDefaultStmt()
break
}
}
// inlMarks has an entry for each Prog that implements an inline mark.
// It maps from that Prog to the global inlining id of the inlined body
// which should unwind to this Prog's location.
var inlMarks map[*obj.Prog]int32
var inlMarkList []*obj.Prog
// inlMarksByPos maps from a (column 1) source position to the set of
// Progs that are in the set above and have that source position.
var inlMarksByPos map[src.XPos][]*obj.Prog
// Emit basic blocks
for i, b := range f.Blocks {
s.bstart[b.ID] = s.pp.Next
s.lineRunStart = nil
// Attach a "default" liveness info. Normally this will be
// overwritten in the Values loop below for each Value. But
// for an empty block this will be used for its control
// instruction. We won't use the actual liveness map on a
// control instruction. Just mark it something that is
// preemptible, unless this function is "all unsafe".
s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}
// Emit values in block
Arch.SSAMarkMoves(&s, b)
for _, v := range b.Values {
x := s.pp.Next
s.DebugFriendlySetPosFrom(v)
if v.Op.ResultInArg0() && v.ResultReg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
switch v.Op {
case ssa.OpInitMem:
// memory arg needs no code
case ssa.OpArg:
// input args need no code
case ssa.OpSP, ssa.OpSB:
// nothing to do
case ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult:
// nothing to do
case ssa.OpGetG:
// nothing to do when there's a g register,
// and checkLower complains if there's not
case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpVarKill:
// nothing to do; already used by liveness
case ssa.OpPhi:
CheckLoweredPhi(v)
case ssa.OpConvert:
// nothing to do; no-op conversion for liveness
if v.Args[0].Reg() != v.Reg() {
v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
}
case ssa.OpInlMark:
p := Arch.Ginsnop(s.pp)
if inlMarks == nil {
inlMarks = map[*obj.Prog]int32{}
inlMarksByPos = map[src.XPos][]*obj.Prog{}
}
inlMarks[p] = v.AuxInt32()
inlMarkList = append(inlMarkList, p)
pos := v.Pos.AtColumn1()
inlMarksByPos[pos] = append(inlMarksByPos[pos], p)
default:
// Special case for first line in function; move it to the start (which cannot be a register-valued instruction)
if firstPos != src.NoXPos && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg {
s.SetPos(firstPos)
firstPos = src.NoXPos
}
// Attach this safe point to the next
// instruction.
s.pp.NextLive = s.livenessMap.Get(v)
// let the backend handle it
Arch.SSAGenValue(&s, v)
}
if base.Ctxt.Flag_locationlists {
valueToProgAfter[v.ID] = s.pp.Next
}
if f.PrintOrHtmlSSA {
for ; x != s.pp.Next; x = x.Link {
progToValue[x] = v
}
}
}
// If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
p := Arch.Ginsnop(s.pp)
p.Pos = p.Pos.WithIsStmt()
if b.Pos == src.NoXPos {
b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion. See #35652.
if b.Pos == src.NoXPos {
b.Pos = pp.Text.Pos // Sometimes p.Pos is empty. See #35695.
}
}
b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops, force a change in line number
}
// Emit control flow instructions for block
var next *ssa.Block
if i < len(f.Blocks)-1 && base.Flag.N == 0 {
// If -N, leave next==nil so every block with successors
// ends in a JMP (except call blocks - plive doesn't like
// select{send,recv} followed by a JMP call). Helps keep
// line numbers for otherwise empty blocks.
next = f.Blocks[i+1]
}
x := s.pp.Next
s.SetPos(b.Pos)
Arch.SSAGenBlock(&s, b, next)
if f.PrintOrHtmlSSA {
for ; x != s.pp.Next; x = x.Link {
progToBlock[x] = b
}
}
}
if f.Blocks[len(f.Blocks)-1].Kind == ssa.BlockExit {
// We need the return address of a panic call to
// still be inside the function in question. So if
// it ends in a call which doesn't return, add a
// nop (which will never execute) after the call.
Arch.Ginsnop(pp)
}
if openDeferInfo != nil {
// When doing open-coded defers, generate a disconnected call to
// deferreturn and a return. This will be used during panic
// recovery to unwind the stack and return back to the runtime.
s.pp.NextLive = s.livenessMap.DeferReturn
p := pp.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Deferreturn
// Load results into registers. So when a deferred function
// recovers a panic, it will return to the caller with the right results.
// The results are already in memory, because they are not SSA'd
// when the function has defers (see canSSAName).
for _, o := range f.OwnAux.ABIInfo().OutParams() {
n := o.Name.(*ir.Name)
rts, offs := o.RegisterTypesAndOffsets()
for i := range o.Registers {
Arch.LoadRegResult(&s, f, rts[i], ssa.ObjRegForAbiReg(o.Registers[i], f.Config), n, offs[i])
}
}
pp.Prog(obj.ARET)
}
if inlMarks != nil {
// We have some inline marks. Try to find other instructions we're
// going to emit anyway, and use those instructions instead of the
// inline marks.
for p := pp.Text; p != nil; p = p.Link {
if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm {
// Don't use 0-sized instructions as inline marks, because we need
// to identify inline mark instructions by pc offset.
// (Some of these instructions are sometimes zero-sized, sometimes not.
// We must not use anything that even might be zero-sized.)
// TODO: are there others?
continue
}
if _, ok := inlMarks[p]; ok {
// Don't use inline marks themselves. We don't know
// whether they will be zero-sized or not yet.
continue
}
pos := p.Pos.AtColumn1()
s := inlMarksByPos[pos]
if len(s) == 0 {
continue
}
for _, m := range s {
// We found an instruction with the same source position as
// some of the inline marks.
// Use this instruction instead.
p.Pos = p.Pos.WithIsStmt() // promote position to a statement
pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m])
// Make the inline mark a real nop, so it doesn't generate any code.
m.As = obj.ANOP
m.Pos = src.NoXPos
m.From = obj.Addr{}
m.To = obj.Addr{}
}
delete(inlMarksByPos, pos)
}
// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
for _, p := range inlMarkList {
if p.As != obj.ANOP {
pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p])
}
}
}
if base.Ctxt.Flag_locationlists {
var debugInfo *ssa.FuncDebug
if e.curfn.ABI == obj.ABIInternal && base.Flag.N != 0 {
debugInfo = ssa.BuildFuncDebugNoOptimized(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset)
} else {
debugInfo = ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset)
}
e.curfn.DebugInfo = debugInfo
bstart := s.bstart
idToIdx := make([]int, f.NumBlocks())
for i, b := range f.Blocks {
idToIdx[b.ID] = i
}
// Note that at this moment, Prog.Pc is a sequence number; it's
// not a real PC until after assembly, so this mapping has to
// be done later.
debugInfo.GetPC = func(b, v ssa.ID) int64 {
switch v {
case ssa.BlockStart.ID:
if b == f.Entry.ID {
return 0 // Start at the very beginning, at the assembler-generated prologue.
// this should only happen for function args (ssa.OpArg)
}
return bstart[b].Pc
case ssa.BlockEnd.ID:
blk := f.Blocks[idToIdx[b]]
nv := len(blk.Values)
return valueToProgAfter[blk.Values[nv-1].ID].Pc
case ssa.FuncEnd.ID:
return e.curfn.LSym.Size
default:
return valueToProgAfter[v].Pc
}
}
}
// Resolve branches, and relax DefaultStmt into NotStmt
for _, br := range s.Branches {
br.P.To.SetTarget(s.bstart[br.B.ID])
if br.P.Pos.IsStmt() != src.PosIsStmt {
br.P.Pos = br.P.Pos.WithNotStmt()
} else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
br.P.Pos = br.P.Pos.WithNotStmt()
}
}
if e.log { // spew to stdout
filename := ""
for p := pp.Text; p != nil; p = p.Link {
if p.Pos.IsKnown() && p.InnermostFilename() != filename {
filename = p.InnermostFilename()
f.Logf("# %s\n", filename)
}
var s string
if v, ok := progToValue[p]; ok {
s = v.String()
} else if b, ok := progToBlock[p]; ok {
s = b.String()
} else {
s = " " // most value and branch strings are 2-3 characters long
}
f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
}
}
if f.HTMLWriter != nil { // spew to ssa.html
var buf bytes.Buffer
buf.WriteString("<code>")
buf.WriteString("<dl class=\"ssa-gen\">")
filename := ""
for p := pp.Text; p != nil; p = p.Link {
// Don't spam every line with the file name, which is often huge.
// Only print changes, and "unknown" is not a change.
if p.Pos.IsKnown() && p.InnermostFilename() != filename {
filename = p.InnermostFilename()
buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
buf.WriteString(html.EscapeString("# " + filename))
buf.WriteString("</dd>")
}
buf.WriteString("<dt class=\"ssa-prog-src\">")
if v, ok := progToValue[p]; ok {
buf.WriteString(v.HTML())
} else if b, ok := progToBlock[p]; ok {
buf.WriteString("<b>" + b.HTML() + "</b>")
}
buf.WriteString("</dt>")
buf.WriteString("<dd class=\"ssa-prog\">")
buf.WriteString(fmt.Sprintf("%.5d <span class=\"l%v line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString())))
buf.WriteString("</dd>")
}
buf.WriteString("</dl>")
buf.WriteString("</code>")
f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
}
defframe(&s, e, f)
f.HTMLWriter.Close()
f.HTMLWriter = nil
}
func defframe(s *State, e *ssafn, f *ssa.Func) {
pp := s.pp
frame := types.Rnd(s.maxarg+e.stksize, int64(types.RegSize))
if Arch.PadFrame != nil {
frame = Arch.PadFrame(frame)
}
// Fill in argument and frame size.
pp.Text.To.Type = obj.TYPE_TEXTSIZE
pp.Text.To.Val = int32(types.Rnd(f.OwnAux.ArgWidth(), int64(types.RegSize)))
pp.Text.To.Offset = frame
p := pp.Text
// Insert code to spill argument registers if the named slot may be partially
// live. That is, the named slot is considered live by liveness analysis
// (because a part of it is live), but we may not spill all parts into the
// slot. This can only happen with aggregate-typed arguments that are SSA-able
// and not address-taken (for non-SSA-able or address-taken arguments we always
// spill upfront).
// Note: spilling is unnecessary in the -N/no-optimize case, since all values
// will be considered non-SSAable and spilled up front.
// TODO(register args) Make liveness more fine-grained so that partial spilling is okay.
if f.OwnAux.ABIInfo().InRegistersUsed() != 0 && base.Flag.N == 0 {
// First, see if it is already spilled before it may be live. Look for a spill
// in the entry block up to the first safepoint.
type nameOff struct {
n *ir.Name
off int64
}
partLiveArgsSpilled := make(map[nameOff]bool)
for _, v := range f.Entry.Values {
if v.Op.IsCall() {
break
}
if v.Op != ssa.OpStoreReg || v.Args[0].Op != ssa.OpArgIntReg {
continue
}
n, off := ssa.AutoVar(v)
if n.Class != ir.PPARAM || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] {
continue
}
partLiveArgsSpilled[nameOff{n, off}] = true
}
// Then, insert code to spill registers if not already.
for _, a := range f.OwnAux.ABIInfo().InParams() {
n, ok := a.Name.(*ir.Name)
if !ok || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 {
continue
}
rts, offs := a.RegisterTypesAndOffsets()
for i := range a.Registers {
if !rts[i].HasPointers() {
continue
}
if partLiveArgsSpilled[nameOff{n, offs[i]}] {
continue // already spilled
}
reg := ssa.ObjRegForAbiReg(a.Registers[i], f.Config)
p = Arch.SpillArgReg(pp, p, f, rts[i], reg, n, offs[i])
}
}
}
// Insert code to zero ambiguously live variables so that the
// garbage collector only sees initialized values when it
// looks for pointers.
var lo, hi int64
// Opaque state for backend to use. Current backends use it to
// keep track of which helper registers have been zeroed.
var state uint32
// Iterate through declarations. Autos are sorted in decreasing
// frame offset order.
for _, n := range e.curfn.Dcl {
if !n.Needzero() {
continue
}
if n.Class != ir.PAUTO {
e.Fatalf(n.Pos(), "needzero class %d", n.Class)
}
if n.Type().Size()%int64(types.PtrSize) != 0 || n.FrameOffset()%int64(types.PtrSize) != 0 || n.Type().Size() == 0 {
e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_)
}
if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*types.RegSize) {
// Merge with range we already have.
lo = n.FrameOffset()
continue
}
// Zero old range
p = Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
// Set new range.
lo = n.FrameOffset()
hi = lo + n.Type().Size()
}
// Zero final range.
Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
}
// IndexJump describes one of the consecutive jump instructions generated to model a specific branching.
type IndexJump struct {
Jump obj.As
Index int
}
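// oneJump emits the branch instruction described by jump for block b,
// targeting the successor selected by jump.Index.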
func (s *State) oneJump(b *ssa.Block, jump *IndexJump) {
p := s.Br(jump.Jump, b.Succs[jump.Index].Block())
p.Pos = b.Pos
}
// CombJump generates combinational instructions (2 at present) for a block jump,
// so that the behaviour of non-standard condition codes can be simulated.
func (s *State) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
switch next {
case b.Succs[0].Block():
s.oneJump(b, &jumps[0][0])
s.oneJump(b, &jumps[0][1])
case b.Succs[1].Block():
s.oneJump(b, &jumps[1][0])
s.oneJump(b, &jumps[1][1])
default:
var q *obj.Prog
if b.Likely != ssa.BranchUnlikely {
s.oneJump(b, &jumps[1][0])
s.oneJump(b, &jumps[1][1])
q = s.Br(obj.AJMP, b.Succs[1].Block())
} else {
s.oneJump(b, &jumps[0][0])
s.oneJump(b, &jumps[0][1])
q = s.Br(obj.AJMP, b.Succs[0].Block())
}
q.Pos = b.Pos
}
}
// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
AddAux2(a, v, v.AuxInt)
}
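// AddAux2 is like AddAux, but uses an explicit offset instead of v.AuxInt.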
func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
v.Fatalf("bad AddAux addr %v", a)
}
// add integer offset
a.Offset += offset
// If no additional symbol offset, we're done.
if v.Aux == nil {
return
}
// Add symbol's offset from its base register.
switch n := v.Aux.(type) {
case *ssa.AuxCall:
a.Name = obj.NAME_EXTERN
a.Sym = n.Fn
case *obj.LSym:
a.Name = obj.NAME_EXTERN
a.Sym = n
case *ir.Name:
if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
a.Name = obj.NAME_PARAM
a.Sym = ir.Orig(n).(*ir.Name).Linksym()
a.Offset += n.FrameOffset()
break
}
a.Name = obj.NAME_AUTO
if n.Class == ir.PPARAMOUT {
a.Sym = ir.Orig(n).(*ir.Name).Linksym()
} else {
a.Sym = n.Linksym()
}
a.Offset += n.FrameOffset()
default:
v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
}
}
// extendIndex extends idx to a full int width.
// It emits a panic of the given kind if idx does not fit in an int (only on 32-bit archs).
func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
size := idx.Type.Size()
if size == s.config.PtrSize {
return idx
}
if size > s.config.PtrSize {
// truncate 64-bit indexes on 32-bit pointer archs. Test the
// high word and branch to out-of-bounds failure if it is not 0.
var lo *ssa.Value
if idx.Type.IsSigned() {
lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx)
} else {
lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx)
}
if bounded || base.Flag.B != 0 {
return lo
}
bNext := s.f.NewBlock(ssa.BlockPlain)
bPanic := s.f.NewBlock(ssa.BlockExit)
hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx)
cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0))
if !idx.Type.IsSigned() {
switch kind {
case ssa.BoundsIndex:
kind = ssa.BoundsIndexU
case ssa.BoundsSliceAlen:
kind = ssa.BoundsSliceAlenU
case ssa.BoundsSliceAcap:
kind = ssa.BoundsSliceAcapU
case ssa.BoundsSliceB:
kind = ssa.BoundsSliceBU
case ssa.BoundsSlice3Alen:
kind = ssa.BoundsSlice3AlenU
case ssa.BoundsSlice3Acap:
kind = ssa.BoundsSlice3AcapU
case ssa.BoundsSlice3B:
kind = ssa.BoundsSlice3BU
case ssa.BoundsSlice3C:
kind = ssa.BoundsSlice3CU
}
}
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
b.AddEdgeTo(bNext)
b.AddEdgeTo(bPanic)
s.startBlock(bPanic)
mem := s.newValue4I(ssa.OpPanicExtend, types.TypeMem, int64(kind), hi, lo, len, s.mem())
s.endBlock().SetControl(mem)
s.startBlock(bNext)
return lo
}
// Extend value to the required size
var op ssa.Op
if idx.Type.IsSigned() {
switch 10*size + s.config.PtrSize {
case 14:
op = ssa.OpSignExt8to32
case 18:
op = ssa.OpSignExt8to64
case 24:
op = ssa.OpSignExt16to32
case 28:
op = ssa.OpSignExt16to64
case 48:
op = ssa.OpSignExt32to64
default:
s.Fatalf("bad signed index extension %s", idx.Type)
}
} else {
switch 10*size + s.config.PtrSize {
case 14:
op = ssa.OpZeroExt8to32
case 18:
op = ssa.OpZeroExt8to64
case 24:
op = ssa.OpZeroExt16to32
case 28:
op = ssa.OpZeroExt16to64
case 48:
op = ssa.OpZeroExt32to64
default:
s.Fatalf("bad unsigned index extension %s", idx.Type)
}
}
return s.newValue1(op, types.Types[types.TINT], idx)
}
// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
// Called during ssaGenValue.
func CheckLoweredPhi(v *ssa.Value) {
if v.Op != ssa.OpPhi {
v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
}
if v.Type.IsMemory() {
return
}
f := v.Block.Func
loc := f.RegAlloc[v.ID]
for _, a := range v.Args {
if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
}
}
}
// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block,
// except for incoming in-register arguments.
// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
// That register contains the closure pointer on closure entry.
func CheckLoweredGetClosurePtr(v *ssa.Value) {
entry := v.Block.Func.Entry
if entry != v.Block {
base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
}
for _, w := range entry.Values {
if w == v {
break
}
switch w.Op {
case ssa.OpArgIntReg, ssa.OpArgFloatReg:
// okay
default:
base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
}
}
}
// CheckArgReg ensures that v is in the function's entry block.
func CheckArgReg(v *ssa.Value) {
entry := v.Block.Func.Entry
if entry != v.Block {
base.Fatalf("in %s, badly placed ArgIReg or ArgFReg: %v %v", v.Block.Func.Name, v.Block, v)
}
}
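// AddrAuto fills in a with the frame address of the auto or parameter that backs v.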
func AddrAuto(a *obj.Addr, v *ssa.Value) {
n, off := ssa.AutoVar(v)
a.Type = obj.TYPE_MEM
a.Sym = n.Linksym()
a.Reg = int16(Arch.REGSP)
a.Offset = n.FrameOffset() + off
if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
a.Name = obj.NAME_PARAM
} else {
a.Name = obj.NAME_AUTO
}
}
// Call returns a new CALL instruction for the SSA value v.
// It uses PrepareCall to prepare the call.
func (s *State) Call(v *ssa.Value) *obj.Prog {
pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness of the call comes from ssaGenState
s.PrepareCall(v)
p := s.Prog(obj.ACALL)
if pPosIsStmt == src.PosIsStmt {
p.Pos = v.Pos.WithIsStmt()
} else {
p.Pos = v.Pos.WithNotStmt()
}
if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = sym.Fn
} else {
// TODO(mdempsky): Can these differences be eliminated?
switch Arch.LinkArch.Family {
case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
p.To.Type = obj.TYPE_REG
case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
p.To.Type = obj.TYPE_MEM
default:
base.Fatalf("unknown indirect call family")
}
p.To.Reg = v.Args[0].Reg()
}
return p
}
// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
// It must be called immediately before emitting the actual CALL instruction,
// since it emits PCDATA for the stack map at the call (calls are safe points).
func (s *State) PrepareCall(v *ssa.Value) {
idx := s.livenessMap.Get(v)
if !idx.StackMapValid() {
// See Liveness.hasStackMap.
if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) {
base.Fatalf("missing stack map index for %v", v.LongString())
}
}
call, ok := v.Aux.(*ssa.AuxCall)
if ok {
// Record call graph information for nowritebarrierrec
// analysis.
if nowritebarrierrecCheck != nil {
nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos)
}
}
if s.maxarg < v.AuxInt {
s.maxarg = v.AuxInt
}
}
// UseArgs records the fact that an instruction needs a certain amount of
// callee args space for its use.
func (s *State) UseArgs(n int64) {
if s.maxarg < n {
s.maxarg = n
}
}
// fieldIdx finds the index of the field referred to by the ODOT node n.
func fieldIdx(n *ir.SelectorExpr) int {
t := n.X.Type()
if !t.IsStruct() {
panic("ODOT's LHS is not a struct")
}
for i, f := range t.Fields().Slice() {
if f.Sym == n.Sel {
if f.Offset != n.Offset() {
panic("field offset doesn't match")
}
return i
}
}
panic(fmt.Sprintf("can't find field in expr %v\n", n))
// TODO: keep the result of this function somewhere in the ODOT Node
// so we don't have to recompute it each time we need it.
}
// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
type ssafn struct {
curfn *ir.Func
strings map[string]*obj.LSym // map from constant string to data symbols
stksize int64 // stack size for current frame
stkptrsize int64 // prefix of stack containing pointers
log bool // print ssa debug to the stdout
}
// StringData returns a symbol which
// is the data component of a global string constant containing s.
func (e *ssafn) StringData(s string) *obj.LSym {
if aux, ok := e.strings[s]; ok {
return aux
}
if e.strings == nil {
e.strings = make(map[string]*obj.LSym)
}
data := staticdata.StringSym(e.curfn.Pos(), s)
e.strings[s] = data
return data
}
func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name {
return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
}
// SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
node := parent.N
if node.Class != ir.PAUTO || node.Addrtaken() {
// addressed things and non-autos retain their parents (i.e., cannot truly be split)
return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
}
s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
n := ir.NewNameAt(parent.N.Pos(), s)
s.Def = n
ir.AsNode(s.Def).Name().SetUsed(true)
n.SetType(t)
n.Class = ir.PAUTO
n.SetEsc(ir.EscNever)
n.Curfn = e.curfn
e.curfn.Dcl = append(e.curfn.Dcl, n)
types.CalcSize(t)
return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}
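// Editorial note (illustrative sketch, not part of the original source): SplitSlot
// is how the backend names the pieces of a decomposed auto. Splitting a PAUTO
// string slot "s" would yield derived PAUTO slots "s.ptr" and "s.len", while an
// addressed or non-auto parent just yields an offset view of the parent slot.
// Hypothetical calls, with ptrType standing in for the backend's pointer type:
//
// ptr := e.SplitSlot(&parent, ".ptr", 0, ptrType)
// length := e.SplitSlot(&parent, ".len", 8, types.Types[types.TINT]) // 8 = pointer size on 64-bit targets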
func (e *ssafn) CanSSA(t *types.Type) bool {
return TypeOK(t)
}
func (e *ssafn) Line(pos src.XPos) string {
return base.FmtPos(pos)
}
// Logf logs a message from the compiler.
func (e *ssafn) Logf(msg string, args ...interface{}) {
if e.log {
fmt.Printf(msg, args...)
}
}
func (e *ssafn) Log() bool {
return e.log
}
// Fatalf reports a compiler error and exits.
func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
base.Pos = pos
nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...)
base.Fatalf("'%s': "+msg, nargs...)
}
// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
base.WarnfAt(pos, fmt_, args...)
}
func (e *ssafn) Debug_checknil() bool {
return base.Debug.Nil != 0
}
func (e *ssafn) UseWriteBarrier() bool {
return base.Flag.WB
}
func (e *ssafn) Syslook(name string) *obj.LSym {
switch name {
case "goschedguarded":
return ir.Syms.Goschedguarded
case "writeBarrier":
return ir.Syms.WriteBarrier
case "gcWriteBarrier":
return ir.Syms.GCWriteBarrier
case "typedmemmove":
return ir.Syms.Typedmemmove
case "typedmemclr":
return ir.Syms.Typedmemclr
}
e.Fatalf(src.NoXPos, "unknown Syslook func %v", name)
return nil
}
func (e *ssafn) SetWBPos(pos src.XPos) {
e.curfn.SetWBPos(pos)
}
func (e *ssafn) MyImportPath() string {
return base.Ctxt.Pkgpath
}
func clobberBase(n ir.Node) ir.Node {
if n.Op() == ir.ODOT {
n := n.(*ir.SelectorExpr)
if n.X.Type().NumFields() == 1 {
return clobberBase(n.X)
}
}
if n.Op() == ir.OINDEX {
n := n.(*ir.IndexExpr)
if n.X.Type().IsArray() && n.X.Type().NumElem() == 1 {
return clobberBase(n.X)
}
}
return n
}
// callTargetLSym returns the correct LSym to call 'callee' using its ABI.
func callTargetLSym(callee *ir.Name) *obj.LSym {
if callee.Func == nil {
// TODO(austin): This happens in a few cases of
// compiler-generated functions. These are all
// ABIInternal. It would be better if callee.Func was
// never nil and we didn't need this case.
return callee.Linksym()
}
return callee.LinksymABI(callee.Func.ABI)
}
func min8(a, b int8) int8 {
if a < b {
return a
}
return b
}
func max8(a, b int8) int8 {
if a > b {
return a
}
return b
}
// deferstruct makes a runtime._defer structure.
func deferstruct() *types.Type {
makefield := func(name string, typ *types.Type) *types.Field {
// Unlike the global makefield function, this one needs to set Pkg
// because these types might be compared (in SSA CSE sorting).
// TODO: unify this makefield and the global one above.
sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
return types.NewField(src.NoXPos, sym, typ)
}
// These fields must match the ones in runtime/runtime2.go:_defer and
// (*state).call above.
fields := []*types.Field{
makefield("started", types.Types[types.TBOOL]),
makefield("heap", types.Types[types.TBOOL]),
makefield("openDefer", types.Types[types.TBOOL]),
makefield("sp", types.Types[types.TUINTPTR]),
makefield("pc", types.Types[types.TUINTPTR]),
// Note: the types here don't really matter. Defer structures
// are always scanned explicitly during stack copying and GC,
// so we make them uintptr type even though they are real pointers.
makefield("fn", types.Types[types.TUINTPTR]),
makefield("_panic", types.Types[types.TUINTPTR]),
makefield("link", types.Types[types.TUINTPTR]),
makefield("fd", types.Types[types.TUINTPTR]),
makefield("varp", types.Types[types.TUINTPTR]),
makefield("framepc", types.Types[types.TUINTPTR]),
}
// build struct holding the above fields
s := types.NewStruct(types.NoPkg, fields)
s.SetNoalg(true)
types.CalcStructSize(s)
return s
}
// SpillSlotAddr uses spill-slot information to initialize an obj.Addr.
// The resulting addr is used in a non-standard context -- in the prologue
// of a function, before the frame has been constructed, so the standard
// addressing for the parameters will be wrong.
func SpillSlotAddr(spill ssa.Spill, baseReg int16, extraOffset int64) obj.Addr {
return obj.Addr{
Name: obj.NAME_NONE,
Type: obj.TYPE_MEM,
Reg: baseReg,
Offset: spill.Offset + extraOffset,
}
}
var (
BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
)
// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
var GCWriteBarrierReg map[int16]*obj.LSym
| ["\"GOSSAFUNC\"", "\"GOSSADIR\""] | [] | ["GOSSADIR", "GOSSAFUNC"] | [] | ["GOSSADIR", "GOSSAFUNC"] | go | 2 | 0 | |
src/condor_contrib/campus_factory/python-lib/GlideinWMS/condorExe.py
|
#
# Project:
# glideinWMS
#
# File Version:
# $Id: condorExe.py,v 1.6.12.1.8.1.6.1 2010/11/05 18:28:23 sfiligoi Exp $
#
# Description:
# This module implements the functions to execute condor commands
#
# Author:
# Igor Sfiligoi (Sept 7th 2006)
#
import os
import os.path
import popen2
import string
class UnconfigError(RuntimeError):
def __init__(self,str):
RuntimeError.__init__(self,str)
class ExeError(RuntimeError):
def __init__(self,str):
RuntimeError.__init__(self,str)
#
# Configuration
#
# Set path to condor binaries, if needed
def set_path(new_condor_bin_path,new_condor_sbin_path=None):
global condor_bin_path,condor_sbin_path
condor_bin_path=new_condor_bin_path
if new_condor_sbin_path!=None:
condor_sbin_path=new_condor_sbin_path
#
# Execute an arbitrary condor command and return its output as a list of lines
# condor_exe uses a relative path to $CONDOR_BIN
# Fails (raises ExeError) if the command exits with a non-zero code
#
# can throw UnconfigError or ExeError
def exe_cmd(condor_exe,args,stdin_data=None):
global condor_bin_path
if condor_bin_path==None:
raise UnconfigError, "condor_bin_path is undefined!"
condor_exe_path=os.path.join(condor_bin_path,condor_exe)
cmd="%s %s" % (condor_exe_path,args)
return iexe_cmd(cmd,stdin_data)
def exe_cmd_sbin(condor_exe,args,stdin_data=None):
global condor_sbin_path
if condor_sbin_path==None:
raise UnconfigError, "condor_sbin_path is undefined!"
condor_exe_path=os.path.join(condor_sbin_path,condor_exe)
cmd="%s %s" % (condor_exe_path,args)
return iexe_cmd(cmd,stdin_data)
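# Illustrative usage sketch (editorial addition, not part of the original module):
# once init() has located the Condor binaries, commands are invoked by name and
# their stdout comes back as a list of lines, e.g.
#
#   import condorExe
#   condorExe.init()
#   lines = condorExe.exe_cmd("condor_q", "-totals")  # runs $CONDOR_BIN/condor_q -totals
#   for line in lines:
#       print line,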
############################################################
#
# P R I V A T E, do not use
#
############################################################
# can throw ExeError
def iexe_cmd(cmd,stdin_data=None):
child=popen2.Popen3(cmd,True)
if stdin_data!=None:
child.tochild.write(stdin_data)
child.tochild.close()
tempOut = child.fromchild.readlines()
child.fromchild.close()
tempErr = child.childerr.readlines()
child.childerr.close()
try:
errcode=child.wait()
except OSError, e:
if len(tempOut)!=0:
# if there was some output, it is probably just a problem of timing
# have seen a lot of those when running very short processes
errcode=0
else:
raise ExeError, "Error running '%s'\nStdout:%s\nStderr:%s\nException OSError: %s"%(cmd,tempOut,tempErr,e)
if (errcode!=0):
raise ExeError, "Error running '%s'\ncode %i:%s"%(cmd,errcode,tempErr)
return tempOut
#
# Set condor_bin_path
#
def init1():
global condor_bin_path
# try using condor commands to find it out
try:
condor_bin_path=iexe_cmd("condor_config_val BIN")[0][:-1] # remove trailing newline
except ExeError,e:
# try to find the RELEASE_DIR, and append bin
try:
release_path=iexe_cmd("condor_config_val RELEASE_DIR")
condor_bin_path=os.path.join(release_path[0][:-1],"bin")
except ExeError,e:
# try condor_q in the path
try:
condorq_bin_path=iexe_cmd("which condor_q")
condor_bin_path=os.path.dirname(condorq_bin_path[0][:-1])
except ExeError,e:
# look for condor_config in /etc
if os.environ.has_key("CONDOR_CONFIG"):
condor_config=os.environ["CONDOR_CONFIG"]
else:
condor_config="/etc/condor/condor_config"
try:
# BIN = <path>
bin_def=iexe_cmd('grep "^ *BIN" %s'%condor_config)
condor_bin_path=string.split(bin_def[0][:-1])[2]
except ExeError, e:
try:
# RELEASE_DIR = <path>
release_def=iexe_cmd('grep "^ *RELEASE_DIR" %s'%condor_config)
condor_bin_path=os.path.join(string.split(release_def[0][:-1])[2],"bin")
except ExeError, e:
pass # don't know what else to try
#
# Set condor_sbin_path
#
def init2():
global condor_sbin_path
# try using condor commands to find it out
try:
condor_sbin_path=iexe_cmd("condor_config_val SBIN")[0][:-1] # remove trailing newline
except ExeError,e:
# try to find the RELEASE_DIR, and append sbin
try:
release_path=iexe_cmd("condor_config_val RELEASE_DIR")
condor_sbin_path=os.path.join(release_path[0][:-1],"sbin")
except ExeError,e:
# try condor_advertise in the path
try:
condora_sbin_path=iexe_cmd("which condor_advertise")
condor_sbin_path=os.path.dirname(condora_sbin_path[0][:-1])
except ExeError,e:
# look for condor_config in /etc
if os.environ.has_key("CONDOR_CONFIG"):
condor_config=os.environ["CONDOR_CONFIG"]
else:
condor_config="/etc/condor/condor_config"
try:
# SBIN = <path>
bin_def=iexe_cmd('grep "^ *SBIN" %s'%condor_config)
condor_sbin_path=string.split(bin_def[0][:-1])[2]
except ExeError, e:
try:
# RELEASE_DIR = <path>
release_def=iexe_cmd('grep "^ *RELEASE_DIR" %s'%condor_config)
condor_sbin_path=os.path.join(string.split(release_def[0][:-1])[2],"sbin")
except ExeError, e:
pass # don't know what else to try
def init():
init1()
init2()
# This way we know that it is undefined
condor_bin_path=None
condor_sbin_path=None
init()
| [] | [] | ["CONDOR_CONFIG"] | [] | ["CONDOR_CONFIG"] | python | 1 | 0 | |
cmd/search.go
|
package cmd
import (
"encoding/json"
"fmt"
"log"
"os"
"strconv"
"strings"
"github.com/go-openapi/runtime/client"
ttools "github.com/intel/tfortools"
"github.com/raphaelreyna/shrtct/internal/generated/models"
"github.com/raphaelreyna/shrtct/internal/generated/shortcut"
"github.com/raphaelreyna/shrtct/internal/generated/shortcut/operations"
"github.com/spf13/cobra"
)
func init() {
searchCmd.AddCommand(searchStoryCmd())
searchCmd.AddCommand(searchEpicCmd)
rootCmd.AddCommand(searchCmd)
}
var searchCmd = &cobra.Command{
Use: "search",
Short: "Search stories or epics",
Run: nil,
}
func searchStoryCmd() *cobra.Command {
var (
cc = cobra.Command{
Use: "stories",
}
flags = cc.Flags()
query string
project string
stype string
estimate string
state string
blocked, blocking bool
)
flags.StringVarP(&query, "query", "q", "", "Finds all stories matching the general search query.")
flags.StringVarP(&project, "project", "p", "", "Finds all stories of a specific project.")
flags.StringVarP(&stype, "type", "t", "", "Finds all stories of a specific type (feature, bug, or chore).")
flags.StringVarP(&estimate, "estimate", "e", "", "Finds all stories of a specific point value.")
flags.StringVarP(&state, "state", "s", "", "Finds all stories in a specific state (e.g. \"In Review\").")
flags.BoolVarP(&blocked, "blocked", "b", false, "Finds all stories that are blocked.")
flags.BoolVarP(&blocking, "blocking", "B", false, "Finds all stories that are blocking.")
cc.Run = func(cmd *cobra.Command, args []string) {
var (
ctx = cmd.Context()
clnt = shortcut.Default
ops = clnt.Operations
auth = client.APIKeyAuth(
"Shortcut-Token",
"header",
os.Getenv("SHORTCUT_TOKEN"),
)
)
if err := flags.Parse(args); err != nil {
log.Fatal(err)
}
var qparts = make([]string, 0)
if query != "" {
qparts = append(qparts, query)
}
if project != "" {
qparts = append(qparts, fmt.Sprintf("project:%q", project))
}
if stype != "" {
qparts = append(qparts, fmt.Sprintf("type:%q", stype))
}
if state != "" {
qparts = append(qparts, fmt.Sprintf("state:%q", state))
}
if blocked {
qparts = append(qparts, "is:blocked")
}
if blocking {
qparts = append(qparts, "is:blocking")
}
if estimate != "" {
_, err := strconv.Atoi(estimate)
if err != nil {
log.Fatal(err)
}
qparts = append(qparts, fmt.Sprintf("estimate:%s", estimate))
}
var q = strings.Join(qparts, " AND ")
ssok, err := ops.SearchStories(&operations.SearchStoriesParams{
Context: ctx,
Search: &models.Search{
Query: &q,
},
}, auth)
if err != nil {
log.Fatal(err)
}
var tmplt = flags.Arg(0)
if tmplt != "" {
var searchResults []interface{}
data, err := json.Marshal(ssok.GetPayload().Data)
if err != nil {
log.Fatal(err)
}
if err := json.Unmarshal(data, &searchResults); err != nil {
log.Fatal(err)
}
err = ttools.OutputToTemplate(
os.Stdout,
"stories",
tmplt,
searchResults,
ttools.NewConfig(ttools.OptAllFns),
)
if err != nil {
log.Fatal(err)
}
return
}
var enc = json.NewEncoder(os.Stdout)
enc.SetIndent("", "\t")
enc.Encode(ssok.GetPayload().Data)
}
return &cc
}
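// Editorial note (illustrative, not part of the original source): the flags above
// are translated into Shortcut search syntax and joined with " AND ". Assuming the
// binary is installed as shrtct, an invocation such as
//
// SHORTCUT_TOKEN=... shrtct search stories -p "Backend" -t bug -b
//
// would issue the query `project:"Backend" AND type:"bug" AND is:blocked` and print
// the matching stories as indented JSON, or render them through the optional
// tfortools template passed as the first positional argument.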
var searchEpicCmd = &cobra.Command{
Use: "epics",
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("unimplemented")
},
}
| ["\"SHORTCUT_TOKEN\""] | [] | ["SHORTCUT_TOKEN"] | [] | ["SHORTCUT_TOKEN"] | go | 1 | 0 | |
3rdParty/rocksdb/6.0.fb/java/src/main/java/org/rocksdb/RocksDB.java
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
import java.util.*;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.rocksdb.util.Environment;
/**
* A RocksDB is a persistent ordered map from keys to values. It is safe for
* concurrent access from multiple threads without any external synchronization.
* All methods of this class could potentially throw RocksDBException, which
* indicates that something went wrong on the RocksDB library side and the call failed.
*/
public class RocksDB extends RocksObject {
public static final byte[] DEFAULT_COLUMN_FAMILY = "default".getBytes();
public static final int NOT_FOUND = -1;
private enum LibraryState {
NOT_LOADED,
LOADING,
LOADED
}
private static AtomicReference<LibraryState> libraryLoaded
= new AtomicReference<>(LibraryState.NOT_LOADED);
static {
RocksDB.loadLibrary();
}
/**
* Loads the necessary library files.
* Calling this method twice will have no effect.
* By default the method extracts the shared library for loading at
* java.io.tmpdir, however, you can override this temporary location by
* setting the environment variable ROCKSDB_SHAREDLIB_DIR.
*/
public static void loadLibrary() {
if (libraryLoaded.get() == LibraryState.LOADED) {
return;
}
if (libraryLoaded.compareAndSet(LibraryState.NOT_LOADED,
LibraryState.LOADING)) {
final String tmpDir = System.getenv("ROCKSDB_SHAREDLIB_DIR");
// loading possibly necessary libraries.
for (final CompressionType compressionType : CompressionType.values()) {
try {
if (compressionType.getLibraryName() != null) {
System.loadLibrary(compressionType.getLibraryName());
}
} catch (UnsatisfiedLinkError e) {
// since it may be optional, we ignore its loading failure here.
}
}
try {
NativeLibraryLoader.getInstance().loadLibrary(tmpDir);
} catch (IOException e) {
libraryLoaded.set(LibraryState.NOT_LOADED);
throw new RuntimeException("Unable to load the RocksDB shared library",
e);
}
libraryLoaded.set(LibraryState.LOADED);
return;
}
while (libraryLoaded.get() == LibraryState.LOADING) {
try {
Thread.sleep(10);
} catch(final InterruptedException e) {
//ignore
}
}
}
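// Editorial note (illustrative sketch, not part of the original source): the
// typical initialization path described above extracts the shared library to
// java.io.tmpdir unless ROCKSDB_SHAREDLIB_DIR points elsewhere, e.g.
//
//   // export ROCKSDB_SHAREDLIB_DIR=/opt/rocksdb/native   (set before the JVM starts)
//   RocksDB.loadLibrary();   // idempotent; also triggered by the static initializer above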
/**
* Tries to load the necessary library files from the given list of
* directories.
*
* @param paths a list of strings where each describes a directory
* of a library.
*/
public static void loadLibrary(final List<String> paths) {
if (libraryLoaded.get() == LibraryState.LOADED) {
return;
}
if (libraryLoaded.compareAndSet(LibraryState.NOT_LOADED,
LibraryState.LOADING)) {
for (final CompressionType compressionType : CompressionType.values()) {
if (compressionType.equals(CompressionType.NO_COMPRESSION)) {
continue;
}
for (final String path : paths) {
try {
System.load(path + "/" + Environment.getSharedLibraryFileName(
compressionType.getLibraryName()));
break;
} catch (UnsatisfiedLinkError e) {
// since they are optional, we ignore loading fails.
}
}
}
boolean success = false;
UnsatisfiedLinkError err = null;
for (final String path : paths) {
try {
System.load(path + "/" +
Environment.getJniLibraryFileName("rocksdbjni"));
success = true;
break;
} catch (UnsatisfiedLinkError e) {
err = e;
}
}
if (!success) {
libraryLoaded.set(LibraryState.NOT_LOADED);
throw err;
}
libraryLoaded.set(LibraryState.LOADED);
return;
}
while (libraryLoaded.get() == LibraryState.LOADING) {
try {
Thread.sleep(10);
} catch(final InterruptedException e) {
//ignore
}
}
}
/**
* The factory constructor of RocksDB that opens a RocksDB instance given
* the path to the database using the default options w/ createIfMissing
* set to true.
*
* @param path the path to the rocksdb.
* @return a {@link RocksDB} instance on success, null if the specified
* {@link RocksDB} can not be opened.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
* @see Options#setCreateIfMissing(boolean)
*/
public static RocksDB open(final String path) throws RocksDBException {
// This allows us to use the rocksjni default Options instead of
// the C++ one.
Options options = new Options();
options.setCreateIfMissing(true);
return open(options, path);
}
/**
* The factory constructor of RocksDB that opens a RocksDB instance given
* the path to the database using the specified options and db path and a list
* of column family names.
* <p>
* If opened in read write mode every existing column family name must be
* passed within the list to this method.</p>
* <p>
* If opened in read-only mode only a subset of existing column families must
* be passed to this method.</p>
* <p>
* Options instance *should* not be disposed before all DBs using this options
* instance have been closed. If user doesn't call options dispose explicitly,
* then this options instance will be GC'd automatically</p>
* <p>
* ColumnFamily handles are disposed when the RocksDB instance is disposed.
* </p>
*
* @param path the path to the rocksdb.
* @param columnFamilyDescriptors list of column family descriptors
* @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
* on open.
* @return a {@link RocksDB} instance on success, null if the specified
* {@link RocksDB} can not be opened.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
* @see DBOptions#setCreateIfMissing(boolean)
*/
public static RocksDB open(final String path,
final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
final List<ColumnFamilyHandle> columnFamilyHandles)
throws RocksDBException {
// This allows us to use the rocksjni default Options instead of
// the C++ one.
DBOptions options = new DBOptions();
return open(options, path, columnFamilyDescriptors, columnFamilyHandles);
}
/**
* The factory constructor of RocksDB that opens a RocksDB instance given
* the path to the database using the specified options and db path.
*
* <p>
* Options instance *should* not be disposed before all DBs using this options
* instance have been closed. If user doesn't call options dispose explicitly,
* then this options instance will be GC'd automatically.</p>
* <p>
* Options instance can be re-used to open multiple DBs if DB statistics is
* not used. If DB statistics are required, then its recommended to open DB
* with new Options instance as underlying native statistics instance does not
* use any locks to prevent concurrent updates.</p>
*
* @param options {@link org.rocksdb.Options} instance.
* @param path the path to the rocksdb.
* @return a {@link RocksDB} instance on success, null if the specified
* {@link RocksDB} can not be opened.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*
* @see Options#setCreateIfMissing(boolean)
*/
public static RocksDB open(final Options options, final String path)
throws RocksDBException {
// When a non-default Options is used, keeping an Options reference
// in RocksDB prevents Java from garbage-collecting the Options during
// the life-time of the currently-created RocksDB.
final RocksDB db = new RocksDB(open(options.nativeHandle_, path));
db.storeOptionsInstance(options);
return db;
}
/**
* The factory constructor of RocksDB that opens a RocksDB instance given
* the path to the database using the specified options and db path and a list
* of column family names.
* <p>
* If opened in read write mode every existing column family name must be
* passed within the list to this method.</p>
* <p>
* If opened in read-only mode only a subset of existing column families must
* be passed to this method.</p>
* <p>
* Options instance *should* not be disposed before all DBs using this options
* instance have been closed. If user doesn't call options dispose explicitly,
* then this options instance will be GC'd automatically.</p>
* <p>
* Options instance can be re-used to open multiple DBs if DB statistics is
* not used. If DB statistics are required, then its recommended to open DB
* with new Options instance as underlying native statistics instance does not
* use any locks to prevent concurrent updates.</p>
* <p>
* ColumnFamily handles are disposed when the RocksDB instance is disposed.
* </p>
*
* @param options {@link org.rocksdb.DBOptions} instance.
* @param path the path to the rocksdb.
* @param columnFamilyDescriptors list of column family descriptors
* @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
* on open.
* @return a {@link RocksDB} instance on success, null if the specified
* {@link RocksDB} can not be opened.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*
* @see DBOptions#setCreateIfMissing(boolean)
*/
public static RocksDB open(final DBOptions options, final String path,
final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
final List<ColumnFamilyHandle> columnFamilyHandles)
throws RocksDBException {
final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors
.get(i);
cfNames[i] = cfDescriptor.columnFamilyName();
cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_;
}
final long[] handles = open(options.nativeHandle_, path, cfNames,
cfOptionHandles);
final RocksDB db = new RocksDB(handles[0]);
db.storeOptionsInstance(options);
for (int i = 1; i < handles.length; i++) {
columnFamilyHandles.add(new ColumnFamilyHandle(db, handles[i]));
}
return db;
}
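// Editorial note (illustrative sketch, not part of the original source): opening a
// database together with its column families. The path and the "users" family name
// are hypothetical:
//
//   final List<ColumnFamilyDescriptor> cfds = Arrays.asList(
//       new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
//       new ColumnFamilyDescriptor("users".getBytes()));
//   final List<ColumnFamilyHandle> handles = new ArrayList<>();
//   final DBOptions opts = new DBOptions()
//       .setCreateIfMissing(true)
//       .setCreateMissingColumnFamilies(true);
//   final RocksDB db = RocksDB.open(opts, "/path/to/db", cfds, handles);
//   // handles.get(0) is the default family, handles.get(1) is "users"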
/**
* The factory constructor of RocksDB that opens a RocksDB instance in
* Read-Only mode given the path to the database using the default
* options.
*
* @param path the path to the RocksDB.
* @return a {@link RocksDB} instance on success, null if the specified
* {@link RocksDB} can not be opened.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public static RocksDB openReadOnly(final String path)
throws RocksDBException {
// This allows us to use the rocksjni default Options instead of
// the C++ one.
Options options = new Options();
return openReadOnly(options, path);
}
/**
* The factory constructor of RocksDB that opens a RocksDB instance in
* Read-Only mode given the path to the database using the default
* options.
*
* @param path the path to the RocksDB.
* @param columnFamilyDescriptors list of column family descriptors
* @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
* on open.
* @return a {@link RocksDB} instance on success, null if the specified
* {@link RocksDB} can not be opened.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public static RocksDB openReadOnly(final String path,
final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
final List<ColumnFamilyHandle> columnFamilyHandles)
throws RocksDBException {
// This allows us to use the rocksjni default Options instead of
// the C++ one.
final DBOptions options = new DBOptions();
return openReadOnly(options, path, columnFamilyDescriptors,
columnFamilyHandles);
}
/**
* The factory constructor of RocksDB that opens a RocksDB instance in
* Read-Only mode given the path to the database using the specified
* options and db path.
*
* Options instance *should* not be disposed before all DBs using this options
* instance have been closed. If user doesn't call options dispose explicitly,
* then this options instance will be GC'd automatically.
*
* @param options {@link Options} instance.
* @param path the path to the RocksDB.
* @return a {@link RocksDB} instance on success, null if the specified
* {@link RocksDB} can not be opened.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public static RocksDB openReadOnly(final Options options, final String path)
throws RocksDBException {
// When a non-default Options is used, keeping an Options reference
// in RocksDB prevents Java from garbage-collecting the Options during
// the life-time of the currently-created RocksDB.
final RocksDB db = new RocksDB(openROnly(options.nativeHandle_, path));
db.storeOptionsInstance(options);
return db;
}
/**
* The factory constructor of RocksDB that opens a RocksDB instance in
* Read-Only mode given the path to the database using the specified
* options and db path.
*
* <p>This open method allows to open RocksDB using a subset of available
* column families</p>
* <p>Options instance *should* not be disposed before all DBs using this
* options instance have been closed. If user doesn't call options dispose
* explicitly,then this options instance will be GC'd automatically.</p>
*
* @param options {@link DBOptions} instance.
* @param path the path to the RocksDB.
* @param columnFamilyDescriptors list of column family descriptors
* @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
* on open.
* @return a {@link RocksDB} instance on success, null if the specified
* {@link RocksDB} can not be opened.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public static RocksDB openReadOnly(final DBOptions options, final String path,
final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
final List<ColumnFamilyHandle> columnFamilyHandles)
throws RocksDBException {
// When a non-default Options is used, keeping an Options reference
// in RocksDB prevents Java from garbage-collecting the Options during
// the life-time of the currently-created RocksDB.
final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors
.get(i);
cfNames[i] = cfDescriptor.columnFamilyName();
cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_;
}
final long[] handles = openROnly(options.nativeHandle_, path, cfNames,
cfOptionHandles);
final RocksDB db = new RocksDB(handles[0]);
db.storeOptionsInstance(options);
for (int i = 1; i < handles.length; i++) {
columnFamilyHandles.add(new ColumnFamilyHandle(db, handles[i]));
}
return db;
}
/**
* Static method to determine all available column families for a
* rocksdb database identified by path
*
* @param options Options for opening the database
* @param path Absolute path to rocksdb database
* @return List<byte[]> List containing the column family names
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public static List<byte[]> listColumnFamilies(final Options options,
final String path) throws RocksDBException {
return Arrays.asList(RocksDB.listColumnFamilies(options.nativeHandle_,
path));
}
protected void storeOptionsInstance(DBOptionsInterface options) {
options_ = options;
}
private static void checkBounds(int offset, int len, int size) {
if ((offset | len | (offset + len) | (size - (offset + len))) < 0) {
throw new IndexOutOfBoundsException(String.format("offset(%d), len(%d), size(%d)", offset, len, size));
}
}
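// Editorial note (explanatory, not part of the original source): the single
// comparison above relies on two's-complement sign bits. If offset, len,
// offset + len, or size - (offset + len) is negative, its sign bit is set, so the
// bitwise OR of the four values is negative and the whole range check costs one
// branch instead of four. For example offset=3, len=5, size=4 gives
// size - (offset + len) = -4, the OR is negative, and the exception is thrown.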
/**
* Set the database entry for "key" to "value".
*
* @param key the specified key to be inserted.
* @param value the value associated with the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void put(final byte[] key, final byte[] value)
throws RocksDBException {
put(nativeHandle_, key, 0, key.length, value, 0, value.length);
}
/**
* Set the database entry for "key" to "value"
*
* @param key The specified key to be inserted
* @param offset the offset of the "key" array to be used, must be non-negative and
* no larger than "key".length
* @param len the length of the "key" array to be used, must be non-negative and
*     no larger than ("key".length - offset)
* @param value the value associated with the specified key
* @param vOffset the offset of the "value" array to be used, must be non-negative and
*     no larger than "value".length
* @param vLen the length of the "value" array to be used, must be non-negative and
*     no larger than ("value".length - vOffset)
*
* @throws RocksDBException thrown if errors happens in underlying native library.
*/
public void put(final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen) throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
put(nativeHandle_, key, offset, len, value, vOffset, vLen);
}
/**
* Set the database entry for "key" to "value" in the specified
* column family.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param key the specified key to be inserted.
* @param value the value associated with the specified key.
*
* throws IllegalArgumentException if column family is not present
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void put(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key, final byte[] value) throws RocksDBException {
put(nativeHandle_, key, 0, key.length, value, 0, value.length,
columnFamilyHandle.nativeHandle_);
}
/**
* Set the database entry for "key" to "value" in the specified
* column family.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param key The specified key to be inserted
* @param offset the offset of the "key" array to be used, must be non-negative and
* no larger than "key".length
* @param len the length of the "key" array to be used, must be non-negative and
*     no larger than ("key".length - offset)
* @param value the value associated with the specified key
* @param vOffset the offset of the "value" array to be used, must be non-negative and
*     no larger than "value".length
* @param vLen the length of the "value" array to be used, must be non-negative and
*     no larger than ("value".length - vOffset)
*
* @throws RocksDBException thrown if errors happens in underlying native library.
*/
public void put(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen) throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
put(nativeHandle_, key, offset, len, value, vOffset, vLen,
columnFamilyHandle.nativeHandle_);
}
/**
* Set the database entry for "key" to "value".
*
* @param writeOpts {@link org.rocksdb.WriteOptions} instance.
* @param key the specified key to be inserted.
* @param value the value associated with the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void put(final WriteOptions writeOpts, final byte[] key,
final byte[] value) throws RocksDBException {
put(nativeHandle_, writeOpts.nativeHandle_,
key, 0, key.length, value, 0, value.length);
}
/**
* Set the database entry for "key" to "value".
*
* @param writeOpts {@link org.rocksdb.WriteOptions} instance.
* @param key The specified key to be inserted
* @param offset the offset of the "key" array to be used, must be non-negative and
* no larger than "key".length
* @param len the length of the "key" array to be used, must be non-negative and
*     no larger than ("key".length - offset)
* @param value the value associated with the specified key
* @param vOffset the offset of the "value" array to be used, must be non-negative and
*     no larger than "value".length
* @param vLen the length of the "value" array to be used, must be non-negative and
*     no larger than ("value".length - vOffset)
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void put(final WriteOptions writeOpts, byte[] key, int offset, int len, byte[] value, int vOffset, int vLen) throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
put(nativeHandle_, writeOpts.nativeHandle_,
key, offset, len, value, vOffset, vLen);
}
/**
* Set the database entry for "key" to "value" for the specified
* column family.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param writeOpts {@link org.rocksdb.WriteOptions} instance.
* @param key the specified key to be inserted.
* @param value the value associated with the specified key.
*
* throws IllegalArgumentException if column family is not present
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
* @see IllegalArgumentException
*/
public void put(final ColumnFamilyHandle columnFamilyHandle,
final WriteOptions writeOpts, final byte[] key,
final byte[] value) throws RocksDBException {
put(nativeHandle_, writeOpts.nativeHandle_, key, 0, key.length, value,
0, value.length, columnFamilyHandle.nativeHandle_);
}
/**
* Set the database entry for "key" to "value" for the specified
* column family.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param writeOpts {@link org.rocksdb.WriteOptions} instance.
* @param key The specified key to be inserted
* @param offset the offset of the "key" array to be used, must be non-negative and
* no larger than "key".length
* @param len the length of the "key" array to be used, must be non-negative and
*     no larger than ("key".length - offset)
* @param value the value associated with the specified key
* @param vOffset the offset of the "value" array to be used, must be non-negative and
*     no larger than "value".length
* @param vLen the length of the "value" array to be used, must be non-negative and
*     no larger than ("value".length - vOffset)
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void put(final ColumnFamilyHandle columnFamilyHandle,
final WriteOptions writeOpts, final byte[] key, int offset, int len,
final byte[] value, int vOffset, int vLen) throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
put(nativeHandle_, writeOpts.nativeHandle_, key, offset, len, value,
vOffset, vLen, columnFamilyHandle.nativeHandle_);
}
/**
* If the key definitely does not exist in the database, then this method
* returns false, else true.
*
* This check is potentially lighter-weight than invoking DB::Get(). One way
* to make this lighter weight is to avoid doing any IOs.
*
* @param key byte array of a key to search for
* @param value StringBuilder instance which is a out parameter if a value is
* found in block-cache.
* @return boolean value indicating if key does not exist or might exist.
*/
public boolean keyMayExist(final byte[] key, final StringBuilder value) {
return keyMayExist(nativeHandle_, key, 0, key.length, value);
}
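// Editorial note (illustrative usage sketch, not part of the original source):
// keyMayExist gives a fast, possibly false-positive answer; a definite read still
// requires get(), e.g.
//
//   final StringBuilder cached = new StringBuilder();
//   if (db.keyMayExist(key, cached)) {
//       // key might exist; cached may already hold the value if it was found in
//       // the block cache, otherwise fall back to db.get(key, ...)
//   } else {
//       // key definitely does not exist
//   }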
/**
* If the key definitely does not exist in the database, then this method
* returns false, else true.
*
* This check is potentially lighter-weight than invoking DB::Get(). One way
* to make this lighter weight is to avoid doing any IOs.
*
* @param key byte array of a key to search for
* @param offset the offset of the "key" array to be used, must be non-negative and
* no larger than "key".length
* @param len the length of the "key" array to be used, must be non-negative and
* @param value StringBuilder instance which is a out parameter if a value is
* found in block-cache.
*
* @return boolean value indicating if key does not exist or might exist.
*/
public boolean keyMayExist(final byte[] key, int offset, int len, final StringBuilder value) {
checkBounds(offset, len, key.length);
return keyMayExist(nativeHandle_, key, offset, len, value);
}
/**
* If the key definitely does not exist in the database, then this method
* returns false, else true.
*
* This check is potentially lighter-weight than invoking DB::Get(). One way
* to make this lighter weight is to avoid doing any IOs.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
* @param key byte array of a key to search for
* @param value StringBuilder instance which is a out parameter if a value is
* found in block-cache.
* @return boolean value indicating if key does not exist or might exist.
*/
public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key, final StringBuilder value) {
return keyMayExist(nativeHandle_, key, 0, key.length,
columnFamilyHandle.nativeHandle_, value);
}
/**
* If the key definitely does not exist in the database, then this method
* returns false, else true.
*
* This check is potentially lighter-weight than invoking DB::Get(). One way
* to make this lighter weight is to avoid doing any IOs.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
* @param key byte array of a key to search for
* @param offset the offset of the "key" array to be used, must be non-negative and
* no larger than "key".length
* @param len the length of the "key" array to be used, must be non-negative and
* @param value StringBuilder instance which is a out parameter if a value is
* found in block-cache.
* @return boolean value indicating if key does not exist or might exist.
*/
public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key, int offset, int len, final StringBuilder value) {
checkBounds(offset, len, key.length);
return keyMayExist(nativeHandle_, key, offset, len,
columnFamilyHandle.nativeHandle_, value);
}
/**
* If the key definitely does not exist in the database, then this method
* returns false, else true.
*
* This check is potentially lighter-weight than invoking DB::Get(). One way
* to make this lighter weight is to avoid doing any IOs.
*
* @param readOptions {@link ReadOptions} instance
* @param key byte array of a key to search for
* @param value StringBuilder instance which is a out parameter if a value is
* found in block-cache.
* @return boolean value indicating if key does not exist or might exist.
*/
public boolean keyMayExist(final ReadOptions readOptions,
final byte[] key, final StringBuilder value) {
return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
key, 0, key.length, value);
}
/**
* If the key definitely does not exist in the database, then this method
* returns false, else true.
*
* This check is potentially lighter-weight than invoking DB::Get(). One way
* to make this lighter weight is to avoid doing any IOs.
*
* @param readOptions {@link ReadOptions} instance
* @param key byte array of a key to search for
* @param offset the offset of the "key" array to be used, must be non-negative and
* no larger than "key".length
* @param len the length of the "key" array to be used, must be non-negative and
* @param value StringBuilder instance which is a out parameter if a value is
* found in block-cache.
* @return boolean value indicating if key does not exist or might exist.
*/
public boolean keyMayExist(final ReadOptions readOptions,
final byte[] key, int offset, int len, final StringBuilder value) {
checkBounds(offset, len, key.length);
return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
key, offset, len, value);
}
/**
* If the key definitely does not exist in the database, then this method
* returns false, else true.
*
* This check is potentially lighter-weight than invoking DB::Get(). One way
* to make this lighter weight is to avoid doing any IOs.
*
* @param readOptions {@link ReadOptions} instance
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
* @param key byte array of a key to search for
* @param value StringBuilder instance which is a out parameter if a value is
* found in block-cache.
* @return boolean value indicating if key does not exist or might exist.
*/
public boolean keyMayExist(final ReadOptions readOptions,
final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
final StringBuilder value) {
return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
key, 0, key.length, columnFamilyHandle.nativeHandle_,
value);
}
/**
* If the key definitely does not exist in the database, then this method
* returns false, else true.
*
* This check is potentially lighter-weight than invoking DB::Get(). One way
* to make this lighter weight is to avoid doing any IOs.
*
* @param readOptions {@link ReadOptions} instance
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
* @param key byte array of a key to search for
* @param offset the offset of the "key" array to be used, must be non-negative and
* no larger than "key".length
* @param len the length of the "key" array to be used, must be non-negative and
* @param value StringBuilder instance which is a out parameter if a value is
* found in block-cache.
* @return boolean value indicating if key does not exist or might exist.
*/
public boolean keyMayExist(final ReadOptions readOptions,
final ColumnFamilyHandle columnFamilyHandle, final byte[] key, int offset, int len,
final StringBuilder value) {
checkBounds(offset, len, key.length);
return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
key, offset, len, columnFamilyHandle.nativeHandle_,
value);
}
/**
* Apply the specified updates to the database.
*
* @param writeOpts WriteOptions instance
* @param updates WriteBatch instance
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void write(final WriteOptions writeOpts, final WriteBatch updates)
throws RocksDBException {
write0(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_);
}
/**
* Apply the specified updates to the database.
*
* @param writeOpts WriteOptions instance
* @param updates WriteBatchWithIndex instance
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void write(final WriteOptions writeOpts,
final WriteBatchWithIndex updates) throws RocksDBException {
write1(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_);
}
/**
* Add merge operand for key/value pair.
*
* @param key the specified key to be merged.
* @param value the value to be merged with the current value for
* the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void merge(final byte[] key, final byte[] value)
throws RocksDBException {
merge(nativeHandle_, key, 0, key.length, value, 0, value.length);
}
/**
* Add merge operand for key/value pair.
*
* @param key the specified key to be merged.
* @param offset the offset of the "key" array to be used, must be non-negative and
* no larger than "key".length
* @param len the length of the "key" array to be used, must be non-negative and
* @param value the value to be merged with the current value for the specified key.
* @param vOffset the offset of the "value" array to be used, must be non-negative and
* no longer than "key".length
* @param vLen the length of the "value" array to be used, must be non-negative and
* must be non-negative and no larger than ("value".length - offset)
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void merge(final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen)
throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
merge(nativeHandle_, key, offset, len, value, vOffset, vLen);
}
/**
* Add merge operand for key/value pair in a ColumnFamily.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
* @param key the specified key to be merged.
* @param value the value to be merged with the current value for
* the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void merge(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key, final byte[] value) throws RocksDBException {
merge(nativeHandle_, key, 0, key.length, value, 0, value.length,
columnFamilyHandle.nativeHandle_);
}
/**
* Add merge operand for key/value pair in a ColumnFamily.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
* @param key the specified key to be merged.
* @param offset the offset of the "key" array to be used, must be non-negative and
* no larger than "key".length
* @param len the length of the "key" array to be used, must be non-negative and
* @param value the value to be merged with the current value for
* the specified key.
* @param vOffset the offset of the "value" array to be used, must be non-negative and
* no longer than "key".length
* @param vLen the length of the "value" array to be used, must be non-negative and
* must be non-negative and no larger than ("value".length - offset)
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void merge(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen) throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
merge(nativeHandle_, key, offset, len, value, vOffset, vLen,
columnFamilyHandle.nativeHandle_);
}
/**
* Add merge operand for key/value pair.
*
* @param writeOpts {@link WriteOptions} for this write.
* @param key the specified key to be merged.
* @param value the value to be merged with the current value for
* the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void merge(final WriteOptions writeOpts, final byte[] key,
final byte[] value) throws RocksDBException {
merge(nativeHandle_, writeOpts.nativeHandle_,
key, 0, key.length, value, 0, value.length);
}
/**
* Add merge operand for key/value pair.
*
* @param writeOpts {@link WriteOptions} for this write.
* @param key the specified key to be merged.
* @param offset the offset of the "key" array to be used, must be non-negative and
* no larger than "key".length
* @param len the length of the "key" array to be used, must be non-negative and
* @param value the value to be merged with the current value for
* the specified key.
* @param vOffset the offset of the "value" array to be used, must be non-negative and
* no longer than "key".length
* @param vLen the length of the "value" array to be used, must be non-negative and
* must be non-negative and no larger than ("value".length - offset)
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void merge(final WriteOptions writeOpts, final byte[] key, int offset, int len,
final byte[] value, int vOffset, int vLen) throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
merge(nativeHandle_, writeOpts.nativeHandle_,
key, offset, len, value, vOffset, vLen);
}
/**
* Add merge operand for key/value pair.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
* @param writeOpts {@link WriteOptions} for this write.
* @param key the specified key to be merged.
* @param value the value to be merged with the current value for
* the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void merge(final ColumnFamilyHandle columnFamilyHandle,
final WriteOptions writeOpts, final byte[] key,
final byte[] value) throws RocksDBException {
merge(nativeHandle_, writeOpts.nativeHandle_,
key, 0, key.length, value, 0, value.length,
columnFamilyHandle.nativeHandle_);
}
/**
* Add merge operand for key/value pair.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
* @param writeOpts {@link WriteOptions} for this write.
* @param key the specified key to be merged.
* @param offset the offset of the "key" array to be used, must be non-negative and
* no larger than "key".length
* @param len the length of the "key" array to be used, must be non-negative and
* @param value the value to be merged with the current value for
* the specified key.
* @param vOffset the offset of the "value" array to be used, must be non-negative and
* no longer than "key".length
* @param vLen the length of the "value" array to be used, must be non-negative and
* must be non-negative and no larger than ("value".length - offset)
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void merge(final ColumnFamilyHandle columnFamilyHandle,
final WriteOptions writeOpts, final byte[] key, int offset, int len,
final byte[] value, int vOffset, int vLen) throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
merge(nativeHandle_, writeOpts.nativeHandle_,
key, offset, len, value, vOffset, vLen,
columnFamilyHandle.nativeHandle_);
}
// TODO(AR) we should improve the #get() API, returning -1 (RocksDB.NOT_FOUND) is not very nice
// when we could communicate better status into, also the C++ code show that -2 could be returned
/**
* Get the value associated with the specified key.
*
* @param key the key to retrieve the value.
* @param value the out-value to receive the retrieved value.
*
* @return The size of the actual value that matches the specified
* {@code key} in byte. If the return value is greater than the
* length of {@code value}, then it indicates that the size of the
* input buffer {@code value} is insufficient and partial result will
* be returned. RocksDB.NOT_FOUND will be returned if the value not
* found.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public int get(final byte[] key, final byte[] value) throws RocksDBException {
return get(nativeHandle_, key, 0, key.length, value, 0, value.length);
}
/**
* Get the value associated with the specified key.
*
* @param key the key to retrieve the value.
* @param offset the offset of the "key" array to be used, must be non-negative and
* no larger than "key".length
* @param len the length of the "key" array to be used, must be non-negative and
* @param value the out-value to receive the retrieved value.
* @param vOffset the offset of the "value" array to be used, must be non-negative and
* no longer than "key".length
* @param vLen the length of the "value" array to be used, must be non-negative and
* must be non-negative and no larger than ("value".length - offset)
*
* @return The size of the actual value that matches the specified
* {@code key} in byte. If the return value is greater than the
* length of {@code value}, then it indicates that the size of the
* input buffer {@code value} is insufficient and partial result will
* be returned. RocksDB.NOT_FOUND will be returned if the value not
* found.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public int get(final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen) throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
return get(nativeHandle_, key, offset, len, value, vOffset, vLen);
}
/**
* Get the value associated with the specified key within column family.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param key the key to retrieve the value.
* @param value the out-value to receive the retrieved value.
* @return The size of the actual value that matches the specified
* {@code key} in byte. If the return value is greater than the
* length of {@code value}, then it indicates that the size of the
* input buffer {@code value} is insufficient and partial result will
* be returned. RocksDB.NOT_FOUND will be returned if the value not
* found.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
final byte[] value) throws RocksDBException, IllegalArgumentException {
return get(nativeHandle_, key, 0, key.length, value, 0, value.length,
columnFamilyHandle.nativeHandle_);
}
/**
* Get the value associated with the specified key within column family.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param key the key to retrieve the value.
   * @param offset the offset of the "key" array to be used, must be
   *     non-negative and no larger than "key".length
   * @param len the length of the "key" array to be used, must be
   *     non-negative and no larger than ("key".length - offset)
   * @param value the out-value to receive the retrieved value.
   * @param vOffset the offset of the "value" array to be used, must be
   *     non-negative and no larger than "value".length
   * @param vLen the length of the "value" array to be used, must be
   *     non-negative and no larger than ("value".length - vOffset)
   *
   * @return The size of the actual value that matches the specified
   *     {@code key} in bytes. If the return value is greater than the
   *     length of {@code value}, then it indicates that the size of the
   *     input buffer {@code value} is insufficient and a partial result
   *     will be returned. {@code RocksDB.NOT_FOUND} will be returned if
   *     the value is not found.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, int offset, int len,
final byte[] value, int vOffset, int vLen) throws RocksDBException, IllegalArgumentException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
return get(nativeHandle_, key, offset, len, value, vOffset, vLen,
columnFamilyHandle.nativeHandle_);
}
/**
* Get the value associated with the specified key.
*
* @param opt {@link org.rocksdb.ReadOptions} instance.
* @param key the key to retrieve the value.
* @param value the out-value to receive the retrieved value.
   * @return The size of the actual value that matches the specified
   *     {@code key} in bytes. If the return value is greater than the
   *     length of {@code value}, then it indicates that the size of the
   *     input buffer {@code value} is insufficient and a partial result
   *     will be returned. {@code RocksDB.NOT_FOUND} will be returned if
   *     the value is not found.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public int get(final ReadOptions opt, final byte[] key,
final byte[] value) throws RocksDBException {
return get(nativeHandle_, opt.nativeHandle_,
key, 0, key.length, value, 0, value.length);
}
/**
* Get the value associated with the specified key.
*
* @param opt {@link org.rocksdb.ReadOptions} instance.
* @param key the key to retrieve the value.
   * @param offset the offset of the "key" array to be used, must be
   *     non-negative and no larger than "key".length
   * @param len the length of the "key" array to be used, must be
   *     non-negative and no larger than ("key".length - offset)
   * @param value the out-value to receive the retrieved value.
   * @param vOffset the offset of the "value" array to be used, must be
   *     non-negative and no larger than "value".length
   * @param vLen the length of the "value" array to be used, must be
   *     non-negative and no larger than ("value".length - vOffset)
   * @return The size of the actual value that matches the specified
   *     {@code key} in bytes. If the return value is greater than the
   *     length of {@code value}, then it indicates that the size of the
   *     input buffer {@code value} is insufficient and a partial result
   *     will be returned. {@code RocksDB.NOT_FOUND} will be returned if
   *     the value is not found.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public int get(final ReadOptions opt, final byte[] key, int offset, int len,
final byte[] value, int vOffset, int vLen) throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
return get(nativeHandle_, opt.nativeHandle_,
key, offset, len, value, vOffset, vLen);
}
/**
* Get the value associated with the specified key within column family.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param opt {@link org.rocksdb.ReadOptions} instance.
* @param key the key to retrieve the value.
* @param value the out-value to receive the retrieved value.
   * @return The size of the actual value that matches the specified
   *     {@code key} in bytes. If the return value is greater than the
   *     length of {@code value}, then it indicates that the size of the
   *     input buffer {@code value} is insufficient and a partial result
   *     will be returned. {@code RocksDB.NOT_FOUND} will be returned if
   *     the value is not found.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public int get(final ColumnFamilyHandle columnFamilyHandle,
final ReadOptions opt, final byte[] key, final byte[] value)
throws RocksDBException {
return get(nativeHandle_, opt.nativeHandle_, key, 0, key.length, value,
0, value.length, columnFamilyHandle.nativeHandle_);
}
/**
* Get the value associated with the specified key within column family.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param opt {@link org.rocksdb.ReadOptions} instance.
* @param key the key to retrieve the value.
   * @param offset the offset of the "key" array to be used, must be
   *     non-negative and no larger than "key".length
   * @param len the length of the "key" array to be used, must be
   *     non-negative and no larger than ("key".length - offset)
   * @param value the out-value to receive the retrieved value.
   * @param vOffset the offset of the "value" array to be used, must be
   *     non-negative and no larger than "value".length
   * @param vLen the length of the "value" array to be used, must be
   *     non-negative and no larger than ("value".length - vOffset)
   * @return The size of the actual value that matches the specified
   *     {@code key} in bytes. If the return value is greater than the
   *     length of {@code value}, then it indicates that the size of the
   *     input buffer {@code value} is insufficient and a partial result
   *     will be returned. {@code RocksDB.NOT_FOUND} will be returned if
   *     the value is not found.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
  public int get(final ColumnFamilyHandle columnFamilyHandle,
      final ReadOptions opt, final byte[] key, int offset, int len,
      final byte[] value, int vOffset, int vLen)
      throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
return get(nativeHandle_, opt.nativeHandle_, key, offset, len, value,
vOffset, vLen, columnFamilyHandle.nativeHandle_);
}
/**
* The simplified version of get which returns a new byte array storing
* the value associated with the specified input key if any. null will be
* returned if the specified key is not found.
*
   * @param key the key to retrieve the value.
* @return a byte array storing the value associated with the input key if
* any. null if it does not find the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public byte[] get(final byte[] key) throws RocksDBException {
return get(nativeHandle_, key, 0, key.length);
}
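  /*
   * Usage sketch (illustrative only, assumes an already-open RocksDB
   * instance "db"): the allocating form of get returns null for missing
   * keys.
   *
   *   final byte[] value = db.get("key1".getBytes(StandardCharsets.UTF_8));
   *   if (value == null) {
   *     // the key does not exist
   *   }
   */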
/**
* The simplified version of get which returns a new byte array storing
* the value associated with the specified input key if any. null will be
* returned if the specified key is not found.
*
   * @param key the key to retrieve the value.
   * @param offset the offset of the "key" array to be used, must be
   *     non-negative and no larger than "key".length
   * @param len the length of the "key" array to be used, must be
   *     non-negative and no larger than ("key".length - offset)
* @return a byte array storing the value associated with the input key if
* any. null if it does not find the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public byte[] get(final byte[] key, int offset, int len) throws RocksDBException {
checkBounds(offset, len, key.length);
return get(nativeHandle_, key, offset, len);
}
/**
* The simplified version of get which returns a new byte array storing
* the value associated with the specified input key if any. null will be
* returned if the specified key is not found.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
   * @param key the key to retrieve the value.
* @return a byte array storing the value associated with the input key if
* any. null if it does not find the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public byte[] get(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key) throws RocksDBException {
return get(nativeHandle_, key, 0, key.length,
columnFamilyHandle.nativeHandle_);
}
/**
* The simplified version of get which returns a new byte array storing
* the value associated with the specified input key if any. null will be
* returned if the specified key is not found.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
   * @param key the key to retrieve the value.
   * @param offset the offset of the "key" array to be used, must be
   *     non-negative and no larger than "key".length
   * @param len the length of the "key" array to be used, must be
   *     non-negative and no larger than ("key".length - offset)
* @return a byte array storing the value associated with the input key if
* any. null if it does not find the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public byte[] get(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key, int offset, int len) throws RocksDBException {
checkBounds(offset, len, key.length);
return get(nativeHandle_, key, offset, len,
columnFamilyHandle.nativeHandle_);
}
/**
* The simplified version of get which returns a new byte array storing
* the value associated with the specified input key if any. null will be
* returned if the specified key is not found.
*
   * @param key the key to retrieve the value.
* @param opt Read options.
* @return a byte array storing the value associated with the input key if
* any. null if it does not find the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public byte[] get(final ReadOptions opt, final byte[] key)
throws RocksDBException {
return get(nativeHandle_, opt.nativeHandle_, key, 0, key.length);
}
/**
* The simplified version of get which returns a new byte array storing
* the value associated with the specified input key if any. null will be
* returned if the specified key is not found.
*
   * @param key the key to retrieve the value.
   * @param offset the offset of the "key" array to be used, must be
   *     non-negative and no larger than "key".length
   * @param len the length of the "key" array to be used, must be
   *     non-negative and no larger than ("key".length - offset)
* @param opt Read options.
* @return a byte array storing the value associated with the input key if
* any. null if it does not find the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public byte[] get(final ReadOptions opt, final byte[] key, int offset, int len)
throws RocksDBException {
checkBounds(offset, len, key.length);
return get(nativeHandle_, opt.nativeHandle_, key, offset, len);
}
/**
* The simplified version of get which returns a new byte array storing
* the value associated with the specified input key if any. null will be
* returned if the specified key is not found.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
   * @param key the key to retrieve the value.
* @param opt Read options.
* @return a byte array storing the value associated with the input key if
* any. null if it does not find the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public byte[] get(final ColumnFamilyHandle columnFamilyHandle,
final ReadOptions opt, final byte[] key) throws RocksDBException {
return get(nativeHandle_, opt.nativeHandle_, key, 0, key.length,
columnFamilyHandle.nativeHandle_);
}
/**
* The simplified version of get which returns a new byte array storing
* the value associated with the specified input key if any. null will be
* returned if the specified key is not found.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
   * @param key the key to retrieve the value.
   * @param offset the offset of the "key" array to be used, must be
   *     non-negative and no larger than "key".length
   * @param len the length of the "key" array to be used, must be
   *     non-negative and no larger than ("key".length - offset)
* @param opt Read options.
* @return a byte array storing the value associated with the input key if
* any. null if it does not find the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public byte[] get(final ColumnFamilyHandle columnFamilyHandle,
final ReadOptions opt, final byte[] key, int offset, int len) throws RocksDBException {
checkBounds(offset, len, key.length);
return get(nativeHandle_, opt.nativeHandle_, key, offset, len,
columnFamilyHandle.nativeHandle_);
}
/**
* Returns a map of keys for which values were found in DB.
*
* @param keys List of keys for which values need to be retrieved.
* @return Map where key of map is the key passed by user and value for map
* entry is the corresponding value in DB.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*
* @deprecated Consider {@link #multiGetAsList(List)} instead.
*/
@Deprecated
public Map<byte[], byte[]> multiGet(final List<byte[]> keys)
throws RocksDBException {
assert(keys.size() != 0);
final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
final int keyOffsets[] = new int[keysArray.length];
final int keyLengths[] = new int[keysArray.length];
for(int i = 0; i < keyLengths.length; i++) {
keyLengths[i] = keysArray[i].length;
}
final byte[][] values = multiGet(nativeHandle_, keysArray, keyOffsets,
keyLengths);
final Map<byte[], byte[]> keyValueMap =
new HashMap<>(computeCapacityHint(values.length));
for(int i = 0; i < values.length; i++) {
if(values[i] == null) {
continue;
}
keyValueMap.put(keys.get(i), values[i]);
}
return keyValueMap;
}
private static int computeCapacityHint(final int estimatedNumberOfItems) {
// Default load factor for HashMap is 0.75, so N * 1.5 will be at the load
// limit. We add +1 for a buffer.
return (int)Math.ceil(estimatedNumberOfItems * 1.5 + 1.0);
}
/**
* Returns a map of keys for which values were found in DB.
* <p>
* Note: Every key needs to have a related column family name in
* {@code columnFamilyHandleList}.
* </p>
*
* @param columnFamilyHandleList {@link java.util.List} containing
* {@link org.rocksdb.ColumnFamilyHandle} instances.
* @param keys List of keys for which values need to be retrieved.
* @return Map where key of map is the key passed by user and value for map
* entry is the corresponding value in DB.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
* @throws IllegalArgumentException thrown if the size of passed keys is not
* equal to the amount of passed column family handles.
*
* @deprecated Consider {@link #multiGetAsList(List, List)} instead.
*/
@Deprecated
public Map<byte[], byte[]> multiGet(
final List<ColumnFamilyHandle> columnFamilyHandleList,
final List<byte[]> keys) throws RocksDBException,
IllegalArgumentException {
assert(keys.size() != 0);
    // Check that the number of keys matches the number of column family
    // handles. If not, an exception must be thrown; otherwise a
    // segmentation fault would occur in the native code.
if (keys.size() != columnFamilyHandleList.size()) {
throw new IllegalArgumentException(
"For each key there must be a ColumnFamilyHandle.");
}
final long[] cfHandles = new long[columnFamilyHandleList.size()];
for (int i = 0; i < columnFamilyHandleList.size(); i++) {
cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
}
final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
final int keyOffsets[] = new int[keysArray.length];
final int keyLengths[] = new int[keysArray.length];
for(int i = 0; i < keyLengths.length; i++) {
keyLengths[i] = keysArray[i].length;
}
final byte[][] values = multiGet(nativeHandle_, keysArray, keyOffsets,
keyLengths, cfHandles);
final Map<byte[], byte[]> keyValueMap =
new HashMap<>(computeCapacityHint(values.length));
for(int i = 0; i < values.length; i++) {
if (values[i] == null) {
continue;
}
keyValueMap.put(keys.get(i), values[i]);
}
return keyValueMap;
}
/**
* Returns a map of keys for which values were found in DB.
*
* @param opt Read options.
   * @param keys List of keys for which values need to be retrieved.
* @return Map where key of map is the key passed by user and value for map
* entry is the corresponding value in DB.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*
* @deprecated Consider {@link #multiGetAsList(ReadOptions, List)} instead.
*/
@Deprecated
public Map<byte[], byte[]> multiGet(final ReadOptions opt,
final List<byte[]> keys) throws RocksDBException {
assert(keys.size() != 0);
final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
final int keyOffsets[] = new int[keysArray.length];
final int keyLengths[] = new int[keysArray.length];
for(int i = 0; i < keyLengths.length; i++) {
keyLengths[i] = keysArray[i].length;
}
final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_,
keysArray, keyOffsets, keyLengths);
final Map<byte[], byte[]> keyValueMap =
new HashMap<>(computeCapacityHint(values.length));
for(int i = 0; i < values.length; i++) {
if(values[i] == null) {
continue;
}
keyValueMap.put(keys.get(i), values[i]);
}
return keyValueMap;
}
/**
* Returns a map of keys for which values were found in DB.
* <p>
* Note: Every key needs to have a related column family name in
* {@code columnFamilyHandleList}.
* </p>
*
* @param opt Read options.
* @param columnFamilyHandleList {@link java.util.List} containing
* {@link org.rocksdb.ColumnFamilyHandle} instances.
   * @param keys List of keys for which values need to be retrieved.
* @return Map where key of map is the key passed by user and value for map
* entry is the corresponding value in DB.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
* @throws IllegalArgumentException thrown if the size of passed keys is not
* equal to the amount of passed column family handles.
*
* @deprecated Consider {@link #multiGetAsList(ReadOptions, List, List)}
* instead.
*/
@Deprecated
public Map<byte[], byte[]> multiGet(final ReadOptions opt,
final List<ColumnFamilyHandle> columnFamilyHandleList,
final List<byte[]> keys) throws RocksDBException {
assert(keys.size() != 0);
    // Check that the number of keys matches the number of column family
    // handles. If not, an exception must be thrown; otherwise a
    // segmentation fault would occur in the native code.
    if (keys.size() != columnFamilyHandleList.size()) {
throw new IllegalArgumentException(
"For each key there must be a ColumnFamilyHandle.");
}
final long[] cfHandles = new long[columnFamilyHandleList.size()];
for (int i = 0; i < columnFamilyHandleList.size(); i++) {
cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
}
final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
final int keyOffsets[] = new int[keysArray.length];
final int keyLengths[] = new int[keysArray.length];
for(int i = 0; i < keyLengths.length; i++) {
keyLengths[i] = keysArray[i].length;
}
final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_,
keysArray, keyOffsets, keyLengths, cfHandles);
final Map<byte[], byte[]> keyValueMap
= new HashMap<>(computeCapacityHint(values.length));
for(int i = 0; i < values.length; i++) {
if(values[i] == null) {
continue;
}
keyValueMap.put(keys.get(i), values[i]);
}
return keyValueMap;
}
/**
* Takes a list of keys, and returns a list of values for the given list of
* keys. List will contain null for keys which could not be found.
*
* @param keys List of keys for which values need to be retrieved.
* @return List of values for the given list of keys. List will contain
* null for keys which could not be found.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public List<byte[]> multiGetAsList(final List<byte[]> keys)
throws RocksDBException {
assert(keys.size() != 0);
final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
final int keyOffsets[] = new int[keysArray.length];
final int keyLengths[] = new int[keysArray.length];
for(int i = 0; i < keyLengths.length; i++) {
keyLengths[i] = keysArray[i].length;
}
return Arrays.asList(multiGet(nativeHandle_, keysArray, keyOffsets,
keyLengths));
}
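  /*
   * Usage sketch (illustrative only, assumes an already-open RocksDB
   * instance "db"): the returned list is positionally aligned with the
   * key list and contains null for keys that were not found.
   *
   *   final List<byte[]> keys = Arrays.asList(
   *       "k1".getBytes(StandardCharsets.UTF_8),
   *       "k2".getBytes(StandardCharsets.UTF_8));
   *   final List<byte[]> values = db.multiGetAsList(keys);
   *   for (int i = 0; i < keys.size(); i++) {
   *     if (values.get(i) == null) {
   *       // keys.get(i) was not found
   *     }
   *   }
   */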
/**
* Returns a list of values for the given list of keys. List will contain
* null for keys which could not be found.
* <p>
* Note: Every key needs to have a related column family name in
* {@code columnFamilyHandleList}.
* </p>
*
* @param columnFamilyHandleList {@link java.util.List} containing
* {@link org.rocksdb.ColumnFamilyHandle} instances.
* @param keys List of keys for which values need to be retrieved.
* @return List of values for the given list of keys. List will contain
* null for keys which could not be found.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
* @throws IllegalArgumentException thrown if the size of passed keys is not
* equal to the amount of passed column family handles.
*/
public List<byte[]> multiGetAsList(
final List<ColumnFamilyHandle> columnFamilyHandleList,
final List<byte[]> keys) throws RocksDBException,
IllegalArgumentException {
assert(keys.size() != 0);
    // Check that the number of keys matches the number of column family
    // handles. If not, an exception must be thrown; otherwise a
    // segmentation fault would occur in the native code.
if (keys.size() != columnFamilyHandleList.size()) {
throw new IllegalArgumentException(
"For each key there must be a ColumnFamilyHandle.");
}
final long[] cfHandles = new long[columnFamilyHandleList.size()];
for (int i = 0; i < columnFamilyHandleList.size(); i++) {
cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
}
final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
final int keyOffsets[] = new int[keysArray.length];
final int keyLengths[] = new int[keysArray.length];
for(int i = 0; i < keyLengths.length; i++) {
keyLengths[i] = keysArray[i].length;
}
return Arrays.asList(multiGet(nativeHandle_, keysArray, keyOffsets,
keyLengths, cfHandles));
}
/**
* Returns a list of values for the given list of keys. List will contain
* null for keys which could not be found.
*
* @param opt Read options.
   * @param keys List of keys for which values need to be retrieved.
* @return List of values for the given list of keys. List will contain
* null for keys which could not be found.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public List<byte[]> multiGetAsList(final ReadOptions opt,
final List<byte[]> keys) throws RocksDBException {
assert(keys.size() != 0);
final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
final int keyOffsets[] = new int[keysArray.length];
final int keyLengths[] = new int[keysArray.length];
for(int i = 0; i < keyLengths.length; i++) {
keyLengths[i] = keysArray[i].length;
}
return Arrays.asList(multiGet(nativeHandle_, opt.nativeHandle_,
keysArray, keyOffsets, keyLengths));
}
/**
* Returns a list of values for the given list of keys. List will contain
* null for keys which could not be found.
* <p>
* Note: Every key needs to have a related column family name in
* {@code columnFamilyHandleList}.
* </p>
*
* @param opt Read options.
* @param columnFamilyHandleList {@link java.util.List} containing
* {@link org.rocksdb.ColumnFamilyHandle} instances.
   * @param keys List of keys for which values need to be retrieved.
* @return List of values for the given list of keys. List will contain
* null for keys which could not be found.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
* @throws IllegalArgumentException thrown if the size of passed keys is not
* equal to the amount of passed column family handles.
*/
public List<byte[]> multiGetAsList(final ReadOptions opt,
final List<ColumnFamilyHandle> columnFamilyHandleList,
final List<byte[]> keys) throws RocksDBException {
assert(keys.size() != 0);
    // Check that the number of keys matches the number of column family
    // handles. If not, an exception must be thrown; otherwise a
    // segmentation fault would occur in the native code.
    if (keys.size() != columnFamilyHandleList.size()) {
throw new IllegalArgumentException(
"For each key there must be a ColumnFamilyHandle.");
}
final long[] cfHandles = new long[columnFamilyHandleList.size()];
for (int i = 0; i < columnFamilyHandleList.size(); i++) {
cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
}
final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
final int keyOffsets[] = new int[keysArray.length];
final int keyLengths[] = new int[keysArray.length];
for(int i = 0; i < keyLengths.length; i++) {
keyLengths[i] = keysArray[i].length;
}
return Arrays.asList(multiGet(nativeHandle_, opt.nativeHandle_,
keysArray, keyOffsets, keyLengths, cfHandles));
}
/**
* Remove the database entry (if any) for "key". Returns OK on
* success, and a non-OK status on error. It is not an error if "key"
* did not exist in the database.
*
* @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*
* @deprecated Use {@link #delete(byte[])}
*/
@Deprecated
public void remove(final byte[] key) throws RocksDBException {
delete(key);
}
/**
* Delete the database entry (if any) for "key". Returns OK on
* success, and a non-OK status on error. It is not an error if "key"
* did not exist in the database.
*
* @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void delete(final byte[] key) throws RocksDBException {
delete(nativeHandle_, key, 0, key.length);
}
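  /*
   * Usage sketch (illustrative only, assumes an already-open RocksDB
   * instance "db"): deleting a key that may or may not exist is not an
   * error.
   *
   *   db.delete("key1".getBytes(StandardCharsets.UTF_8));
   */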
/**
* Delete the database entry (if any) for "key". Returns OK on
* success, and a non-OK status on error. It is not an error if "key"
* did not exist in the database.
*
* @param key Key to delete within database
   * @param offset the offset of the "key" array to be used, must be
   *     non-negative and no larger than "key".length
   * @param len the length of the "key" array to be used, must be
   *     non-negative and no larger than ("key".length - offset)
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void delete(final byte[] key, int offset, int len) throws RocksDBException {
delete(nativeHandle_, key, offset, len);
}
/**
* Remove the database entry (if any) for "key". Returns OK on
* success, and a non-OK status on error. It is not an error if "key"
* did not exist in the database.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*
* @deprecated Use {@link #delete(ColumnFamilyHandle, byte[])}
*/
@Deprecated
public void remove(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key) throws RocksDBException {
delete(columnFamilyHandle, key);
}
/**
* Delete the database entry (if any) for "key". Returns OK on
* success, and a non-OK status on error. It is not an error if "key"
* did not exist in the database.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void delete(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key) throws RocksDBException {
delete(nativeHandle_, key, 0, key.length, columnFamilyHandle.nativeHandle_);
}
/**
* Delete the database entry (if any) for "key". Returns OK on
* success, and a non-OK status on error. It is not an error if "key"
* did not exist in the database.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param key Key to delete within database
   * @param offset the offset of the "key" array to be used, must be
   *     non-negative and no larger than "key".length
   * @param len the length of the "key" array to be used, must be
   *     non-negative and no larger than ("key".length - offset)
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void delete(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key, int offset, int len) throws RocksDBException {
delete(nativeHandle_, key, offset, len, columnFamilyHandle.nativeHandle_);
}
/**
* Remove the database entry (if any) for "key". Returns OK on
* success, and a non-OK status on error. It is not an error if "key"
* did not exist in the database.
*
* @param writeOpt WriteOptions to be used with delete operation
* @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*
* @deprecated Use {@link #delete(WriteOptions, byte[])}
*/
@Deprecated
public void remove(final WriteOptions writeOpt, final byte[] key)
throws RocksDBException {
delete(writeOpt, key);
}
/**
* Delete the database entry (if any) for "key". Returns OK on
* success, and a non-OK status on error. It is not an error if "key"
* did not exist in the database.
*
* @param writeOpt WriteOptions to be used with delete operation
* @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void delete(final WriteOptions writeOpt, final byte[] key)
throws RocksDBException {
delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length);
}
/**
* Delete the database entry (if any) for "key". Returns OK on
* success, and a non-OK status on error. It is not an error if "key"
* did not exist in the database.
*
* @param writeOpt WriteOptions to be used with delete operation
* @param key Key to delete within database
   * @param offset the offset of the "key" array to be used, must be
   *     non-negative and no larger than "key".length
   * @param len the length of the "key" array to be used, must be
   *     non-negative and no larger than ("key".length - offset)
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void delete(final WriteOptions writeOpt, final byte[] key, int offset, int len)
throws RocksDBException {
delete(nativeHandle_, writeOpt.nativeHandle_, key, offset, len);
}
/**
* Remove the database entry (if any) for "key". Returns OK on
* success, and a non-OK status on error. It is not an error if "key"
* did not exist in the database.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param writeOpt WriteOptions to be used with delete operation
* @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*
* @deprecated Use {@link #delete(ColumnFamilyHandle, WriteOptions, byte[])}
*/
@Deprecated
public void remove(final ColumnFamilyHandle columnFamilyHandle,
final WriteOptions writeOpt, final byte[] key)
throws RocksDBException {
delete(columnFamilyHandle, writeOpt, key);
}
/**
* Delete the database entry (if any) for "key". Returns OK on
* success, and a non-OK status on error. It is not an error if "key"
* did not exist in the database.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param writeOpt WriteOptions to be used with delete operation
* @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void delete(final ColumnFamilyHandle columnFamilyHandle,
final WriteOptions writeOpt, final byte[] key)
throws RocksDBException {
delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length,
columnFamilyHandle.nativeHandle_);
}
/**
* Delete the database entry (if any) for "key". Returns OK on
* success, and a non-OK status on error. It is not an error if "key"
* did not exist in the database.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param writeOpt WriteOptions to be used with delete operation
* @param key Key to delete within database
   * @param offset the offset of the "key" array to be used, must be
   *     non-negative and no larger than "key".length
   * @param len the length of the "key" array to be used, must be
   *     non-negative and no larger than ("key".length - offset)
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void delete(final ColumnFamilyHandle columnFamilyHandle,
final WriteOptions writeOpt, final byte[] key, int offset, int len)
throws RocksDBException {
delete(nativeHandle_, writeOpt.nativeHandle_, key, offset, len,
columnFamilyHandle.nativeHandle_);
}
/**
* Remove the database entry for {@code key}. Requires that the key exists
* and was not overwritten. It is not an error if the key did not exist
* in the database.
*
* If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
* times), then the result of calling SingleDelete() on this key is undefined.
* SingleDelete() only behaves correctly if there has been only one Put()
* for this key since the previous call to SingleDelete() for this key.
*
* This feature is currently an experimental performance optimization
* for a very specific workload. It is up to the caller to ensure that
* SingleDelete is only used for a key that is not deleted using Delete() or
* written using Merge(). Mixing SingleDelete operations with Deletes and
* Merges can result in undefined behavior.
*
* @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
@Experimental("Performance optimization for a very specific workload")
public void singleDelete(final byte[] key) throws RocksDBException {
singleDelete(nativeHandle_, key, key.length);
}
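  /*
   * Usage sketch (illustrative only, assumes an already-open RocksDB
   * instance "db"): singleDelete is only well-defined when the key was
   * written by exactly one Put since the previous SingleDelete.
   *
   *   final byte[] k = "session-token".getBytes(StandardCharsets.UTF_8);
   *   db.put(k, "abc".getBytes(StandardCharsets.UTF_8)); // single Put
   *   db.singleDelete(k);                                // removes that Put
   */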
/**
* Remove the database entry for {@code key}. Requires that the key exists
* and was not overwritten. It is not an error if the key did not exist
* in the database.
*
* If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
* times), then the result of calling SingleDelete() on this key is undefined.
* SingleDelete() only behaves correctly if there has been only one Put()
* for this key since the previous call to SingleDelete() for this key.
*
* This feature is currently an experimental performance optimization
* for a very specific workload. It is up to the caller to ensure that
* SingleDelete is only used for a key that is not deleted using Delete() or
* written using Merge(). Mixing SingleDelete operations with Deletes and
* Merges can result in undefined behavior.
*
* @param columnFamilyHandle The column family to delete the key from
* @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
@Experimental("Performance optimization for a very specific workload")
public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key) throws RocksDBException {
singleDelete(nativeHandle_, key, key.length,
columnFamilyHandle.nativeHandle_);
}
/**
* Remove the database entry for {@code key}. Requires that the key exists
* and was not overwritten. It is not an error if the key did not exist
* in the database.
*
* If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
* times), then the result of calling SingleDelete() on this key is undefined.
* SingleDelete() only behaves correctly if there has been only one Put()
* for this key since the previous call to SingleDelete() for this key.
*
* This feature is currently an experimental performance optimization
* for a very specific workload. It is up to the caller to ensure that
* SingleDelete is only used for a key that is not deleted using Delete() or
* written using Merge(). Mixing SingleDelete operations with Deletes and
* Merges can result in undefined behavior.
*
* Note: consider setting {@link WriteOptions#setSync(boolean)} true.
*
* @param writeOpt Write options for the delete
* @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
@Experimental("Performance optimization for a very specific workload")
public void singleDelete(final WriteOptions writeOpt, final byte[] key)
throws RocksDBException {
singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length);
}
/**
* Remove the database entry for {@code key}. Requires that the key exists
* and was not overwritten. It is not an error if the key did not exist
* in the database.
*
* If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
* times), then the result of calling SingleDelete() on this key is undefined.
* SingleDelete() only behaves correctly if there has been only one Put()
* for this key since the previous call to SingleDelete() for this key.
*
* This feature is currently an experimental performance optimization
* for a very specific workload. It is up to the caller to ensure that
* SingleDelete is only used for a key that is not deleted using Delete() or
* written using Merge(). Mixing SingleDelete operations with Deletes and
* Merges can result in undefined behavior.
*
* Note: consider setting {@link WriteOptions#setSync(boolean)} true.
*
* @param columnFamilyHandle The column family to delete the key from
* @param writeOpt Write options for the delete
* @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
@Experimental("Performance optimization for a very specific workload")
public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
final WriteOptions writeOpt, final byte[] key) throws RocksDBException {
singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length,
columnFamilyHandle.nativeHandle_);
}
/**
   * DB implementations can export properties about their state
   * via this method on a per-column-family level.
*
* <p>If {@code property} is a valid property understood by this DB
* implementation, fills {@code value} with its current value and
* returns true. Otherwise returns false.</p>
*
* <p>Valid property names include:
* <ul>
* <li>"rocksdb.num-files-at-level<N>" - return the number of files at
* level <N>, where <N> is an ASCII representation of a level
* number (e.g. "0").</li>
* <li>"rocksdb.stats" - returns a multi-line string that describes statistics
* about the internal operation of the DB.</li>
* <li>"rocksdb.sstables" - returns a multi-line string that describes all
* of the sstables that make up the db contents.</li>
* </ul>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param property to be fetched. See above for examples
* @return property value
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public String getProperty(final ColumnFamilyHandle columnFamilyHandle,
final String property) throws RocksDBException {
return getProperty0(nativeHandle_, columnFamilyHandle.nativeHandle_,
property, property.length());
}
/**
   * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
   * including "beginKey" and excluding "endKey". Returns OK on success, and
   * a non-OK status on error. It is not an error if no keys exist in the
   * range ["beginKey", "endKey").
*
* @param beginKey
* First key to delete within database (included)
* @param endKey
* Last key to delete within database (excluded)
*
* @throws RocksDBException
* thrown if error happens in underlying native library.
*/
public void deleteRange(final byte[] beginKey, final byte[] endKey) throws RocksDBException {
deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0, endKey.length);
}
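  /*
   * Usage sketch (illustrative only, assumes an already-open RocksDB
   * instance "db"): removes every key in ["user:0000", "user:9999"),
   * i.e. the end key itself is not deleted.
   *
   *   db.deleteRange("user:0000".getBytes(StandardCharsets.UTF_8),
   *       "user:9999".getBytes(StandardCharsets.UTF_8));
   */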
/**
   * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
   * including "beginKey" and excluding "endKey". Returns OK on success, and
   * a non-OK status on error. It is not an error if no keys exist in the
   * range ["beginKey", "endKey").
*
* @param columnFamilyHandle
* {@link org.rocksdb.ColumnFamilyHandle} instance
* @param beginKey
* First key to delete within database (included)
* @param endKey
* Last key to delete within database (excluded)
*
* @throws RocksDBException
* thrown if error happens in underlying native library.
*/
public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, final byte[] beginKey,
final byte[] endKey) throws RocksDBException {
deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0, endKey.length,
columnFamilyHandle.nativeHandle_);
}
/**
   * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
   * including "beginKey" and excluding "endKey". Returns OK on success, and
   * a non-OK status on error. It is not an error if no keys exist in the
   * range ["beginKey", "endKey").
*
* @param writeOpt
* WriteOptions to be used with delete operation
* @param beginKey
* First key to delete within database (included)
* @param endKey
* Last key to delete within database (excluded)
*
* @throws RocksDBException
* thrown if error happens in underlying native library.
*/
public void deleteRange(final WriteOptions writeOpt, final byte[] beginKey, final byte[] endKey)
throws RocksDBException {
deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0, beginKey.length, endKey, 0,
endKey.length);
}
/**
   * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
   * including "beginKey" and excluding "endKey". Returns OK on success, and
   * a non-OK status on error. It is not an error if no keys exist in the
   * range ["beginKey", "endKey").
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param writeOpt
* WriteOptions to be used with delete operation
* @param beginKey
* First key to delete within database (included)
* @param endKey
* Last key to delete within database (excluded)
*
* @throws RocksDBException
* thrown if error happens in underlying native library.
*/
public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpt,
final byte[] beginKey, final byte[] endKey) throws RocksDBException {
deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0, beginKey.length, endKey, 0,
endKey.length, columnFamilyHandle.nativeHandle_);
}
/**
* DB implementations can export properties about their state
* via this method. If "property" is a valid property understood by this
* DB implementation, fills "*value" with its current value and returns
* true. Otherwise returns false.
*
* <p>Valid property names include:
* <ul>
* <li>"rocksdb.num-files-at-level<N>" - return the number of files at
* level <N>, where <N> is an ASCII representation of a level
* number (e.g. "0").</li>
* <li>"rocksdb.stats" - returns a multi-line string that describes statistics
* about the internal operation of the DB.</li>
* <li>"rocksdb.sstables" - returns a multi-line string that describes all
* of the sstables that make up the db contents.</li>
*</ul>
*
* @param property to be fetched. See above for examples
* @return property value
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public String getProperty(final String property) throws RocksDBException {
return getProperty0(nativeHandle_, property, property.length());
}
/**
* <p> Similar to GetProperty(), but only works for a subset of properties
* whose return value is a numerical value. Return the value as long.</p>
*
   * <p><strong>Note</strong>: As the returned property is of type
   * {@code uint64_t} on the C++ side, the returned value can be negative
   * because Java 7 only supports signed long values.</p>
   *
   * <p><strong>Java 7</strong>: To mitigate the problem of the non-existent
   * unsigned long type, values should be encapsulated using
   * {@link java.math.BigInteger} to reflect the correct value. The correct
   * behavior is guaranteed if {@code 2^64} is added to negative values.</p>
*
* <p><strong>Java 8</strong>: In Java 8 the value should be treated as
* unsigned long using provided methods of type {@link Long}.</p>
*
* @param property to be fetched.
*
* @return numerical property value.
*
* @throws RocksDBException if an error happens in the underlying native code.
*/
public long getLongProperty(final String property) throws RocksDBException {
return getLongProperty(nativeHandle_, property, property.length());
}
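  /*
   * Usage sketch (illustrative only, assumes an already-open RocksDB
   * instance "db"): the property names are examples and their availability
   * depends on the RocksDB version; on Java 8 the unsigned value can be
   * rendered with Long.toUnsignedString.
   *
   *   final String stats = db.getProperty("rocksdb.stats");
   *   final long estimatedKeys = db.getLongProperty("rocksdb.estimate-num-keys");
   *   final String asUnsigned = Long.toUnsignedString(estimatedKeys);
   */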
/**
* <p> Similar to GetProperty(), but only works for a subset of properties
* whose return value is a numerical value. Return the value as long.</p>
*
   * <p><strong>Note</strong>: As the returned property is of type
   * {@code uint64_t} on the C++ side, the returned value can be negative
   * because Java 7 only supports signed long values.</p>
   *
   * <p><strong>Java 7</strong>: To mitigate the problem of the non-existent
   * unsigned long type, values should be encapsulated using
   * {@link java.math.BigInteger} to reflect the correct value. The correct
   * behavior is guaranteed if {@code 2^64} is added to negative values.</p>
*
* <p><strong>Java 8</strong>: In Java 8 the value should be treated as
* unsigned long using provided methods of type {@link Long}.</p>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param property to be fetched.
*
* @return numerical property value
*
* @throws RocksDBException if an error happens in the underlying native code.
*/
public long getLongProperty(final ColumnFamilyHandle columnFamilyHandle,
final String property) throws RocksDBException {
return getLongProperty(nativeHandle_, columnFamilyHandle.nativeHandle_,
property, property.length());
}
/**
   * <p>Return the sum of the getLongProperty values of all the column
   * families.</p>
   *
   * <p><strong>Note</strong>: As the returned property is of type
   * {@code uint64_t} on the C++ side, the returned value can be negative
   * because Java 7 only supports signed long values.</p>
   *
   * <p><strong>Java 7</strong>: To mitigate the problem of the non-existent
   * unsigned long type, values should be encapsulated using
   * {@link java.math.BigInteger} to reflect the correct value. The correct
   * behavior is guaranteed if {@code 2^64} is added to negative values.</p>
*
* <p><strong>Java 8</strong>: In Java 8 the value should be treated as
* unsigned long using provided methods of type {@link Long}.</p>
*
* @param property to be fetched.
*
* @return numerical property value
*
* @throws RocksDBException if an error happens in the underlying native code.
*/
public long getAggregatedLongProperty(final String property) throws RocksDBException {
return getAggregatedLongProperty(nativeHandle_, property, property.length());
}
/**
* <p>Return a heap-allocated iterator over the contents of the
* database. The result of newIterator() is initially invalid
* (caller must call one of the Seek methods on the iterator
* before using it).</p>
*
* <p>Caller should close the iterator when it is no longer needed.
* The returned iterator should be closed before this db is closed.
* </p>
*
* @return instance of iterator object.
*/
public RocksIterator newIterator() {
return new RocksIterator(this, iterator(nativeHandle_));
}
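  /*
   * Usage sketch (illustrative only, assumes an already-open RocksDB
   * instance "db"): a full forward scan; the iterator must be positioned
   * with a seek before use and closed before the db.
   *
   *   final RocksIterator it = db.newIterator();
   *   try {
   *     for (it.seekToFirst(); it.isValid(); it.next()) {
   *       final byte[] k = it.key();
   *       final byte[] v = it.value();
   *     }
   *   } finally {
   *     it.close();
   *   }
   */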
/**
* <p>Return a heap-allocated iterator over the contents of the
* database. The result of newIterator() is initially invalid
* (caller must call one of the Seek methods on the iterator
* before using it).</p>
*
* <p>Caller should close the iterator when it is no longer needed.
* The returned iterator should be closed before this db is closed.
* </p>
*
* @param readOptions {@link ReadOptions} instance.
* @return instance of iterator object.
*/
public RocksIterator newIterator(final ReadOptions readOptions) {
return new RocksIterator(this, iterator(nativeHandle_,
readOptions.nativeHandle_));
}
/**
* <p>Return a handle to the current DB state. Iterators created with
* this handle will all observe a stable snapshot of the current DB
* state. The caller must call ReleaseSnapshot(result) when the
* snapshot is no longer needed.</p>
*
   * <p>null will be returned if the DB fails to take a snapshot or does
   * not support snapshots.</p>
*
* @return Snapshot {@link Snapshot} instance
*/
public Snapshot getSnapshot() {
long snapshotHandle = getSnapshot(nativeHandle_);
if (snapshotHandle != 0) {
return new Snapshot(snapshotHandle);
}
return null;
}
/**
* Release a previously acquired snapshot. The caller must not
* use "snapshot" after this call.
*
* @param snapshot {@link Snapshot} instance
*/
public void releaseSnapshot(final Snapshot snapshot) {
if (snapshot != null) {
releaseSnapshot(nativeHandle_, snapshot.nativeHandle_);
}
}
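  /*
   * Usage sketch (illustrative only, assumes an already-open RocksDB
   * instance "db"): reads through a ReadOptions bound to the snapshot see
   * the DB state as of getSnapshot(); release the snapshot when done.
   *
   *   final Snapshot snapshot = db.getSnapshot();
   *   try (final ReadOptions readOptions = new ReadOptions()) {
   *     readOptions.setSnapshot(snapshot);
   *     final byte[] value =
   *         db.get(readOptions, "key1".getBytes(StandardCharsets.UTF_8));
   *   } finally {
   *     db.releaseSnapshot(snapshot);
   *   }
   */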
/**
* <p>Return a heap-allocated iterator over the contents of the
* database. The result of newIterator() is initially invalid
* (caller must call one of the Seek methods on the iterator
* before using it).</p>
*
* <p>Caller should close the iterator when it is no longer needed.
* The returned iterator should be closed before this db is closed.
* </p>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @return instance of iterator object.
*/
public RocksIterator newIterator(
final ColumnFamilyHandle columnFamilyHandle) {
return new RocksIterator(this, iteratorCF(nativeHandle_,
columnFamilyHandle.nativeHandle_));
}
/**
* <p>Return a heap-allocated iterator over the contents of the
* database. The result of newIterator() is initially invalid
* (caller must call one of the Seek methods on the iterator
* before using it).</p>
*
* <p>Caller should close the iterator when it is no longer needed.
* The returned iterator should be closed before this db is closed.
* </p>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param readOptions {@link ReadOptions} instance.
* @return instance of iterator object.
*/
public RocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle,
final ReadOptions readOptions) {
return new RocksIterator(this, iteratorCF(nativeHandle_,
columnFamilyHandle.nativeHandle_, readOptions.nativeHandle_));
}
/**
   * Returns iterators from a consistent database state across multiple
   * column families. Iterators are heap allocated and need to be closed
   * before the db is closed.
*
* @param columnFamilyHandleList {@link java.util.List} containing
* {@link org.rocksdb.ColumnFamilyHandle} instances.
* @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator}
* instances
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public List<RocksIterator> newIterators(
final List<ColumnFamilyHandle> columnFamilyHandleList)
throws RocksDBException {
return newIterators(columnFamilyHandleList, new ReadOptions());
}
/**
   * Returns iterators from a consistent database state across multiple
   * column families. Iterators are heap allocated and need to be closed
   * before the db is closed.
*
* @param columnFamilyHandleList {@link java.util.List} containing
* {@link org.rocksdb.ColumnFamilyHandle} instances.
* @param readOptions {@link ReadOptions} instance.
* @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator}
* instances
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public List<RocksIterator> newIterators(
final List<ColumnFamilyHandle> columnFamilyHandleList,
final ReadOptions readOptions) throws RocksDBException {
final long[] columnFamilyHandles = new long[columnFamilyHandleList.size()];
for (int i = 0; i < columnFamilyHandleList.size(); i++) {
columnFamilyHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
}
final long[] iteratorRefs = iterators(nativeHandle_, columnFamilyHandles,
readOptions.nativeHandle_);
final List<RocksIterator> iterators = new ArrayList<>(
columnFamilyHandleList.size());
    for (int i = 0; i < columnFamilyHandleList.size(); i++) {
iterators.add(new RocksIterator(this, iteratorRefs[i]));
}
return iterators;
}
/**
* Gets the handle for the default column family
*
* @return The handle of the default column family
*/
public ColumnFamilyHandle getDefaultColumnFamily() {
final ColumnFamilyHandle cfHandle = new ColumnFamilyHandle(this,
getDefaultColumnFamily(nativeHandle_));
cfHandle.disOwnNativeHandle();
return cfHandle;
}
/**
* Creates a new column family with the name columnFamilyName and
* allocates a ColumnFamilyHandle within an internal structure.
* The ColumnFamilyHandle is automatically disposed with DB disposal.
*
* @param columnFamilyDescriptor column family to be created.
* @return {@link org.rocksdb.ColumnFamilyHandle} instance.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public ColumnFamilyHandle createColumnFamily(
final ColumnFamilyDescriptor columnFamilyDescriptor)
throws RocksDBException {
return new ColumnFamilyHandle(this, createColumnFamily(nativeHandle_,
columnFamilyDescriptor.columnFamilyName(),
columnFamilyDescriptor.columnFamilyOptions().nativeHandle_));
}
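  /*
   * Usage sketch (illustrative only, assumes an already-open RocksDB
   * instance "db"): create a column family at runtime and write to it.
   *
   *   final ColumnFamilyHandle cf = db.createColumnFamily(
   *       new ColumnFamilyDescriptor("new_cf".getBytes(StandardCharsets.UTF_8),
   *           new ColumnFamilyOptions()));
   *   db.put(cf, "key1".getBytes(StandardCharsets.UTF_8),
   *       "value1".getBytes(StandardCharsets.UTF_8));
   */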
/**
* Drops the column family specified by {@code columnFamilyHandle}. This call
* only records a drop record in the manifest and prevents the column
* family from flushing and compacting.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void dropColumnFamily(final ColumnFamilyHandle columnFamilyHandle)
throws RocksDBException {
dropColumnFamily(nativeHandle_, columnFamilyHandle.nativeHandle_);
}
  /**
   * Bulk drop column families. This call only records drop records in the
   * manifest and prevents the column families from flushing and compacting.
   * In case of error, the request may succeed partially. User may call
   * ListColumnFamilies to check the result.
   *
   * @param columnFamilies the column families to drop
   *
   * @throws RocksDBException thrown if error happens in underlying
   *     native library.
   */
public void dropColumnFamilies(
final List<ColumnFamilyHandle> columnFamilies) throws RocksDBException {
final long[] cfHandles = new long[columnFamilies.size()];
for (int i = 0; i < columnFamilies.size(); i++) {
cfHandles[i] = columnFamilies.get(i).nativeHandle_;
}
dropColumnFamilies(nativeHandle_, cfHandles);
}
/**
* <p>Flush all memory table data.</p>
*
* <p>Note: it must be ensured that the FlushOptions instance
* is not GC'ed before this method finishes. If the wait parameter is
* set to false, flush processing is asynchronous.</p>
*
* @param flushOptions {@link org.rocksdb.FlushOptions} instance.
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void flush(final FlushOptions flushOptions)
throws RocksDBException {
flush(nativeHandle_, flushOptions.nativeHandle_);
}
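  /*
   * Usage sketch (illustrative only, assumes an already-open RocksDB
   * instance "db"): a synchronous flush of the memtables; keep the
   * FlushOptions instance reachable until the call returns.
   *
   *   try (final FlushOptions flushOptions =
   *       new FlushOptions().setWaitForFlush(true)) {
   *     db.flush(flushOptions);
   *   }
   */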
/**
* <p>Flush all memory table data.</p>
*
* <p>Note: it must be ensured that the FlushOptions instance
* is not GC'ed before this method finishes. If the wait parameter is
* set to false, flush processing is asynchronous.</p>
*
* @param flushOptions {@link org.rocksdb.FlushOptions} instance.
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance.
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void flush(final FlushOptions flushOptions,
final ColumnFamilyHandle columnFamilyHandle) throws RocksDBException {
flush(nativeHandle_, flushOptions.nativeHandle_,
columnFamilyHandle.nativeHandle_);
}
/**
* <p>Range compaction of database.</p>
* <p><strong>Note</strong>: After the database has been compacted,
* all data will have been pushed down to the last level containing
* any data.</p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange(boolean, int, int)}</li>
* <li>{@link #compactRange(byte[], byte[])}</li>
* <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li>
* </ul>
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void compactRange() throws RocksDBException {
compactRange0(nativeHandle_, false, -1, 0);
}
/**
* <p>Range compaction of database.</p>
* <p><strong>Note</strong>: After the database has been compacted,
* all data will have been pushed down to the last level containing
* any data.</p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange()}</li>
* <li>{@link #compactRange(boolean, int, int)}</li>
* <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li>
* </ul>
*
* @param begin start of key range (included in range)
* @param end end of key range (excluded from range)
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void compactRange(final byte[] begin, final byte[] end)
throws RocksDBException {
compactRange0(nativeHandle_, begin, begin.length, end,
end.length, false, -1, 0);
}
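  /*
   * Usage sketch (illustrative only, assumes an already-open RocksDB
   * instance "db"): compact only the keys in ["a", "m"); a full-database
   * compaction is simply db.compactRange().
   *
   *   db.compactRange("a".getBytes(StandardCharsets.UTF_8),
   *       "m".getBytes(StandardCharsets.UTF_8));
   */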
/**
* <p>Range compaction of database.</p>
* <p><strong>Note</strong>: After the database has been compacted,
* all data will have been pushed down to the last level containing
* any data.</p>
*
* <p>Compaction outputs should be placed in options.db_paths
* [target_path_id]. Behavior is undefined if target_path_id is
* out of range.</p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange()}</li>
* <li>{@link #compactRange(byte[], byte[])}</li>
* <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li>
* </ul>
*
* @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
*
* @param reduce_level reduce level after compaction
* @param target_level target level to compact to
* @param target_path_id the target path id of output path
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
@Deprecated
public void compactRange(final boolean reduce_level,
final int target_level, final int target_path_id)
throws RocksDBException {
compactRange0(nativeHandle_, reduce_level,
target_level, target_path_id);
}
/**
* <p>Range compaction of database.</p>
* <p><strong>Note</strong>: After the database has been compacted,
* all data will have been pushed down to the last level containing
* any data.</p>
*
* <p>Compaction outputs should be placed in options.db_paths
* [target_path_id]. Behavior is undefined if target_path_id is
* out of range.</p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange()}</li>
* <li>{@link #compactRange(boolean, int, int)}</li>
* <li>{@link #compactRange(byte[], byte[])}</li>
* </ul>
*
* @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
*
* @param begin start of key range (included in range)
* @param end end of key range (excluded from range)
* @param reduce_level reduce level after compaction
* @param target_level target level to compact to
* @param target_path_id the target path id of output path
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
@Deprecated
public void compactRange(final byte[] begin, final byte[] end,
final boolean reduce_level, final int target_level,
final int target_path_id) throws RocksDBException {
compactRange0(nativeHandle_, begin, begin.length, end, end.length,
reduce_level, target_level, target_path_id);
}
/**
* <p>Range compaction of column family.</p>
* <p><strong>Note</strong>: After the database has been compacted,
* all data will have been pushed down to the last level containing
* any data.</p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>
* {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
* </li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
* </li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
* boolean, int, int)}
* </li>
* </ul>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance.
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void compactRange(final ColumnFamilyHandle columnFamilyHandle)
throws RocksDBException {
compactRange(nativeHandle_, false, -1, 0,
columnFamilyHandle.nativeHandle_);
}
/**
* <p>Range compaction of column family.</p>
* <p><strong>Note</strong>: After the database has been compacted,
* all data will have been pushed down to the last level containing
* any data.</p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange(ColumnFamilyHandle)}</li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
* </li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
* boolean, int, int)}
* </li>
* </ul>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance.
* @param begin start of key range (included in range)
* @param end end of key range (excluded from range)
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
final byte[] begin, final byte[] end) throws RocksDBException {
compactRange(nativeHandle_, begin, begin.length, end, end.length,
false, -1, 0, columnFamilyHandle.nativeHandle_);
}
/**
* <p>Range compaction of column family.</p>
* <p><strong>Note</strong>: After the database has been compacted,
* all data will have been pushed down to the last level containing
* any data.</p>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance.
* @param begin start of key range (included in range)
* @param end end of key range (excluded from range)
* @param compactRangeOptions options for the compaction
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
      final byte[] begin, final byte[] end,
      final CompactRangeOptions compactRangeOptions) throws RocksDBException {
compactRange(nativeHandle_, begin, begin.length, end, end.length,
compactRangeOptions.nativeHandle_, columnFamilyHandle.nativeHandle_);
}
/**
* <p>Range compaction of column family.</p>
* <p><strong>Note</strong>: After the database has been compacted,
* all data will have been pushed down to the last level containing
* any data.</p>
*
* <p>Compaction outputs should be placed in options.db_paths
* [target_path_id]. Behavior is undefined if target_path_id is
* out of range.</p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange(ColumnFamilyHandle)}</li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
* </li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
* boolean, int, int)}
* </li>
* </ul>
*
* @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance.
* @param reduce_level reduce level after compaction
* @param target_level target level to compact to
* @param target_path_id the target path id of output path
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
@Deprecated
public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
final boolean reduce_level, final int target_level,
final int target_path_id) throws RocksDBException {
compactRange(nativeHandle_, reduce_level, target_level,
target_path_id, columnFamilyHandle.nativeHandle_);
}
/**
* <p>Range compaction of column family.</p>
* <p><strong>Note</strong>: After the database has been compacted,
* all data will have been pushed down to the last level containing
* any data.</p>
*
* <p>Compaction outputs should be placed in options.db_paths
* [target_path_id]. Behavior is undefined if target_path_id is
* out of range.</p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange(ColumnFamilyHandle)}</li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
* </li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
* </li>
* </ul>
*
* @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance.
* @param begin start of key range (included in range)
* @param end end of key range (excluded from range)
* @param reduce_level reduce level after compaction
* @param target_level target level to compact to
* @param target_path_id the target path id of output path
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
@Deprecated
public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
final byte[] begin, final byte[] end, final boolean reduce_level,
final int target_level, final int target_path_id)
throws RocksDBException {
compactRange(nativeHandle_, begin, begin.length, end, end.length,
reduce_level, target_level, target_path_id,
columnFamilyHandle.nativeHandle_);
}
/**
   * This function will wait until all currently running background processes
   * finish. After it returns, no background process will be run until
   * {@link #continueBackgroundWork()} is called.
*
* @throws RocksDBException If an error occurs when pausing background work
*/
public void pauseBackgroundWork() throws RocksDBException {
pauseBackgroundWork(nativeHandle_);
}
/**
   * Resumes background work which was suspended by
   * previously calling {@link #pauseBackgroundWork()}.
*
* @throws RocksDBException If an error occurs when resuming background work
*/
public void continueBackgroundWork() throws RocksDBException {
continueBackgroundWork(nativeHandle_);
}
/**
* <p>The sequence number of the most recent transaction.</p>
*
* @return sequence number of the most
* recent transaction.
*/
public long getLatestSequenceNumber() {
return getLatestSequenceNumber(nativeHandle_);
}
/**
* <p>Prevent file deletions. Compactions will continue to occur,
* but no obsolete files will be deleted. Calling this multiple
   * times has the same effect as calling it once.</p>
*
* @throws RocksDBException thrown if operation was not performed
* successfully.
*/
public void disableFileDeletions() throws RocksDBException {
disableFileDeletions(nativeHandle_);
}
/**
* <p>Allow compactions to delete obsolete files.
* If force == true, the call to EnableFileDeletions()
* will guarantee that file deletions are enabled after
* the call, even if DisableFileDeletions() was called
* multiple times before.</p>
*
* <p>If force == false, EnableFileDeletions will only
* enable file deletion after it's been called at least
* as many times as DisableFileDeletions(), enabling
* the two methods to be called by two threads
* concurrently without synchronization
* -- i.e., file deletions will be enabled only after both
* threads call EnableFileDeletions()</p>
*
* @param force boolean value described above.
*
* @throws RocksDBException thrown if operation was not performed
* successfully.
*/
public void enableFileDeletions(final boolean force)
throws RocksDBException {
enableFileDeletions(nativeHandle_, force);
}
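  // Illustrative usage (a hedged sketch, not part of the original source): pausing
  // file deletions while copying live SST files for an external backup, then
  // re-enabling them without forcing.
  //
  //   db.disableFileDeletions();
  //   try {
  //     // copy the live files somewhere safe
  //   } finally {
  //     db.enableFileDeletions(false);
  //   }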
/**
   * <p>Returns an iterator that is positioned at a write-batch containing
   * seq_number. If the sequence number is non-existent, it returns an iterator
   * at the first available seq_no after the requested seq_no.</p>
   *
   * <p>WAL_ttl_seconds or WAL_size_limit_MB must be set to large values to
   * use this API, otherwise the WAL files will get
   * cleared aggressively and the iterator might become invalid before
   * an update is read.</p>
*
* @param sequenceNumber sequence number offset
*
* @return {@link org.rocksdb.TransactionLogIterator} instance.
*
* @throws org.rocksdb.RocksDBException if iterator cannot be retrieved
* from native-side.
*/
public TransactionLogIterator getUpdatesSince(final long sequenceNumber)
throws RocksDBException {
return new TransactionLogIterator(
getUpdatesSince(nativeHandle_, sequenceNumber));
}
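  // Illustrative usage (a hedged sketch, not part of the original source; assumes
  // WAL_ttl_seconds/WAL_size_limit_MB are configured as described above): replaying
  // every write batch retained in the WAL from sequence number 0.
  //
  //   try (final TransactionLogIterator it = db.getUpdatesSince(0)) {
  //     while (it.isValid()) {
  //       final TransactionLogIterator.BatchResult result = it.getBatch();
  //       // result.sequenceNumber() and result.writeBatch() describe one batch
  //       it.next();
  //     }
  //   }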
public void setOptions(final ColumnFamilyHandle columnFamilyHandle,
final MutableColumnFamilyOptions mutableColumnFamilyOptions)
throws RocksDBException {
setOptions(nativeHandle_, columnFamilyHandle.nativeHandle_,
mutableColumnFamilyOptions.getKeys(),
mutableColumnFamilyOptions.getValues());
}
private long[] toNativeHandleList(final List<? extends RocksObject> objectList) {
final int len = objectList.size();
final long[] handleList = new long[len];
for (int i = 0; i < len; i++) {
handleList[i] = objectList.get(i).nativeHandle_;
}
return handleList;
}
/**
   * ingestExternalFile will load a list of external SST files (1) into the DB.
   * We will try to find the lowest possible level that the file can fit in, and
   * ingest the file into this level (2). A file that has a key range
   * overlapping with the memtable key range will require us to flush the
   * memtable first before ingesting the file.
*
* (1) External SST files can be created using {@link SstFileWriter}
* (2) We will try to ingest the files to the lowest possible level
* even if the file compression doesn't match the level compression
*
* @param filePathList The list of files to ingest
* @param ingestExternalFileOptions the options for the ingestion
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void ingestExternalFile(final List<String> filePathList,
final IngestExternalFileOptions ingestExternalFileOptions)
throws RocksDBException {
ingestExternalFile(nativeHandle_, getDefaultColumnFamily().nativeHandle_,
filePathList.toArray(new String[filePathList.size()]),
filePathList.size(), ingestExternalFileOptions.nativeHandle_);
}
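  // Illustrative usage (a hedged sketch with a hypothetical file path, not part of
  // the original source; java.util.Arrays is assumed to be imported): ingesting one
  // SST file created with SstFileWriter into the default column family.
  //
  //   try (final IngestExternalFileOptions opts = new IngestExternalFileOptions()) {
  //     db.ingestExternalFile(Arrays.asList("/tmp/file1.sst"), opts);
  //   }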
/**
   * ingestExternalFile will load a list of external SST files (1) into the DB.
   * We will try to find the lowest possible level that the file can fit in, and
   * ingest the file into this level (2). A file that has a key range
   * overlapping with the memtable key range will require us to flush the
   * memtable first before ingesting the file.
*
* (1) External SST files can be created using {@link SstFileWriter}
* (2) We will try to ingest the files to the lowest possible level
* even if the file compression doesn't match the level compression
*
* @param columnFamilyHandle The column family for the ingested files
* @param filePathList The list of files to ingest
* @param ingestExternalFileOptions the options for the ingestion
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void ingestExternalFile(final ColumnFamilyHandle columnFamilyHandle,
final List<String> filePathList,
final IngestExternalFileOptions ingestExternalFileOptions)
throws RocksDBException {
ingestExternalFile(nativeHandle_, columnFamilyHandle.nativeHandle_,
filePathList.toArray(new String[filePathList.size()]),
filePathList.size(), ingestExternalFileOptions.nativeHandle_);
}
/**
* Static method to destroy the contents of the specified database.
* Be very careful using this method.
*
* @param path the path to the Rocksdb database.
* @param options {@link org.rocksdb.Options} instance.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public static void destroyDB(final String path, final Options options)
throws RocksDBException {
destroyDB(path, options.nativeHandle_);
}
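  // Illustrative usage (a hedged sketch with a hypothetical path, not part of the
  // original source): irreversibly removing all contents of a database directory.
  //
  //   try (final Options options = new Options()) {
  //     RocksDB.destroyDB("/tmp/rocksdb-example", options);
  //   }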
/**
* Private constructor.
*
* @param nativeHandle The native handle of the C++ RocksDB object
*/
protected RocksDB(final long nativeHandle) {
super(nativeHandle);
}
// native methods
protected native static long open(final long optionsHandle,
final String path) throws RocksDBException;
/**
* @param optionsHandle Native handle pointing to an Options object
* @param path The directory path for the database files
* @param columnFamilyNames An array of column family names
* @param columnFamilyOptions An array of native handles pointing to
* ColumnFamilyOptions objects
*
* @return An array of native handles, [0] is the handle of the RocksDB object
* [1..1+n] are handles of the ColumnFamilyReferences
*
* @throws RocksDBException thrown if the database could not be opened
*/
protected native static long[] open(final long optionsHandle,
final String path, final byte[][] columnFamilyNames,
final long[] columnFamilyOptions) throws RocksDBException;
protected native static long openROnly(final long optionsHandle,
final String path) throws RocksDBException;
/**
* @param optionsHandle Native handle pointing to an Options object
* @param path The directory path for the database files
* @param columnFamilyNames An array of column family names
* @param columnFamilyOptions An array of native handles pointing to
* ColumnFamilyOptions objects
*
* @return An array of native handles, [0] is the handle of the RocksDB object
* [1..1+n] are handles of the ColumnFamilyReferences
*
* @throws RocksDBException thrown if the database could not be opened
*/
protected native static long[] openROnly(final long optionsHandle,
final String path, final byte[][] columnFamilyNames,
final long[] columnFamilyOptions
) throws RocksDBException;
protected native static byte[][] listColumnFamilies(long optionsHandle,
String path) throws RocksDBException;
protected native void put(long handle, byte[] key, int keyOffset,
int keyLength, byte[] value, int valueOffset, int valueLength)
throws RocksDBException;
protected native void put(long handle, byte[] key, int keyOffset,
int keyLength, byte[] value, int valueOffset, int valueLength,
long cfHandle) throws RocksDBException;
protected native void put(long handle, long writeOptHandle, byte[] key,
int keyOffset, int keyLength, byte[] value, int valueOffset,
int valueLength) throws RocksDBException;
protected native void put(long handle, long writeOptHandle, byte[] key,
int keyOffset, int keyLength, byte[] value, int valueOffset,
int valueLength, long cfHandle) throws RocksDBException;
protected native void write0(final long handle, long writeOptHandle,
long wbHandle) throws RocksDBException;
protected native void write1(final long handle, long writeOptHandle,
long wbwiHandle) throws RocksDBException;
protected native boolean keyMayExist(final long handle, final byte[] key,
final int keyOffset, final int keyLength,
final StringBuilder stringBuilder);
protected native boolean keyMayExist(final long handle, final byte[] key,
final int keyOffset, final int keyLength, final long cfHandle,
final StringBuilder stringBuilder);
protected native boolean keyMayExist(final long handle,
final long optionsHandle, final byte[] key, final int keyOffset,
final int keyLength, final StringBuilder stringBuilder);
protected native boolean keyMayExist(final long handle,
final long optionsHandle, final byte[] key, final int keyOffset,
final int keyLength, final long cfHandle,
final StringBuilder stringBuilder);
protected native void merge(long handle, byte[] key, int keyOffset,
int keyLength, byte[] value, int valueOffset, int valueLength)
throws RocksDBException;
protected native void merge(long handle, byte[] key, int keyOffset,
int keyLength, byte[] value, int valueOffset, int valueLength,
long cfHandle) throws RocksDBException;
protected native void merge(long handle, long writeOptHandle, byte[] key,
int keyOffset, int keyLength, byte[] value, int valueOffset,
int valueLength) throws RocksDBException;
protected native void merge(long handle, long writeOptHandle, byte[] key,
int keyOffset, int keyLength, byte[] value, int valueOffset,
int valueLength, long cfHandle) throws RocksDBException;
protected native int get(long handle, byte[] key, int keyOffset,
int keyLength, byte[] value, int valueOffset, int valueLength)
throws RocksDBException;
protected native int get(long handle, byte[] key, int keyOffset,
int keyLength, byte[] value, int valueOffset, int valueLength,
long cfHandle) throws RocksDBException;
protected native int get(long handle, long readOptHandle, byte[] key,
int keyOffset, int keyLength, byte[] value, int valueOffset,
int valueLength) throws RocksDBException;
protected native int get(long handle, long readOptHandle, byte[] key,
int keyOffset, int keyLength, byte[] value, int valueOffset,
int valueLength, long cfHandle) throws RocksDBException;
protected native byte[][] multiGet(final long dbHandle, final byte[][] keys,
final int[] keyOffsets, final int[] keyLengths);
protected native byte[][] multiGet(final long dbHandle, final byte[][] keys,
final int[] keyOffsets, final int[] keyLengths,
final long[] columnFamilyHandles);
protected native byte[][] multiGet(final long dbHandle, final long rOptHandle,
final byte[][] keys, final int[] keyOffsets, final int[] keyLengths);
protected native byte[][] multiGet(final long dbHandle, final long rOptHandle,
final byte[][] keys, final int[] keyOffsets, final int[] keyLengths,
final long[] columnFamilyHandles);
protected native byte[] get(long handle, byte[] key, int keyOffset,
int keyLength) throws RocksDBException;
protected native byte[] get(long handle, byte[] key, int keyOffset,
int keyLength, long cfHandle) throws RocksDBException;
protected native byte[] get(long handle, long readOptHandle,
byte[] key, int keyOffset, int keyLength) throws RocksDBException;
protected native byte[] get(long handle, long readOptHandle, byte[] key,
int keyOffset, int keyLength, long cfHandle) throws RocksDBException;
protected native void delete(long handle, byte[] key, int keyOffset,
int keyLength) throws RocksDBException;
protected native void delete(long handle, byte[] key, int keyOffset,
int keyLength, long cfHandle) throws RocksDBException;
protected native void delete(long handle, long writeOptHandle, byte[] key,
int keyOffset, int keyLength) throws RocksDBException;
protected native void delete(long handle, long writeOptHandle, byte[] key,
int keyOffset, int keyLength, long cfHandle) throws RocksDBException;
protected native void singleDelete(
long handle, byte[] key, int keyLen) throws RocksDBException;
protected native void singleDelete(
long handle, byte[] key, int keyLen, long cfHandle)
throws RocksDBException;
protected native void singleDelete(
long handle, long writeOptHandle,
byte[] key, int keyLen) throws RocksDBException;
protected native void singleDelete(
long handle, long writeOptHandle,
byte[] key, int keyLen, long cfHandle) throws RocksDBException;
protected native void deleteRange(long handle, byte[] beginKey, int beginKeyOffset,
int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength)
throws RocksDBException;
protected native void deleteRange(long handle, byte[] beginKey, int beginKeyOffset,
int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength, long cfHandle)
throws RocksDBException;
protected native void deleteRange(long handle, long writeOptHandle, byte[] beginKey,
int beginKeyOffset, int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength)
throws RocksDBException;
protected native void deleteRange(long handle, long writeOptHandle, byte[] beginKey,
int beginKeyOffset, int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength,
long cfHandle) throws RocksDBException;
protected native String getProperty0(long nativeHandle,
String property, int propertyLength) throws RocksDBException;
protected native String getProperty0(long nativeHandle, long cfHandle,
String property, int propertyLength) throws RocksDBException;
protected native long getLongProperty(long nativeHandle, String property,
int propertyLength) throws RocksDBException;
protected native long getLongProperty(long nativeHandle, long cfHandle,
String property, int propertyLength) throws RocksDBException;
protected native long getAggregatedLongProperty(long nativeHandle, String property,
int propertyLength) throws RocksDBException;
protected native long iterator(long handle);
protected native long iterator(long handle, long readOptHandle);
protected native long iteratorCF(long handle, long cfHandle);
protected native long iteratorCF(long handle, long cfHandle,
long readOptHandle);
protected native long[] iterators(final long handle,
final long[] columnFamilyHandles, final long readOptHandle)
throws RocksDBException;
protected native long getSnapshot(long nativeHandle);
protected native void releaseSnapshot(
long nativeHandle, long snapshotHandle);
@Override protected native void disposeInternal(final long handle);
private native long getDefaultColumnFamily(long handle);
private native long createColumnFamily(final long handle,
final byte[] columnFamilyName, final long columnFamilyOptions)
throws RocksDBException;
private native void dropColumnFamily(final long handle, final long cfHandle)
throws RocksDBException;
private native void dropColumnFamilies(final long handle,
final long[] cfHandles) throws RocksDBException;
private native void flush(long handle, long flushOptHandle)
throws RocksDBException;
private native void flush(long handle, long flushOptHandle, long cfHandle)
throws RocksDBException;
private native void compactRange0(long handle, boolean reduce_level,
int target_level, int target_path_id) throws RocksDBException;
private native void compactRange0(long handle, byte[] begin, int beginLen,
byte[] end, int endLen, boolean reduce_level, int target_level,
int target_path_id) throws RocksDBException;
private native void compactRange(long handle, byte[] begin, int beginLen,
byte[] end, int endLen, long compactRangeOptHandle, long cfHandle)
throws RocksDBException;
private native void compactRange(long handle, boolean reduce_level,
int target_level, int target_path_id, long cfHandle)
throws RocksDBException;
private native void compactRange(long handle, byte[] begin, int beginLen,
byte[] end, int endLen, boolean reduce_level, int target_level,
int target_path_id, long cfHandle) throws RocksDBException;
private native void pauseBackgroundWork(long handle) throws RocksDBException;
private native void continueBackgroundWork(long handle) throws RocksDBException;
private native long getLatestSequenceNumber(long handle);
private native void disableFileDeletions(long handle) throws RocksDBException;
private native void enableFileDeletions(long handle, boolean force)
throws RocksDBException;
private native long getUpdatesSince(long handle, long sequenceNumber)
throws RocksDBException;
private native void setOptions(long handle, long cfHandle, String[] keys,
String[] values) throws RocksDBException;
private native void ingestExternalFile(long handle, long cfHandle,
String[] filePathList, int filePathListLen,
long ingest_external_file_options_handle) throws RocksDBException;
private native static void destroyDB(final String path,
final long optionsHandle) throws RocksDBException;
protected DBOptionsInterface options_;
}
|
[
"\"ROCKSDB_SHAREDLIB_DIR\""
] |
[] |
[
"ROCKSDB_SHAREDLIB_DIR"
] |
[]
|
["ROCKSDB_SHAREDLIB_DIR"]
|
java
| 1 | 0 | |
openmessaging/src/main/java/io/openmessaging/rocketmq/consumer/PullConsumerImpl.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.openmessaging.rocketmq.consumer;
import io.openmessaging.KeyValue;
import io.openmessaging.Message;
import io.openmessaging.OMSBuiltinKeys;
import io.openmessaging.consumer.PullConsumer;
import io.openmessaging.exception.OMSRuntimeException;
import io.openmessaging.rocketmq.config.ClientConfig;
import io.openmessaging.rocketmq.domain.ConsumeRequest;
import io.openmessaging.rocketmq.utils.BeanUtils;
import io.openmessaging.rocketmq.utils.OMSUtil;
import org.apache.rocketmq.client.consumer.DefaultMQPullConsumer;
import org.apache.rocketmq.client.consumer.MQPullConsumer;
import org.apache.rocketmq.client.consumer.MQPullConsumerScheduleService;
import org.apache.rocketmq.client.consumer.PullResult;
import org.apache.rocketmq.client.consumer.PullTaskCallback;
import org.apache.rocketmq.client.consumer.PullTaskContext;
import org.apache.rocketmq.client.exception.MQClientException;
import org.apache.rocketmq.client.impl.consumer.ProcessQueue;
import org.apache.rocketmq.client.log.ClientLogger;
import org.apache.rocketmq.common.message.MessageExt;
import org.apache.rocketmq.common.message.MessageQueue;
import org.apache.rocketmq.logging.InternalLogger;
import org.apache.rocketmq.remoting.serialize.LanguageCode;
public class PullConsumerImpl implements PullConsumer {
private final DefaultMQPullConsumer rocketmqPullConsumer;
private final KeyValue properties;
private boolean started = false;
private final MQPullConsumerScheduleService pullConsumerScheduleService;
private final LocalMessageCache localMessageCache;
private final ClientConfig clientConfig;
private final static InternalLogger log = ClientLogger.getLog();
public PullConsumerImpl(final KeyValue properties) {
this.properties = properties;
this.clientConfig = BeanUtils.populate(properties, ClientConfig.class);
String consumerGroup = clientConfig.getConsumerId();
if (null == consumerGroup || consumerGroup.isEmpty()) {
throw new OMSRuntimeException("-1", "Consumer Group is necessary for RocketMQ, please set it.");
}
pullConsumerScheduleService = new MQPullConsumerScheduleService(consumerGroup);
this.rocketmqPullConsumer = pullConsumerScheduleService.getDefaultMQPullConsumer();
if ("true".equalsIgnoreCase(System.getenv("OMS_RMQ_DIRECT_NAME_SRV"))) {
String accessPoints = clientConfig.getAccessPoints();
if (accessPoints == null || accessPoints.isEmpty()) {
throw new OMSRuntimeException("-1", "OMS AccessPoints is null or empty.");
}
this.rocketmqPullConsumer.setNamesrvAddr(accessPoints.replace(',', ';'));
}
this.rocketmqPullConsumer.setConsumerGroup(consumerGroup);
int maxReDeliveryTimes = clientConfig.getRmqMaxRedeliveryTimes();
this.rocketmqPullConsumer.setMaxReconsumeTimes(maxReDeliveryTimes);
String consumerId = OMSUtil.buildInstanceName();
this.rocketmqPullConsumer.setInstanceName(consumerId);
properties.put(OMSBuiltinKeys.CONSUMER_ID, consumerId);
this.rocketmqPullConsumer.setLanguage(LanguageCode.OMS);
this.localMessageCache = new LocalMessageCache(this.rocketmqPullConsumer, clientConfig);
}
@Override
public KeyValue attributes() {
return properties;
}
@Override
public PullConsumer attachQueue(String queueName) {
registerPullTaskCallback(queueName);
return this;
}
@Override
public PullConsumer attachQueue(String queueName, KeyValue attributes) {
registerPullTaskCallback(queueName);
return this;
}
@Override
public PullConsumer detachQueue(String queueName) {
this.rocketmqPullConsumer.getRegisterTopics().remove(queueName);
return this;
}
@Override
public Message receive() {
MessageExt rmqMsg = localMessageCache.poll();
return rmqMsg == null ? null : OMSUtil.msgConvert(rmqMsg);
}
@Override
public Message receive(final KeyValue properties) {
MessageExt rmqMsg = localMessageCache.poll(properties);
return rmqMsg == null ? null : OMSUtil.msgConvert(rmqMsg);
}
@Override
public void ack(final String messageId) {
localMessageCache.ack(messageId);
}
@Override
public void ack(final String messageId, final KeyValue properties) {
localMessageCache.ack(messageId);
}
@Override
public synchronized void startup() {
if (!started) {
try {
this.pullConsumerScheduleService.start();
this.localMessageCache.startup();
} catch (MQClientException e) {
throw new OMSRuntimeException("-1", e);
}
}
this.started = true;
}
private void registerPullTaskCallback(final String targetQueueName) {
this.pullConsumerScheduleService.registerPullTaskCallback(targetQueueName, new PullTaskCallback() {
@Override
public void doPullTask(final MessageQueue mq, final PullTaskContext context) {
MQPullConsumer consumer = context.getPullConsumer();
try {
long offset = localMessageCache.nextPullOffset(mq);
PullResult pullResult = consumer.pull(mq, "*",
offset, localMessageCache.nextPullBatchNums());
ProcessQueue pq = rocketmqPullConsumer.getDefaultMQPullConsumerImpl().getRebalanceImpl()
.getProcessQueueTable().get(mq);
switch (pullResult.getPullStatus()) {
case FOUND:
if (pq != null) {
pq.putMessage(pullResult.getMsgFoundList());
for (final MessageExt messageExt : pullResult.getMsgFoundList()) {
localMessageCache.submitConsumeRequest(new ConsumeRequest(messageExt, mq, pq));
}
}
break;
default:
break;
}
localMessageCache.updatePullOffset(mq, pullResult.getNextBeginOffset());
} catch (Exception e) {
log.error("A error occurred in pull message process.", e);
}
}
});
}
@Override
public synchronized void shutdown() {
if (this.started) {
this.localMessageCache.shutdown();
this.pullConsumerScheduleService.shutdown();
this.rocketmqPullConsumer.shutdown();
}
this.started = false;
}
}
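// Illustrative usage (a hedged sketch, not part of the original source): "props" stands
// for a KeyValue carrying at least the consumer id and access points, "TopicTest" is a
// hypothetical queue name, and "messageId" is the id read from the received message's
// system headers.
//
//   PullConsumer consumer = new PullConsumerImpl(props);
//   consumer.attachQueue("TopicTest");
//   consumer.startup();
//   Message message = consumer.receive();   // null when the local cache is empty
//   if (message != null) {
//     consumer.ack(messageId);
//   }
//   consumer.shutdown();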
|
[
"\"OMS_RMQ_DIRECT_NAME_SRV\""
] |
[] |
[
"OMS_RMQ_DIRECT_NAME_SRV"
] |
[]
|
["OMS_RMQ_DIRECT_NAME_SRV"]
|
java
| 1 | 0 | |
controllers/suite_test.go
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"path/filepath"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
standardv1alpha1 "github.com/zzxwill/oam-autoscaler-trait/api/v1alpha1"
// +kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{printer.NewlineReporter{}})
}
var _ = BeforeSuite(func(done Done) {
logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
}
var err error
cfg, err = testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = standardv1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
helpers.go
|
package main
import (
"context"
"log"
"os"
"github.com/google/go-github/v32/github"
gitlab "github.com/xanzy/go-gitlab"
)
func getUsername(client interface{}, service string) string {
if client == nil {
log.Fatalf("Couldn't acquire a client to talk to %s", service)
}
if service == "github" {
ctx := context.Background()
user, _, err := client.(*github.Client).Users.Get(ctx, "")
if err != nil {
log.Fatal("Error retrieving username", err.Error())
}
return *user.Login
}
if service == "gitlab" {
user, _, err := client.(*gitlab.Client).Users.CurrentUser()
if err != nil {
log.Fatal("Error retrieving username", err.Error())
}
return user.Username
}
if service == "bitbucket" {
bitbucketUsername := os.Getenv("BITBUCKET_USERNAME")
if bitbucketUsername == "" {
log.Fatal("BITBUCKET_USERNAME environment variable not set")
}
return bitbucketUsername
}
return ""
}
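// Illustrative usage (a hedged sketch, not part of the original source; assumes an
// unauthenticated GitHub client is sufficient for the lookup):
//
//	client := github.NewClient(nil)
//	name := getUsername(client, "github")
//	log.Printf("authenticated as %s", name)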
|
[
"\"BITBUCKET_USERNAME\""
] |
[] |
[
"BITBUCKET_USERNAME"
] |
[]
|
["BITBUCKET_USERNAME"]
|
go
| 1 | 0 | |
tools/eval.py
|
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
import argparse
import paddle
import paddle.fluid as fluid
import program
from ppcls.data import Reader
from ppcls.utils.config import get_config
from ppcls.utils.save_load import init_model
from paddle.fluid.incubate.fleet.collective import fleet
from paddle.fluid.incubate.fleet.base import role_maker
def parse_args():
parser = argparse.ArgumentParser("PaddleClas eval script")
parser.add_argument(
'-c',
'--config',
type=str,
default='./configs/eval.yaml',
help='config file path')
parser.add_argument(
'-o',
'--override',
action='append',
default=[],
help='config options to be overridden')
args = parser.parse_args()
return args
def main(args):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
config = get_config(args.config, overrides=args.override, show=True)
gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
place = fluid.CUDAPlace(gpu_id)
startup_prog = fluid.Program()
valid_prog = fluid.Program()
valid_dataloader, valid_fetchs = program.build(
config, valid_prog, startup_prog, is_train=False)
valid_prog = valid_prog.clone(for_test=True)
exe = fluid.Executor(place)
exe.run(startup_prog)
init_model(config, valid_prog, exe)
valid_reader = Reader(config, 'valid')()
valid_dataloader.set_sample_list_generator(valid_reader, place)
compiled_valid_prog = program.compile(config, valid_prog)
program.run(valid_dataloader, exe, compiled_valid_prog, valid_fetchs, -1,
'eval', config)
if __name__ == '__main__':
paddle.enable_static()
args = parse_args()
main(args)
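# Illustrative invocation (a hedged sketch, not part of the original source; the config
# path is the script default, the override key/value are hypothetical, and GPU selection
# flags for the launcher are omitted):
#
#   python -m paddle.distributed.launch tools/eval.py \
#       -c ./configs/eval.yaml \
#       -o pretrained_model=./output/ResNet50/best_model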
|
[] |
[] |
[
"FLAGS_selected_gpus"
] |
[]
|
["FLAGS_selected_gpus"]
|
python
| 1 | 0 | |
config.go
|
package history
import (
"fmt"
"os"
"path/filepath"
"runtime"
"github.com/BurntSushi/toml"
)
type config struct {
Prompt string `toml:"prompt"`
InitQuery string `toml:"init_query"`
InitCursor string `toml:"init_cursor"`
ScreenColumns []string `toml:"screen_columns"`
VimModePrompt string `toml:"vim_mode_prompt"`
IgnoreWords []string `toml:"ignore_words"`
}
const tomlDir = "zhist"
func (cfg *config) load() error {
var dir string
if runtime.GOOS == "windows" {
base := os.Getenv("APPDATA")
if base == "" {
base = filepath.Join(os.Getenv("USERPROFILE"), "Application Data")
}
dir = filepath.Join(base, tomlDir)
} else {
dir = filepath.Join(os.Getenv("HOME"), ".config", tomlDir)
}
if err := os.MkdirAll(dir, 0700); err != nil {
return fmt.Errorf("cannot create directory: %v", err)
}
tomlFile := filepath.Join(dir, "config.toml")
_, err := os.Stat(tomlFile)
if err == nil {
_, err := toml.DecodeFile(tomlFile, cfg)
if err != nil {
return err
}
return nil
}
if !os.IsNotExist(err) {
return err
}
f, err := os.Create(tomlFile)
if err != nil {
return err
}
defer f.Close()
// Set default value
cfg.InitQuery = DefaultQuery
cfg.InitCursor = Wildcard
cfg.Prompt = Prompt
cfg.ScreenColumns = []string{"command"}
cfg.VimModePrompt = "VIM-MODE"
cfg.IgnoreWords = []string{}
return toml.NewEncoder(f).Encode(cfg)
}
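// Illustrative usage (a hedged sketch, not part of the original source): loading the
// configuration, which writes a config.toml with default values on the first run.
//
//	var cfg config
//	if err := cfg.load(); err != nil {
//		fmt.Fprintln(os.Stderr, err)
//		return
//	}
//	fmt.Println(cfg.Prompt)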
|
[
"\"APPDATA\"",
"\"USERPROFILE\"",
"\"HOME\""
] |
[] |
[
"APPDATA",
"HOME",
"USERPROFILE"
] |
[]
|
["APPDATA", "HOME", "USERPROFILE"]
|
go
| 3 | 0 | |
llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.llap.cli;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.net.URL;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Collection;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
import org.apache.hadoop.hive.llap.tezplugins.LlapTezUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hive.common.CompressionUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.llap.cli.LlapOptionsProcessor.LlapOptions;
import org.apache.hadoop.hive.llap.io.api.impl.LlapInputFormat;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.HiveInputFormat;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.json.JSONObject;
import com.google.common.base.Preconditions;
public class LlapServiceDriver {
protected static final Logger LOG = LoggerFactory.getLogger(LlapServiceDriver.class.getName());
private static final String[] DEFAULT_AUX_CLASSES = new String[] {
"org.apache.hive.hcatalog.data.JsonSerDe" };
private static final String HBASE_SERDE_CLASS = "org.apache.hadoop.hive.hbase.HBaseSerDe";
private static final String[] NEEDED_CONFIGS = {
"tez-site.xml", "hive-site.xml", "llap-daemon-site.xml", "core-site.xml" };
private static final String[] OPTIONAL_CONFIGS = { "ssl-server.xml" };
private final Configuration conf;
public LlapServiceDriver() {
SessionState ss = SessionState.get();
conf = (ss != null) ? ss.getConf() : new HiveConf(SessionState.class);
}
public static void main(String[] args) throws Exception {
int ret = 0;
try {
new LlapServiceDriver().run(args);
} catch (Throwable t) {
System.err.println("Failed: " + t.getMessage());
t.printStackTrace();
ret = 3;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Completed processing - exiting with " + ret);
}
System.exit(ret);
}
/**
* Intersect llap-daemon-site.xml configuration properties against an existing Configuration
* object, while resolving any ${} parameters that might be present.
*
   * @param configured the fully loaded configuration to slice values from
   * @param first the first resource whose property names define the slice
   * @param resources any additional resources whose property names define the slice
   * @return a configuration object which is a slice of the configured object
*/
public static Configuration resolve(Configuration configured, String first, String... resources) {
Configuration defaults = new Configuration(false);
defaults.addResource(first);
for (String resource : resources) {
defaults.addResource(resource);
}
Configuration slice = new Configuration(false);
// for everything in defaults, slice out those from the configured
for (Map.Entry<String, String> kv : defaults) {
slice.set(kv.getKey(), configured.get(kv.getKey()));
}
return slice;
}
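  // Illustrative usage (a hedged sketch, not part of the original source): slicing only
  // the properties declared in llap-daemon-site.xml out of the fully loaded configuration,
  // mirroring what copyConfig() does below before writing out the daemon config.
  //
  //   Configuration llapDaemonSlice = resolve(conf, "llap-daemon-site.xml");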
private void run(String[] args) throws Exception {
LlapOptionsProcessor optionsProcessor = new LlapOptionsProcessor();
LlapOptions options = optionsProcessor.processOptions(args);
if (options == null) {
// help
return;
}
Path tmpDir = new Path(options.getDirectory());
if (conf == null) {
throw new Exception("Cannot load any configuration to run command");
}
FileSystem fs = FileSystem.get(conf);
FileSystem lfs = FileSystem.getLocal(conf).getRawFileSystem();
// needed so that the file is actually loaded into configuration.
for (String f : NEEDED_CONFIGS) {
conf.addResource(f);
if (conf.getResource(f) == null) {
throw new Exception("Unable to find required config file: " + f);
}
}
for (String f : OPTIONAL_CONFIGS) {
conf.addResource(f);
}
conf.reloadConfiguration();
if (options.getName() != null) {
// update service registry configs - caveat: this has nothing to do with the actual settings
// as read by the AM
// if needed, use --hiveconf llap.daemon.service.hosts=@llap0 to dynamically switch between
// instances
conf.set(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname, "@" + options.getName());
}
if (options.getSize() != -1) {
if (options.getCache() != -1) {
Preconditions.checkArgument(options.getCache() < options.getSize(),
"Cache has to be smaller than the container sizing");
}
if (options.getXmx() != -1) {
Preconditions.checkArgument(options.getXmx() < options.getSize(),
"Working memory has to be smaller than the container sizing");
}
if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_DIRECT)) {
Preconditions.checkArgument(options.getXmx() + options.getCache() < options.getSize(),
"Working memory + cache has to be smaller than the containing sizing ");
}
}
final long minAlloc = conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, -1);
if (options.getSize() != -1) {
final long containerSize = options.getSize() / (1024 * 1024);
Preconditions.checkArgument(containerSize >= minAlloc,
"Container size should be greater than minimum allocation(%s)", minAlloc + "m");
conf.setLong(ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB.varname, containerSize);
}
if (options.getExecutors() != -1) {
conf.setLong(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname, options.getExecutors());
// TODO: vcpu settings - possibly when DRFA works right
}
if (options.getCache() != -1) {
conf.set(HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname,
Long.toString(options.getCache()));
}
if (options.getXmx() != -1) {
// Needs more explanation here
// Xmx is not the max heap value in JDK8
// You need to subtract 50% of the survivor fraction from this, to get actual usable memory before it goes into GC
conf.setLong(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname, (long)(options.getXmx())
/ (1024 * 1024));
}
for (Entry<Object, Object> props : options.getConfig().entrySet()) {
conf.set((String) props.getKey(), (String) props.getValue());
}
URL logger = conf.getResource("llap-daemon-log4j2.properties");
if (null == logger) {
throw new Exception("Unable to find required config file: llap-daemon-log4j2.properties");
}
Path home = new Path(System.getenv("HIVE_HOME"));
Path scripts = new Path(new Path(new Path(home, "scripts"), "llap"), "bin");
if (!lfs.exists(home)) {
throw new Exception("Unable to find HIVE_HOME:" + home);
} else if (!lfs.exists(scripts)) {
LOG.warn("Unable to find llap scripts:" + scripts);
}
Path libDir = new Path(tmpDir, "lib");
String tezLibs = conf.get("tez.lib.uris");
if (tezLibs == null) {
LOG.warn("Missing tez.lib.uris in tez-site.xml");
}
if (LOG.isDebugEnabled()) {
LOG.debug("Copying tez libs from " + tezLibs);
}
lfs.mkdirs(libDir);
fs.copyToLocalFile(new Path(tezLibs), new Path(libDir, "tez.tar.gz"));
CompressionUtils.unTar(new Path(libDir, "tez.tar.gz").toString(), libDir.toString(), true);
lfs.delete(new Path(libDir, "tez.tar.gz"), false);
// llap-common
lfs.copyFromLocalFile(new Path(Utilities.jarFinderGetJar(LlapDaemonProtocolProtos.class)), libDir);
// llap-tez
lfs.copyFromLocalFile(new Path(Utilities.jarFinderGetJar(LlapTezUtils.class)), libDir);
// llap-server
lfs.copyFromLocalFile(new Path(Utilities.jarFinderGetJar(LlapInputFormat.class)), libDir);
// hive-exec
lfs.copyFromLocalFile(new Path(Utilities.jarFinderGetJar(HiveInputFormat.class)), libDir);
// copy default aux classes (json/hbase)
for (String className : DEFAULT_AUX_CLASSES) {
localizeJarForClass(lfs, libDir, className, false);
}
if (options.getIsHBase()) {
try {
localizeJarForClass(lfs, libDir, HBASE_SERDE_CLASS, true);
Job fakeJob = new Job(new JobConf()); // HBase API is convoluted.
TableMapReduceUtil.addDependencyJars(fakeJob);
Collection<String> hbaseJars = fakeJob.getConfiguration().getStringCollection("tmpjars");
for (String jarPath : hbaseJars) {
if (!jarPath.isEmpty()) {
lfs.copyFromLocalFile(new Path(jarPath), libDir);
}
}
} catch (Throwable t) {
String err = "Failed to add HBase jars. Use --auxhbase=false to avoid localizing them";
LOG.error(err);
System.err.println(err);
throw new RuntimeException(t);
}
}
String auxJars = options.getAuxJars();
if (auxJars != null && !auxJars.isEmpty()) {
// TODO: transitive dependencies warning?
String[] jarPaths = auxJars.split(",");
for (String jarPath : jarPaths) {
if (!jarPath.isEmpty()) {
lfs.copyFromLocalFile(new Path(jarPath), libDir);
}
}
}
Path confPath = new Path(tmpDir, "conf");
lfs.mkdirs(confPath);
for (String f : NEEDED_CONFIGS) {
copyConfig(options, lfs, confPath, f);
}
for (String f : OPTIONAL_CONFIGS) {
try {
copyConfig(options, lfs, confPath, f);
} catch (Throwable t) {
LOG.info("Error getting an optional config " + f + "; ignoring: " + t.getMessage());
}
}
lfs.copyFromLocalFile(new Path(logger.toString()), confPath);
String java_home = System.getenv("JAVA_HOME");
String jre_home = System.getProperty("java.home");
if (java_home == null) {
java_home = jre_home;
} else if (!java_home.equals(jre_home)) {
LOG.warn("Java versions might not match : JAVA_HOME=%s,process jre=%s",
java_home, jre_home);
}
// extract configs for processing by the python fragments in Slider
JSONObject configs = new JSONObject();
configs.put("java.home", java_home);
configs.put(ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB.varname, HiveConf.getIntVar(conf,
ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB));
configs.put(HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname,
HiveConf.getSizeVar(conf, HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE));
configs.put(HiveConf.ConfVars.LLAP_ALLOCATOR_DIRECT.varname,
HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_DIRECT));
configs.put(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname, HiveConf.getIntVar(conf,
ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB));
configs.put(ConfVars.LLAP_DAEMON_VCPUS_PER_INSTANCE.varname, HiveConf.getIntVar(conf,
ConfVars.LLAP_DAEMON_VCPUS_PER_INSTANCE));
configs.put(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname, HiveConf.getIntVar(conf,
ConfVars.LLAP_DAEMON_NUM_EXECUTORS));
configs.put(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, -1));
configs.put(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, -1));
FSDataOutputStream os = lfs.create(new Path(tmpDir, "config.json"));
OutputStreamWriter w = new OutputStreamWriter(os);
configs.write(w);
w.close();
os.close();
lfs.close();
fs.close();
if (LOG.isDebugEnabled()) {
LOG.debug("Exiting successfully");
}
}
// TODO#: assumes throw
private void localizeJarForClass(FileSystem lfs, Path libDir, String className, boolean doThrow)
throws IOException {
String jarPath = null;
boolean hasException = false;
try {
Class<?> auxClass = Class.forName(className);
jarPath = Utilities.jarFinderGetJar(auxClass);
} catch (Throwable t) {
if (doThrow) {
throw (t instanceof IOException) ? (IOException)t : new IOException(t);
}
hasException = true;
String err =
"Cannot find a jar for [" + className + "] due to an exception (" + t.getMessage()
+ "); not packaging the jar";
LOG.error(err, t);
System.err.println(err);
}
if (jarPath != null) {
lfs.copyFromLocalFile(new Path(jarPath), libDir);
} else if (!hasException) {
String err = "Cannot find a jar for [" + className + "]; not packaging the jar";
if (doThrow) {
throw new IOException(err);
}
LOG.error(err);
System.err.println(err);
}
}
private void copyConfig(
LlapOptions options, FileSystem lfs, Path confPath, String f) throws IOException {
if (f.equals("llap-daemon-site.xml")) {
FSDataOutputStream confStream = lfs.create(new Path(confPath, f));
Configuration copy = resolve(conf, "llap-daemon-site.xml");
for (Entry<Object, Object> props : options.getConfig().entrySet()) {
// overrides
copy.set((String) props.getKey(), (String) props.getValue());
}
copy.writeXml(confStream);
confStream.close();
} else {
// they will be file:// URLs
lfs.copyFromLocalFile(new Path(conf.getResource(f).toString()), confPath);
}
}
}
|
[
"\"HIVE_HOME\"",
"\"JAVA_HOME\""
] |
[] |
[
"JAVA_HOME",
"HIVE_HOME"
] |
[]
|
["JAVA_HOME", "HIVE_HOME"]
|
java
| 2 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'instagram_clone.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
skbuild/platform_specifics/abstract.py
|
"""This module defines objects useful to discover which CMake generator is
supported on the current platform."""
from __future__ import print_function
import os
import shutil
import subprocess
import textwrap
from ..constants import CMAKE_DEFAULT_EXECUTABLE
from ..exceptions import SKBuildGeneratorNotFoundError
from ..utils import push_dir
test_folder = "_cmake_test_compile"
class CMakePlatform(object):
"""This class encapsulates the logic allowing to get the identifier of a
working CMake generator.
Derived class should at least set :attr:`default_generators`.
"""
def __init__(self):
self._default_generators = list()
@property
def default_generators(self):
"""List of generators considered by :func:`get_best_generator()`."""
return self._default_generators
@default_generators.setter
def default_generators(self, generators):
self._default_generators = generators
@property
def generator_installation_help(self):
"""Return message guiding the user for installing a valid toolchain."""
raise NotImplementedError # pragma: no cover
@staticmethod
def write_test_cmakelist(languages):
"""Write a minimal ``CMakeLists.txt`` useful to check if the
requested ``languages`` are supported."""
if not os.path.exists(test_folder):
os.makedirs(test_folder)
with open("{:s}/{:s}".format(test_folder, "CMakeLists.txt"), "w") as f:
f.write("cmake_minimum_required(VERSION 2.8)\n")
f.write("PROJECT(compiler_test NONE)\n")
for language in languages:
f.write("ENABLE_LANGUAGE({:s})\n".format(language))
@staticmethod
def cleanup_test():
"""Delete test project directory."""
if os.path.exists(test_folder):
shutil.rmtree(test_folder)
def get_generator(self, generator_name):
"""Loop over generators and return the first that matches the given
name.
"""
for default_generator in self.default_generators:
if default_generator.name == generator_name:
return default_generator
return CMakeGenerator(generator_name)
# TODO: this method name is not great. Does anyone have a better idea for
# renaming it?
def get_best_generator(
self, generator_name=None, skip_generator_test=False,
languages=("CXX", "C"), cleanup=True,
cmake_executable=CMAKE_DEFAULT_EXECUTABLE, cmake_args=()):
"""Loop over generators to find one that works by configuring
and compiling a test project.
:param generator_name: If provided, uses only provided generator, \
instead of trying :attr:`default_generators`.
:type generator_name: string or None
:param skip_generator_test: If set to True and if a generator name is \
specified, the generator test is skipped. If no generator_name is specified \
and the option is set to True, the first available generator is used.
:type skip_generator_test: bool
:param languages: The languages you'll need for your project, in terms \
that CMake recognizes.
:type languages: tuple
:param cleanup: If True, cleans up temporary folder used to test \
generators. Set to False for debugging to see CMake's output files.
:type cleanup: bool
:param cmake_executable: Path to CMake executable used to configure \
and build the test project used to evaluate if a generator is working.
:type cmake_executable: string
:param cmake_args: List of CMake arguments to use when configuring \
the test project. Only arguments starting with ``-DCMAKE_`` are \
used.
:type cmake_args: tuple
:return: CMake Generator object
:rtype: :class:`CMakeGenerator` or None
:raises skbuild.exceptions.SKBuildGeneratorNotFoundError:
"""
candidate_generators = []
if generator_name is None:
candidate_generators = self.default_generators
else:
            # Lookup CMakeGenerator by name. Doing this allows getting a
            # generator object with its ``env`` property appropriately
            # initialized.
candidate_generators = []
for default_generator in self.default_generators:
if default_generator.name == generator_name:
candidate_generators.append(default_generator)
if not candidate_generators:
candidate_generators = [CMakeGenerator(generator_name)]
self.write_test_cmakelist(languages)
if skip_generator_test:
working_generator = candidate_generators[0]
else:
working_generator = self.compile_test_cmakelist(
cmake_executable, candidate_generators, cmake_args)
if working_generator is None:
raise SKBuildGeneratorNotFoundError(textwrap.dedent(
"""
{line}
scikit-build could not get a working generator for your system. Aborting build.
{installation_help}
{line}
""").strip().format( # noqa: E501
line="*"*80,
installation_help=self.generator_installation_help)
)
if cleanup:
CMakePlatform.cleanup_test()
return working_generator
@staticmethod
@push_dir(directory=test_folder)
def compile_test_cmakelist(
cmake_exe_path, candidate_generators, cmake_args=()):
"""Attempt to configure the test project with
each :class:`CMakeGenerator` from ``candidate_generators``.
Only cmake arguments starting with ``-DCMAKE_`` are used to configure
the test project.
The function returns the first generator allowing to successfully
configure the test project using ``cmake_exe_path``."""
# working generator is the first generator we find that works.
working_generator = None
# Include only -DCMAKE_* arguments
cmake_args = [arg for arg in cmake_args if arg.startswith("-DCMAKE_")]
# Do not complain about unused CMake arguments
cmake_args.insert(0, "--no-warn-unused-cli")
def _generator_discovery_status_msg(_generator, suffix=""):
outer = "-" * 80
inner = ["-" * ((idx * 5) - 3) for idx in range(1, 8)]
print(outer if suffix == "" else "\n".join(inner))
print("-- Trying \"%s\" generator%s" % (_generator.description, suffix))
print(outer if suffix != "" else "\n".join(inner[::-1]))
for generator in candidate_generators:
print("\n")
_generator_discovery_status_msg(generator)
# clear the cache for each attempted generator type
if os.path.isdir('build'):
shutil.rmtree('build')
with push_dir('build', make_directory=True):
# call cmake to see if the compiler specified by this
# generator works for the specified languages
cmd = [cmake_exe_path, '../', '-G', generator.name]
if generator.toolset:
cmd.extend(['-T', generator.toolset])
cmd.extend(cmake_args)
status = subprocess.call(cmd, env=generator.env)
_generator_discovery_status_msg(
generator, " - %s" % ("success" if status == 0 else "failure"))
print("")
# cmake succeeded, this generator should work
if status == 0:
# we have a working generator, don't bother looking for more
working_generator = generator
break
return working_generator
class CMakeGenerator(object):
"""Represents a CMake generator.
.. automethod:: __init__
"""
def __init__(self, name, env=None, toolset=None):
"""Instantiate a generator object with the given ``name``.
By default, ``os.environ`` is associated with the generator. A dictionary
passed as the ``env`` parameter will be merged with ``os.environ``. If an
environment variable is set in both ``os.environ`` and ``env``, the
variable in ``env`` is used.
Some CMake generators support a ``toolset`` specification to tell the native
build system how to choose a compiler.
"""
self._generator_name = name
self.env = dict(
list(os.environ.items()) + list(env.items() if env else []))
self._generator_toolset = toolset
if toolset is None:
self._description = name
else:
self._description = "%s %s" % (name, toolset)
@property
def name(self):
"""Name of CMake generator."""
return self._generator_name
@property
def toolset(self):
"""Toolset specification associated with the CMake generator."""
return self._generator_toolset
@property
def description(self):
"""Name of CMake generator with properties describing the environment (e.g toolset)"""
return self._description
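Illustrative note (not part of the module above): a minimal sketch of how this discovery API could be driven, assuming a concrete CMakePlatform subclass instance named platform and that SKBuildGeneratorNotFoundError can be imported from skbuild.exceptions (an assumed import path).
def pick_generator(platform):
    # Try the candidate generators by configuring the tiny test project;
    # get_best_generator() raises if none of them works.
    from skbuild.exceptions import SKBuildGeneratorNotFoundError  # assumed import path
    try:
        gen = platform.get_best_generator(languages=("CXX", "C"), cleanup=True)
    except SKBuildGeneratorNotFoundError as exc:
        print("no working generator:", exc)
        return None
    print("using generator:", gen.name, "toolset:", gen.toolset)
    return gen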
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
app/cmd/shell.go
|
package cmd
import (
"fmt"
"github.com/jenkins-zh/jenkins-cli/app/cmd/common"
"github.com/jenkins-zh/jenkins-cli/util"
"gopkg.in/yaml.v2"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"go.uber.org/zap"
"github.com/jenkins-zh/jenkins-cli/app/i18n"
"github.com/spf13/cobra"
)
// ShellOptions is the option of shell command
type ShellOptions struct {
common.CommonOption
TmpDir string
TmpConfigFileName string
}
var shellOptions ShellOptions
func init() {
rootCmd.AddCommand(shellCmd)
}
const (
defaultRcFile = `
if [ -f /etc/bashrc ]; then
source /etc/bashrc
fi
if [ -f ~/.bashrc ]; then
source ~/.bashrc
fi
if type -t __start_jcli >/dev/null; then true; else
source <(jcli completion)
fi
[[ -r "/usr/local/etc/profile.d/bash_completion.sh" ]] && . "/usr/local/etc/profile.d/bash_completion.sh"
`
zshRcFile = `
if [ -f /etc/zshrc ]; then
source /etc/zshrc
fi
if [ -f ~/.zshrc ]; then
source ~/.zshrc
fi
`
)
var shellCmd = &cobra.Command{
Use: "shell [<name>]",
Short: i18n.T("Create a sub shell so that changes to a specific Jenkins remain local to the shell."),
Long: i18n.T("Create a sub shell so that changes to a specific Jenkins remain local to the shell."),
Aliases: []string{"sh"},
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
if len(args) > 0 {
jenkinsName := args[0]
setCurrentJenkins(jenkinsName)
}
if shellOptions.TmpDir, err = ioutil.TempDir("", ".jcli-shell-"); err == nil {
shellOptions.TmpConfigFileName = filepath.Join(shellOptions.TmpDir, "/config")
var data []byte
config := getConfig()
if data, err = yaml.Marshal(&config); err == nil {
err = ioutil.WriteFile(shellOptions.TmpConfigFileName, data, 0644)
}
}
return
},
RunE: func(cmd *cobra.Command, _ []string) (err error) {
fullShell := os.Getenv("SHELL")
shell := filepath.Base(fullShell)
if fullShell == "" && runtime.GOOS == "windows" {
// SHELL is set by git-bash but not cygwin :-(
shell = "cmd.exe"
}
prompt := createNewBashPrompt(os.Getenv("PS1"))
rcFile := defaultRcFile + "\nexport PS1=" + prompt + "\nexport JCLI_CONFIG=\"" + shellOptions.TmpConfigFileName + "\"\n"
tmpRCFileName := shellOptions.TmpDir + "/.bashrc"
err = ioutil.WriteFile(tmpRCFileName, []byte(rcFile), 0760)
if err != nil {
return err
}
logger.Debug("temporary shell profile loaded", zap.String("path", tmpRCFileName))
e := util.ExecCommand(shellOptions.ExecContext, shell, "-rcfile", tmpRCFileName, "-i")
if shell == "zsh" {
env := os.Environ()
env = append(env, fmt.Sprintf("ZDOTDIR=%s", shellOptions.TmpDir))
e = util.ExecCommand(shellOptions.ExecContext, shell, "-i")
e.Env = env
} else if shell == "cmd.exe" {
env := os.Environ()
env = append(env, fmt.Sprintf("JCLI_CONFIG=%s", shellOptions.TmpConfigFileName))
e = util.ExecCommand(shellOptions.ExecContext, shell)
e.Env = env
}
e.Stdout = cmd.OutOrStdout()
e.Stderr = cmd.OutOrStderr()
e.Stdin = os.Stdin
err = e.Run()
return
},
PostRunE: func(cmd *cobra.Command, args []string) (err error) {
err = os.RemoveAll(shellOptions.TmpDir)
return
},
}
func createNewBashPrompt(prompt string) string {
if prompt == "" {
return "'[\\u@\\h \\W jcli> ]\\$ '"
}
if prompt[0] == '"' {
return prompt[0:1] + "jcli> " + prompt[1:]
}
if prompt[0] == '\'' {
return prompt[0:1] + "jcli> " + prompt[1:]
}
return "'jcli> " + prompt + "'"
}
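Illustrative note (not part of shell.go): the prompt-rewriting rule in createNewBashPrompt is small enough to restate as a language-neutral sketch; the Python below mirrors the same three branches and is only a sketch of the idea.
def create_new_bash_prompt(prompt):
    # No PS1 in the environment: fall back to a full default prompt with the marker.
    if prompt == "":
        return "'[\\u@\\h \\W jcli> ]\\$ '"
    # PS1 already starts with a quote: keep it and inject the marker right after.
    if prompt[0] in ('"', "'"):
        return prompt[0] + "jcli> " + prompt[1:]
    # Unquoted PS1: wrap it in single quotes with the marker in front.
    return "'jcli> " + prompt + "'"

assert create_new_bash_prompt('"\\w $ "') == '"jcli> \\w $ "'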
|
[
"\"SHELL\"",
"\"PS1\""
] |
[] |
[
"SHELL",
"PS1"
] |
[]
|
["SHELL", "PS1"]
|
go
| 2 | 0 | |
api/pkg/policy/policy.go
|
// Copyright 2019 The soda Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package policy
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/emicklei/go-restful"
log "github.com/sirupsen/logrus"
"github.com/soda/multi-cloud/api/pkg/context"
"github.com/soda/multi-cloud/api/pkg/model"
"github.com/soda/multi-cloud/api/pkg/utils"
"github.com/soda/multi-cloud/api/pkg/utils/constants"
)
var enforcer *Enforcer
func init() {
enforcer = NewEnforcer(false)
RegisterRules(enforcer)
enforcer.LoadRules(false)
}
type DefaultRule struct {
Name string
CheckStr string
}
func listRules() []DefaultRule {
return []DefaultRule{
{Name: "context_is_admin", CheckStr: "role:admin"},
}
}
func RegisterRules(e *Enforcer) {
e.RegisterDefaults(listRules())
}
func NewEnforcer(overWrite bool) *Enforcer {
return &Enforcer{OverWrite: overWrite}
}
type Enforcer struct {
Rules map[string]BaseCheck
DefaultRules []DefaultRule
OverWrite bool
}
func (e *Enforcer) RegisterDefaults(rules []DefaultRule) {
e.DefaultRules = rules
}
func (e *Enforcer) Enforce(rule string, target map[string]string, cred map[string]interface{}) (bool, error) {
if err := e.LoadRules(false); err != nil {
return false, err
}
toRule, ok := e.Rules[rule]
if !ok {
err := fmt.Errorf("rule [%s] does not exist", rule)
return false, err
}
return check(toRule, target, cred, *e, ""), nil
}
func (e *Enforcer) Authorize(rule string, target map[string]string, cred map[string]interface{}) (bool, error) {
return e.Enforce(rule, target, cred)
}
func (e *Enforcer) LoadRules(forcedReload bool) error {
path := os.Getenv("POLICY_PATH")
if path == "" {
path = constants.DefaultPolicyPath
}
fileInfo, err := os.Stat(path)
if err != nil {
return err
}
// Load all policy files that in the specified path
if fileInfo.IsDir() {
files, err := ioutil.ReadDir(path)
if err != nil {
return err
}
for _, f := range files {
if !f.IsDir() && strings.HasSuffix(f.Name(), ".json") {
err := e.LoadPolicyFile(path, forcedReload, false)
if err != nil {
return err
}
}
}
return nil
} else {
return e.LoadPolicyFile(path, forcedReload, e.OverWrite)
}
}
func (e *Enforcer) UpdateRules(rules map[string]BaseCheck) {
if e.Rules == nil {
e.Rules = make(map[string]BaseCheck)
}
for k, c := range rules {
e.Rules[k] = c
}
}
func (e *Enforcer) LoadPolicyFile(path string, forcedReload bool, overWrite bool) error {
// if rules is already set or user doesn't want to force reload, return it.
if e.Rules != nil && !forcedReload {
return nil
}
data, err := ioutil.ReadFile(path)
if err != nil {
msg := fmt.Sprintf("read policy file (%s) failed, reason:(%v)", path, err)
log.Infof(msg)
return fmt.Errorf(msg)
}
r, err := NewRules(data, e.DefaultRules)
if err != nil {
return err
}
if overWrite {
e.Rules = r.Rules
} else {
e.UpdateRules(r.Rules)
}
return nil
}
func NewRules(data []byte, defaultRule []DefaultRule) (*Rules, error) {
r := &Rules{}
err := r.Load(data, defaultRule)
return r, err
}
type Rules struct {
Rules map[string]BaseCheck
}
func (r *Rules) Load(data []byte, defaultRules []DefaultRule) error {
rulesMap := map[string]string{}
err := json.Unmarshal(data, &rulesMap)
if err != nil {
log.Errorf(err.Error())
return err
}
// add default value
for _, r := range defaultRules {
if v, ok := rulesMap[r.Name]; ok {
log.Errorf("policy rule (%s:%s) has conflict with default rule(%s:%s),abandon default value\n",
r.Name, v, r.Name, r.CheckStr)
} else {
rulesMap[r.Name] = r.CheckStr
}
}
if r.Rules == nil {
r.Rules = make(map[string]BaseCheck)
}
for k, v := range rulesMap {
r.Rules[k] = parseRule(v)
}
return nil
}
func (r *Rules) String() string {
b, _ := json.MarshalIndent(r.Rules, "", " ")
return string(b)
}
func Authorize(req *restful.Request, res *restful.Response, action string) bool {
if os.Getenv("OS_AUTH_AUTHSTRATEGY") != "keystone" {
return true
}
ctx := context.GetContext(req)
credentials := ctx.ToPolicyValue()
//TenantId := httpCtx.Input.Param(":tenantId")
TenantId := req.PathParameter("tenantId")
target := map[string]string{
"tenant_id": TenantId,
}
log.Infof("Action: %v", action)
log.Infof("Target: %v", target)
log.Infof("policy-Credentials: %v", credentials)
ok, err := enforcer.Authorize(action, target, credentials)
if err != nil {
log.Errorf("authorize failed, %s", err)
}
if !ok {
model.HttpError(res, http.StatusForbidden, "Operation is not permitted")
} else {
ctx.IsAdmin = utils.Contained("admin", ctx.Roles)
}
return ok
}
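Illustrative note (not part of policy.go): the merge performed by Rules.Load — file-defined rules win, defaults only fill in missing names — can be sketched in a few lines of Python; the rule name "volume:create" is hypothetical.
import json

DEFAULT_RULES = {"context_is_admin": "role:admin"}  # mirrors listRules() above

def merge_policy(policy_json):
    # Rules from the policy file take precedence; defaults are added only for
    # names the file does not define, as in the conflict check in Rules.Load.
    rules = json.loads(policy_json)
    for name, check_str in DEFAULT_RULES.items():
        rules.setdefault(name, check_str)
    return rules

print(merge_policy('{"volume:create": "rule:context_is_admin"}'))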
|
[
"\"POLICY_PATH\"",
"\"OS_AUTH_AUTHSTRATEGY\""
] |
[] |
[
"OS_AUTH_AUTHSTRATEGY",
"POLICY_PATH"
] |
[]
|
["OS_AUTH_AUTHSTRATEGY", "POLICY_PATH"]
|
go
| 2 | 0 | |
evaluate.py
|
import argparse
import multiprocessing
import os
import time
from collections import OrderedDict
from typing import Dict
import lavd
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
GPT2Config,
GPT2LMHeadModel,
GPT2Tokenizer,
)
from checkpoint import load_checkpoint, log_epoch_stats, metrics
from dataset import TextDataset, mask_tokens
batch_size = 1
num_workers = multiprocessing.cpu_count()
num_gpus = torch.cuda.device_count()
seed = 1234
def evaluate(
data_loader: DataLoader,
model: nn.Module,
device: torch.device,
logger: lavd.Logger,
masked_lm: bool = True,
name: str = "",
) -> Dict:
# Disables autograd during validation mode
torch.set_grad_enabled(False)
model.eval()
sampler = (
data_loader.sampler # type: ignore
if isinstance(data_loader.sampler, DistributedSampler) # type: ignore
else None
)
losses = []
pbar = logger.progress_bar(
name, total=len(data_loader.dataset), leave=False, dynamic_ncols=True
)
tokeniser = data_loader.dataset.tokeniser # type: ignore
for d in data_loader:
d = d.to(device)
inputs, labels = mask_tokens(d, tokeniser) if masked_lm else (d, d)
# The last batch may not be a full batch
curr_batch_size = inputs.size(0)
output = (
model(inputs, masked_lm_labels=labels)
if masked_lm
else model(inputs, labels=labels)
)
loss = output[0]
losses.append(loss.item())
pbar.update(
curr_batch_size
if sampler is None
else curr_batch_size * sampler.num_replicas # type: ignore
)
pbar.close()
loss = torch.mean(torch.tensor(losses, device=device))
# Gather the loss onto the primary process to have accurate metrics.
if sampler is not None:
gathered_losses = [
torch.zeros_like(loss) for _ in range(sampler.num_replicas) # type: ignore
]
dist.all_gather(gathered_losses, loss)
loss = torch.mean(torch.tensor(gathered_losses))
perplexity = torch.exp(loss)
return OrderedDict(loss=loss.item(), perplexity=perplexity.item())
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"-d",
"--dataset",
dest="datasets",
nargs="+",
metavar="[NAME=]PATH",
required=True,
type=str,
help=(
"List of text files to evaluate. "
"If no name is specified it uses the name of the text file."
),
)
parser.add_argument(
"-c",
"--checkpoint",
dest="checkpoint",
required=True,
nargs="+",
type=str,
help="Paths to the checkpoints to be evaluated",
)
parser.add_argument(
"-b",
"--batch-size",
dest="batch_size",
default=batch_size,
type=int,
help="Size of data batches [Default: {}]".format(batch_size),
)
parser.add_argument(
"-w",
"--workers",
dest="num_workers",
default=num_workers,
type=int,
help="Number of workers for loading the data [Default: {}]".format(num_workers),
)
parser.add_argument(
"-g",
"--gpus",
dest="num_gpus",
default=num_gpus,
type=int,
help="Number of GPUs to use [Default: {}]".format(num_gpus),
)
parser.add_argument(
"--no-cuda",
dest="no_cuda",
action="store_true",
help="Do not use CUDA even if it's available",
)
parser.add_argument(
"-s",
"--seed",
dest="seed",
default=seed,
type=int,
help="Seed for random initialisation [Default: {}]".format(seed),
)
return parser.parse_args()
def main():
options = parse_args()
torch.manual_seed(options.seed)
use_cuda = torch.cuda.is_available() and not options.no_cuda
if use_cuda:
# Somehow this fixes an unknown error on Windows.
torch.cuda.current_device()
# Get rid of the annoying warnings about TensorFlow not being compiled with
# certain CPU instructions.
# TensorFlow is not even used, but because transformers supports it alongside
# PyTorch, the warnings would otherwise be spammed constantly.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
if use_cuda and options.num_gpus > 1:
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12345"
# Manually adjust the batch size and workers to split amongst the processes.
options.batch_size = options.batch_size // options.num_gpus
options.num_workers = options.num_workers // options.num_gpus
mp.spawn(run, nprocs=options.num_gpus, args=(options, True))
else:
run(0, options)
def run(gpu_id, options, distributed=False):
if distributed:
dist.init_process_group(
backend="nccl",
rank=gpu_id,
world_size=options.num_gpus,
init_method="env://",
)
torch.cuda.set_device(gpu_id)
use_cuda = torch.cuda.is_available() and not options.no_cuda
device = torch.device("cuda" if use_cuda else "cpu")
for cp in options.checkpoint:
checkpoint = load_checkpoint(os.path.join(cp, "stats.pth"))
name = "evaluate/{}".format(cp)
logger = lavd.Logger(name, disabled=gpu_id != 0)
spinner = logger.spinner("Initialising")
spinner.start()
# All but the primary GPU wait here, so that only the primary process loads the
# pre-trained model and the rest uses the cached version.
if distributed and gpu_id != 0:
torch.distributed.barrier()
model_kind = checkpoint["model"].get("kind")
use_special = True
masked_lm = True
add_space = False
if model_kind == "bert" or model_kind == "bert-scratch":
config = BertConfig.from_pretrained(cp)
model = BertForMaskedLM.from_pretrained(cp, config=config)
tokeniser = BertTokenizer.from_pretrained(cp)
elif model_kind == "gpt2" or model_kind == "gpt2-scratch":
config = GPT2Config.from_pretrained(cp)
model = GPT2LMHeadModel.from_pretrained(cp, config=config)
tokeniser = GPT2Tokenizer.from_pretrained(cp)
masked_lm = False
use_special = False
add_space = True
else:
raise Exception("No model available for {}".format(model_kind))
model = model.to(device)
# Primary process has loaded the model and the others can now load the cached
# version.
if distributed and gpu_id == 0:
torch.distributed.barrier()
data_loaders = []
for data_file in options.datasets:
data = data_file.split("=", 1)
if len(data) > 1:
# Remove whitespace around the name
name = data[0].strip()
# Expand the ~ to the full path as it won't be done automatically since
# it's not at the beginning of the word.
file_path = os.path.expanduser(data[1])
else:
name = None
file_path = data[0]
dataset = TextDataset(
file_path,
tokeniser,
name=name,
use_special=use_special,
add_space=add_space,
)
sampler = (
DistributedSampler(
dataset, num_replicas=options.num_gpus, rank=gpu_id, shuffle=False
)
if distributed
else None
)
data_loader = DataLoader(
dataset,
batch_size=options.batch_size,
shuffle=False,
num_workers=options.num_workers,
sampler=sampler,
pin_memory=True,
)
data_loaders.append(data_loader)
if distributed:
model = DistributedDataParallel(
model, device_ids=[gpu_id], find_unused_parameters=True
)
# Wait for all processes to load everything before starting evaluation.
# Not strictly necessary, since they will wait once the actual model is run, but
# this makes it nicer to show the spinner until all of them are ready.
if distributed:
torch.distributed.barrier()
spinner.stop()
start_time = time.time()
logger.set_prefix("Evaluation - {}".format(cp))
results = []
for data_loader in data_loaders:
data_name = data_loader.dataset.name
logger.start(data_name)
result = evaluate(
data_loader,
model,
device=device,
name=data_name,
logger=logger,
masked_lm=masked_lm,
)
result["name"] = data_name
results.append(result)
logger.end(data_name)
time_difference = time.time() - start_time
evaluation_results = [
OrderedDict(
name=result["name"],
stats=OrderedDict(loss=result["loss"], perplexity=result["perplexity"]),
)
for result in results
]
log_epoch_stats(
logger, evaluation_results, metrics, time_elapsed=time_difference
)
if __name__ == "__main__":
main()
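Illustrative note (not part of evaluate.py): the reported metric is simply the exponential of the mean cross-entropy loss; with several processes, their per-process means are averaged first, as the all_gather block in evaluate() does. A tiny sketch with made-up numbers:
import math

def perplexity_from_losses(per_process_mean_losses):
    # Average the per-process mean losses, then exponentiate to get perplexity.
    loss = sum(per_process_mean_losses) / len(per_process_mean_losses)
    return math.exp(loss)

print(round(perplexity_from_losses([2.1, 1.9]), 3))  # exp(2.0) ~= 7.389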
|
[] |
[] |
[
"MASTER_ADDR",
"MASTER_PORT",
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["MASTER_ADDR", "MASTER_PORT", "TF_CPP_MIN_LOG_LEVEL"]
|
python
| 3 | 0 | |
lib/WebOb-1.0.8/tests/performance_test.py
|
#!/usr/bin/env python
import webob
def make_middleware(app):
from repoze.profile.profiler import AccumulatingProfileMiddleware
return AccumulatingProfileMiddleware(
app,
log_filename='/tmp/profile.log',
discard_first_request=True,
flush_at_shutdown=True,
path='/__profile__')
def simple_app(environ, start_response):
resp = webob.Response('Hello world!')
return resp(environ, start_response)
if __name__ == '__main__':
import sys
import os
import signal
if sys.argv[1:]:
arg = sys.argv[1]
else:
arg = None
if arg in ['open', 'run']:
import subprocess
import webbrowser
import time
os.environ['SHOW_OUTPUT'] = '0'
proc = subprocess.Popen([sys.executable, __file__])
time.sleep(1)
subprocess.call(['ab', '-n', '1000', 'http://localhost:8080/'])
if arg == 'open':
webbrowser.open('http://localhost:8080/__profile__')
print 'Hit ^C to end'
try:
while 1:
raw_input()
finally:
os.kill(proc.pid, signal.SIGKILL)
else:
from paste.httpserver import serve
if os.environ.get('SHOW_OUTPUT') != '0':
print 'Note you can also use:'
print ' %s %s open' % (sys.executable, __file__)
print 'to run ab and open a browser (or "run" to just run ab)'
print 'Now do:'
print 'ab -n 1000 http://localhost:8080/'
print 'wget -O - http://localhost:8080/__profile__'
serve(make_middleware(simple_app))
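Illustrative note (not part of the script): SHOW_OUTPUT only exists so the parent can spawn a quiet copy of itself as the server while it drives ApacheBench; a sketch of that handshake, with the environment passed explicitly instead of mutated in place:
import os
import subprocess
import sys
import time

def run_benchmark(script_path):
    # Start the server with its usage banner suppressed, wait for it to bind,
    # then hammer it with ab, mirroring the 'run'/'open' branch above.
    env = dict(os.environ, SHOW_OUTPUT="0")
    proc = subprocess.Popen([sys.executable, script_path], env=env)
    time.sleep(1)
    subprocess.call(["ab", "-n", "1000", "http://localhost:8080/"])
    return proc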
|
[] |
[] |
[
"SHOW_OUTPUT"
] |
[]
|
["SHOW_OUTPUT"]
|
python
| 1 | 0 | |
datalad/tests/utils_testrepos.py
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import os
import tempfile
from abc import ABCMeta, abstractmethod
from os.path import join as opj, exists
from ..support.gitrepo import GitRepo
from ..support.annexrepo import AnnexRepo
from ..support.network import get_local_file_url
from ..support.external_versions import external_versions
from ..utils import swallow_outputs
from ..utils import swallow_logs
from ..utils import on_windows
from ..version import __version__
from . import _TEMP_PATHS_GENERATED
from .utils import get_tempfile_kwargs
from datalad.customremotes.base import init_datalad_remote
from datalad import cfg as dl_cfg
# we need a local file, that is supposed to be treated as a remote file via
# file-scheme URL
remote_file_fd, remote_file_path = \
tempfile.mkstemp(**get_tempfile_kwargs(
{'dir': dl_cfg.get("datalad.tests.temp.dir")}, prefix='testrepo'))
# to be removed upon teardown
_TEMP_PATHS_GENERATED.append(remote_file_path)
with open(remote_file_path, "w") as f:
f.write("content to be annex-addurl'd")
# OS-level descriptor needs to be closed!
os.close(remote_file_fd)
class TestRepo(object, metaclass=ABCMeta):
REPO_CLASS = None # Assign to the class to be used in the subclass
def __init__(self, path=None, puke_if_exists=True):
if not path:
path = \
tempfile.mktemp(**get_tempfile_kwargs(
{'dir': dl_cfg.get("datalad.tests.temp.dir")},
prefix='testrepo'))
# to be removed upon teardown
_TEMP_PATHS_GENERATED.append(path)
if puke_if_exists and exists(path):
raise RuntimeError("Directory %s for test repo already exist" % path)
# swallow logs so we don't print all those about crippled FS etc
with swallow_logs():
self.repo = self.REPO_CLASS(path)
# For additional testing of our datalad remote, so that it does not interfere
# and manages to handle all http urls and requests:
if self.REPO_CLASS is AnnexRepo and \
os.environ.get('DATALAD_TESTS_DATALADREMOTE'):
init_datalad_remote(self.repo, 'datalad', autoenable=True)
self._created = False
@property
def path(self):
return self.repo.path
@property
def url(self):
return get_local_file_url(self.path, compatibility='git')
def create_file(self, name, content, add=True, annex=False):
filename = opj(self.path, name)
with open(filename, 'wb') as f:
f.write(content.encode())
if add:
if annex:
if isinstance(self.repo, AnnexRepo):
self.repo.add(name)
else:
raise ValueError("Can't annex add to a non-annex repo.")
else:
self.repo.add(name, git=True)
def create(self):
if self._created:
assert(exists(self.path))
return # was already done
with swallow_outputs(): # we don't need those outputs at this point
self.populate()
self._created = True
@abstractmethod
def populate(self):
raise NotImplementedError("Should be implemented in sub-classes")
class BasicAnnexTestRepo(TestRepo):
"""Creates a basic test git-annex repository"""
REPO_CLASS = AnnexRepo
def populate(self):
self.create_info_file()
self.create_file('test.dat', '123\n', annex=False)
self.repo.commit("Adding a basic INFO file and rudimentary load file for annex testing")
# even this doesn't work on bloody Windows
fileurl = get_local_file_url(remote_file_path, compatibility='git-annex')
# Note:
# The line above used to be conditional:
# if not on_windows \
# else "https://raw.githubusercontent.com/datalad/testrepo--basic--r1/master/test.dat"
# This self-reference-ish construction (pointing to 'test.dat'
# and therefore having the same content in git and annex) is outdated and
# causes trouble especially in annex V6 repos.
self.repo.add_url_to_file("test-annex.dat", fileurl)
self.repo.commit("Adding a rudimentary git-annex load file")
self.repo.drop("test-annex.dat") # since available from URL
def create_info_file(self):
annex_version = external_versions['cmd:annex']
git_version = external_versions['cmd:git']
self.create_file('INFO.txt',
"Testrepo: %s\n"
"git: %s\n"
"annex: %s\n"
"datalad: %s\n"
% (self.__class__, git_version, annex_version, __version__),
annex=False)
class BasicGitTestRepo(TestRepo):
"""Creates a basic test git repository."""
REPO_CLASS = GitRepo
def populate(self):
self.create_info_file()
self.create_file('test.dat', '123\n', annex=False)
self.repo.commit("Adding a basic INFO file and rudimentary "
"load file.")
def create_info_file(self):
git_version = external_versions['cmd:git']
self.create_file('INFO.txt',
"Testrepo: %s\n"
"git: %s\n"
"datalad: %s\n"
% (self.__class__, git_version, __version__),
annex=False)
class SubmoduleDataset(BasicAnnexTestRepo):
def populate(self):
super(SubmoduleDataset, self).populate()
# add submodules
annex = BasicAnnexTestRepo()
annex.create()
kw = dict(expect_stderr=True)
self.repo.call_git(
['submodule', 'add', annex.url, 'subm 1'], **kw)
self.repo.call_git(
['submodule', 'add', annex.url, '2'], **kw)
self.repo.commit('Added subm 1 and 2.')
self.repo.call_git(
['submodule', 'update', '--init', '--recursive'], **kw)
# init annex in subdatasets
for s in ('subm 1', '2'):
AnnexRepo(opj(self.path, s), init=True)
class NestedDataset(BasicAnnexTestRepo):
def populate(self):
super(NestedDataset, self).populate()
ds = SubmoduleDataset()
ds.create()
kw = dict(expect_stderr=True)
self.repo.call_git(
['submodule', 'add', ds.url, 'sub dataset1'], **kw)
self.repo.call_git(
['-C', opj(self.path, 'sub dataset1'),
'submodule', 'add', ds.url, 'sub sub dataset1'],
**kw)
GitRepo(opj(self.path, 'sub dataset1')).commit('Added sub dataset.')
self.repo.commit('Added subdatasets.', options=["-a"])
self.repo.call_git(
['submodule', 'update', '--init', '--recursive'],
**kw)
# init all annexes
for s in ('', 'sub dataset1', opj('sub dataset1', 'sub sub dataset1')):
AnnexRepo(opj(self.path, s), init=True)
class InnerSubmodule(object):
def __init__(self):
self._ds = NestedDataset()
@property
def path(self):
return opj(self._ds.path, 'sub dataset1', 'subm 1')
@property
def url(self):
return get_local_file_url(self.path, compatibility='git')
def create(self):
self._ds.create()
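Illustrative note (not part of the module): typical use of these fixtures is to instantiate one, call create() once, and hand its path/url to the code under test; create() is idempotent because populate() only runs the first time.
def make_fixture():
    # BasicAnnexTestRepo picks a temp dir and registers it for teardown.
    repo = BasicAnnexTestRepo()
    repo.create()            # populates INFO.txt, test.dat and test-annex.dat
    repo.create()            # no-op: _created short-circuits a second populate()
    return repo.path, repo.url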
|
[] |
[] |
[
"DATALAD_TESTS_DATALADREMOTE"
] |
[]
|
["DATALAD_TESTS_DATALADREMOTE"]
|
python
| 1 | 0 | |
Mini Games/not_mario/utils/__init__.py
|
# Create by Packetsss
# Personal use is allowed
# Commercial use is prohibited
from .settings import *
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import sys
import time
import pathlib
import numpy as np
import pygame as pg
pg.init()
pg.font.init()
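Illustrative note (not part of the package): PYGAME_HIDE_SUPPORT_PROMPT is read when pygame is first imported, so the assignment above only works because it happens before the pygame import; setting it afterwards has no effect. A minimal standalone reproduction:
import os

os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"  # must precede the import
import pygame  # noqa: E402 -- the support prompt is now suppressed

pygame.init()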
|
[] |
[] |
[
"PYGAME_HIDE_SUPPORT_PROMPT"
] |
[]
|
["PYGAME_HIDE_SUPPORT_PROMPT"]
|
python
| 1 | 0 | |
rbapi/rbapi/settings.py
|
"""
Django settings for rbapi project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY WARNING: don't run with debug turned on in production!
SECRET_KEY = os.environ.get("SECRET_KEY", 'lob2t$8)n*-lh#40k7$d5v(y4vlm1%v%q_%ci261^g=q)(!--c')
API_KEY = os.environ.get("API_KEY", '12345')
DEBUG = int(os.environ.get("DEBUG", default=0))
ALLOWED_HOSTS = os.environ.get("ALLOWED_HOSTS", "*").split(" ")
PERMITTED_HOSTS = os.environ.get("PERMITTED_HOSTS", ['192.168.0.105', '192.168.0.100', '192.168.0.102'])
HOSTNAME = os.environ.get('HOSTNAME', default='localhost:8000')
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# Application definition
INSTALLED_APPS = [
'jet',
'rest_framework',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps',
'apps.api',
'apps.update',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'rbapi.urls'
WSGI_APPLICATION = 'rbapi.wsgi.application'
DATABASES = {
"default": {
"ENGINE": os.environ.get("SQL_ENGINE", "django.db.backends.postgresql_psycopg2"),
"NAME": os.environ.get("SQL_DATABASE", "rbapi"),
"USER": os.environ.get("SQL_USER", "django"),
"PASSWORD": os.environ.get("SQL_PASSWORD", "django"),
"HOST": os.environ.get("SQL_HOST", "192.168.0.108"),
"PORT": os.environ.get("SQL_PORT", "5432"),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "rbapi/static")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
# Media files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
],
},
},
]
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 20,
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
],
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '{levelname} {asctime} {module} {process:d} {thread:d} {message}',
'style': '{',
},
'simple': {
'format': '{levelname} {message}',
'style': '{',
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'api.log'),
'formatter': 'verbose',
},
},
'loggers': {
'django': {
'handlers': ['console', 'file'],
'propagate': True,
},
'django.request': {
'handlers': ['mail_admins', 'file'],
'level': 'ERROR',
'propagate': False,
},
}
}
X_FRAME_OPTIONS = 'SAMEORIGIN'
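Illustrative note (not part of settings.py): every knob above comes from the process environment, so a deployment only needs to export plain strings; note that DEBUG is wrapped in int() and ALLOWED_HOSTS is split on spaces. The values below are hypothetical.
import os

os.environ.update({
    "SECRET_KEY": "change-me",                     # hypothetical value
    "DEBUG": "1",                                  # int("1") -> truthy
    "ALLOWED_HOSTS": "api.example.com localhost",  # split(" ") -> two hosts
    "SQL_HOST": "db",
    "SQL_PORT": "5432",
})
# Importing the settings module after this point would pick these values up.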
|
[] |
[] |
[
"ALLOWED_HOSTS",
"SQL_PASSWORD",
"SQL_ENGINE",
"API_KEY",
"SQL_HOST",
"SQL_DATABASE",
"SQL_USER",
"SECRET_KEY",
"HOSTNAME",
"SQL_PORT",
"DEBUG",
"PERMITTED_HOSTS"
] |
[]
|
["ALLOWED_HOSTS", "SQL_PASSWORD", "SQL_ENGINE", "API_KEY", "SQL_HOST", "SQL_DATABASE", "SQL_USER", "SECRET_KEY", "HOSTNAME", "SQL_PORT", "DEBUG", "PERMITTED_HOSTS"]
|
python
| 12 | 0 | |
deploy/docker/conf_util.go
|
package main
import (
"fmt"
"github.com/go-yaml/yaml"
"io/ioutil"
"os"
"strconv"
"strings"
)
var khome = os.Getenv("KUIPER_HOME")
var fileMap = map[string]string{
"edgex": khome + "/etc/sources/edgex.yaml",
"random": khome + "/etc/sources/random.yaml",
"zmq": khome + "/etc/sources/zmq.yaml",
"httppull": khome + "/etc/sources/httppull.yaml",
"mqtt_source": khome + "/etc/mqtt_source.yaml",
"kuiper": khome + "/etc/kuiper.yaml",
"client": khome + "/etc/client.yaml",
}
var file_keys_map = map[string]map[string]string{
"edgex": {
"SERVICESERVER": "serviceServer",
"CLIENTID": "ClientId",
"USERNAME": "Username",
"PASSWORD": "Password",
"QOS": "Qos",
"KEEPALIVE": "KeepAlive",
"RETAINED": "Retained",
"CONNECTIONPAYLOAD": "ConnectionPayload",
"CERTFILE": "CertFile",
"KEYFILE": "KeyFile",
"CERTPEMBLOCK": "CertPEMBlock",
"KEYPEMBLOCK": "KeyPEMBlock",
"SKIPCERTVERIFY": "SkipCertVerify",
},
"mqtt_source": {
"SHAREDSUBSCRIPTION": "sharedSubscription",
"CERTIFICATIONPATH": "certificationPath",
"PRIVATEKEYPATH": "privateKeyPath",
"KUBEEDGEVERSION": "kubeedgeVersion",
"KUBEEDGEMODELFILE": "kubeedgeModelFile",
},
"kuiper": {
"CONSOLELOG": "consoleLog",
"FILELOG": "fileLog",
"RESTPORT": "restPort",
"RESTTLS": "restTls",
"PROMETHEUSPORT": "prometheusPort",
"PLUGINHOSTS": "pluginHosts",
"CHECKPOINTINTERVAL": "checkpointInterval",
"CACHETHRESHOLD": "cacheThreshold",
"CACHETRIGGERCOUNT": "cacheTriggerCount",
"DISABLECACHE": "disableCache",
},
}
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
func deleteFile(path string) {
// delete file
var err = os.Remove(path)
if err != nil {
fmt.Printf("Failed to delete original file: %s due to error %s... The conf util is going to exit.\n", path, err)
os.Exit(0)
return
}
fmt.Println("File Deleted")
}
func main() {
fmt.Println(fileMap["edgex"])
files := make(map[string]map[interface{}]interface{})
ProcessEnv(files, os.Environ())
for f, v := range files {
if bs, err := yaml.Marshal(v); err != nil {
fmt.Println(err)
} else {
message := fmt.Sprintf("-------------------\nConf file %s: \n %s", f, string(bs))
fmt.Println(message)
if fname, ok := fileMap[f]; ok {
if fileExists(fname) {
deleteFile(fname)
}
if e := ioutil.WriteFile(fname, bs, 0644); e != nil {
fmt.Println(e)
}
}
}
}
}
func ProcessEnv(files map[string]map[interface{}]interface{}, vars []string) {
for _, e := range vars {
pair := strings.SplitN(e, "=", 2)
if len(pair) != 2 {
fmt.Printf("invalid env %s, skip it.\n", e)
continue
}
valid := false
for k, _ := range fileMap {
if strings.HasPrefix(pair[0], strings.ToUpper(k)) {
valid = true
break
}
}
if !valid {
continue
} else {
fmt.Printf("Find env: %s, start to handle it.\n", e)
}
env_v := strings.ReplaceAll(pair[0], "__", ".")
keys := strings.Split(env_v, ".")
for i, v := range keys {
keys[i] = v
}
if len(keys) < 2 {
fmt.Printf("not concerned env %s, skip it.\n", e)
continue
} else {
k := strings.ToLower(keys[0])
if v, ok := files[k]; !ok {
if data, err := ioutil.ReadFile(fileMap[k]); err != nil {
fmt.Printf("%s\n", err)
} else {
m := make(map[interface{}]interface{})
err = yaml.Unmarshal([]byte(data), &m)
if err != nil {
fmt.Println(err)
}
files[k] = m
Handle(k, m, keys[1:], pair[1])
}
} else {
Handle(k, v, keys[1:], pair[1])
}
}
}
}
func Handle(file string, conf map[interface{}]interface{}, skeys []string, val string) {
key := getKey(file, skeys[0])
if len(skeys) == 1 {
conf[key] = getValueType(val)
} else if len(skeys) >= 2 {
if v, ok := conf[key]; ok {
if v1, ok1 := v.(map[interface{}]interface{}); ok1 {
Handle(file, v1, skeys[1:], val)
} else {
fmt.Printf("Not expected map: %v\n", v)
}
} else {
v1 := make(map[interface{}]interface{})
conf[key] = v1
Handle(file, v1, skeys[1:], val)
}
}
}
func getKey(file string, key string) string {
if m, ok := file_keys_map[file][key]; ok {
return m
} else {
return strings.ToLower(key)
}
}
func getValueType(val string) interface{} {
val = strings.Trim(val, " ")
if strings.HasPrefix(val, "[") && strings.HasSuffix(val, "]") {
val = strings.ReplaceAll(val, "[", "")
val = strings.ReplaceAll(val, "]", "")
vals := strings.Split(val, ",")
var ret []interface{}
for _, v := range vals {
if i, err := strconv.ParseInt(v, 10, 64); err == nil {
ret = append(ret, i)
} else if b, err := strconv.ParseBool(v); err == nil {
ret = append(ret, b)
} else if f, err := strconv.ParseFloat(v, 64); err == nil {
ret = append(ret, f)
} else {
ret = append(ret, v)
}
}
return ret
} else if i, err := strconv.ParseInt(val, 10, 64); err == nil {
return i
} else if b, err := strconv.ParseBool(val); err == nil {
return b
} else if f, err := strconv.ParseFloat(val, 64); err == nil {
return f
}
return val
}
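Illustrative note (not part of conf_util.go): the core transformation is a name like EDGEX__OPTIONAL__QOS=1 becoming a nested YAML key with a typed value; the Python sketch below mirrors the '__' splitting and the int/float/bool coercion, but omits the file_keys_map special cases. The variable name is hypothetical.
def env_to_nested(env_name, raw_value):
    # "EDGEX__OPTIONAL__QOS", "1"  ->  {"edgex": {"optional": {"qos": 1}}}
    parts = env_name.replace("__", ".").split(".")
    file_key, keys = parts[0].lower(), [k.lower() for k in parts[1:]]
    node = result = {}
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    node[keys[-1]] = coerce(raw_value)
    return {file_key: result}

def coerce(value):
    # Try int, then float, then bool, otherwise keep the raw string.
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            pass
    if value.lower() in ("true", "false"):
        return value.lower() == "true"
    return value

print(env_to_nested("EDGEX__OPTIONAL__QOS", "1"))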
|
[
"\"KUIPER_HOME\""
] |
[] |
[
"KUIPER_HOME"
] |
[]
|
["KUIPER_HOME"]
|
go
| 1 | 0 | |
vendor/github.com/gopherjs/gopherjs/tool.go
|
package main
import (
"bytes"
"errors"
"fmt"
"go/ast"
"go/build"
"go/doc"
"go/parser"
"go/scanner"
"go/token"
"go/types"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"syscall"
"text/template"
"time"
"unicode"
"unicode/utf8"
gbuild "github.com/gopherjs/gopherjs/build"
"github.com/gopherjs/gopherjs/compiler"
"github.com/gopherjs/gopherjs/internal/sysutil"
"github.com/kisielk/gotool"
"github.com/neelance/sourcemap"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"golang.org/x/crypto/ssh/terminal"
)
var currentDirectory string
func init() {
var err error
currentDirectory, err = os.Getwd()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
currentDirectory, err = filepath.EvalSymlinks(currentDirectory)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
gopaths := filepath.SplitList(build.Default.GOPATH)
if len(gopaths) == 0 {
fmt.Fprintf(os.Stderr, "$GOPATH not set. For more details see: go help gopath\n")
os.Exit(1)
}
}
func main() {
var (
options = &gbuild.Options{CreateMapFile: true}
pkgObj string
tags string
)
flagVerbose := pflag.NewFlagSet("", 0)
flagVerbose.BoolVarP(&options.Verbose, "verbose", "v", false, "print the names of packages as they are compiled")
flagQuiet := pflag.NewFlagSet("", 0)
flagQuiet.BoolVarP(&options.Quiet, "quiet", "q", false, "suppress non-fatal warnings")
compilerFlags := pflag.NewFlagSet("", 0)
compilerFlags.BoolVarP(&options.Minify, "minify", "m", false, "minify generated code")
compilerFlags.BoolVar(&options.Color, "color", terminal.IsTerminal(int(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb", "colored output")
compilerFlags.StringVar(&tags, "tags", "", "a list of build tags to consider satisfied during the build")
compilerFlags.BoolVar(&options.MapToLocalDisk, "localmap", false, "use local paths for sourcemap")
flagWatch := pflag.NewFlagSet("", 0)
flagWatch.BoolVarP(&options.Watch, "watch", "w", false, "watch for changes to the source files")
cmdBuild := &cobra.Command{
Use: "build [packages]",
Short: "compile packages and dependencies",
}
cmdBuild.Flags().StringVarP(&pkgObj, "output", "o", "", "output file")
cmdBuild.Flags().AddFlagSet(flagVerbose)
cmdBuild.Flags().AddFlagSet(flagQuiet)
cmdBuild.Flags().AddFlagSet(compilerFlags)
cmdBuild.Flags().AddFlagSet(flagWatch)
cmdBuild.Run = func(cmd *cobra.Command, args []string) {
options.BuildTags = strings.Fields(tags)
for {
s := gbuild.NewSession(options)
err := func() error {
// Handle "gopherjs build [files]" ad-hoc package mode.
if len(args) > 0 && (strings.HasSuffix(args[0], ".go") || strings.HasSuffix(args[0], ".inc.js")) {
for _, arg := range args {
if !strings.HasSuffix(arg, ".go") && !strings.HasSuffix(arg, ".inc.js") {
return fmt.Errorf("named files must be .go or .inc.js files")
}
}
if pkgObj == "" {
basename := filepath.Base(args[0])
pkgObj = basename[:len(basename)-3] + ".js"
}
names := make([]string, len(args))
for i, name := range args {
name = filepath.ToSlash(name)
names[i] = name
if s.Watcher != nil {
s.Watcher.Add(name)
}
}
err := s.BuildFiles(args, pkgObj, currentDirectory)
return err
}
// Expand import path patterns.
patternContext := gbuild.NewBuildContext("", options.BuildTags)
pkgs := (&gotool.Context{BuildContext: *patternContext}).ImportPaths(args)
for _, pkgPath := range pkgs {
if s.Watcher != nil {
pkg, err := gbuild.NewBuildContext(s.InstallSuffix(), options.BuildTags).Import(pkgPath, "", build.FindOnly)
if err != nil {
return err
}
s.Watcher.Add(pkg.Dir)
}
pkg, err := gbuild.Import(pkgPath, 0, s.InstallSuffix(), options.BuildTags)
if err != nil {
return err
}
archive, err := s.BuildPackage(pkg)
if err != nil {
return err
}
if len(pkgs) == 1 { // Only consider writing output if single package specified.
if pkgObj == "" {
pkgObj = filepath.Base(pkg.Dir) + ".js"
}
if pkg.IsCommand() && !pkg.UpToDate {
if err := s.WriteCommandPackage(archive, pkgObj); err != nil {
return err
}
}
}
}
return nil
}()
exitCode := handleError(err, options, nil)
if s.Watcher == nil {
os.Exit(exitCode)
}
s.WaitForChange()
}
}
cmdInstall := &cobra.Command{
Use: "install [packages]",
Short: "compile and install packages and dependencies",
}
cmdInstall.Flags().AddFlagSet(flagVerbose)
cmdInstall.Flags().AddFlagSet(flagQuiet)
cmdInstall.Flags().AddFlagSet(compilerFlags)
cmdInstall.Flags().AddFlagSet(flagWatch)
cmdInstall.Run = func(cmd *cobra.Command, args []string) {
options.BuildTags = strings.Fields(tags)
for {
s := gbuild.NewSession(options)
err := func() error {
// Expand import path patterns.
patternContext := gbuild.NewBuildContext("", options.BuildTags)
pkgs := (&gotool.Context{BuildContext: *patternContext}).ImportPaths(args)
if cmd.Name() == "get" {
goGet := exec.Command("go", append([]string{"get", "-d", "-tags=js"}, pkgs...)...)
goGet.Stdout = os.Stdout
goGet.Stderr = os.Stderr
if err := goGet.Run(); err != nil {
return err
}
}
for _, pkgPath := range pkgs {
pkg, err := gbuild.Import(pkgPath, 0, s.InstallSuffix(), options.BuildTags)
if s.Watcher != nil && pkg != nil { // add watch even on error
s.Watcher.Add(pkg.Dir)
}
if err != nil {
return err
}
archive, err := s.BuildPackage(pkg)
if err != nil {
return err
}
if pkg.IsCommand() && !pkg.UpToDate {
if err := s.WriteCommandPackage(archive, pkg.PkgObj); err != nil {
return err
}
}
}
return nil
}()
exitCode := handleError(err, options, nil)
if s.Watcher == nil {
os.Exit(exitCode)
}
s.WaitForChange()
}
}
cmdDoc := &cobra.Command{
Use: "doc [arguments]",
Short: "display documentation for the requested, package, method or symbol",
}
cmdDoc.Run = func(cmd *cobra.Command, args []string) {
goDoc := exec.Command("go", append([]string{"doc"}, args...)...)
goDoc.Stdout = os.Stdout
goDoc.Stderr = os.Stderr
goDoc.Env = append(os.Environ(), "GOARCH=js")
err := goDoc.Run()
exitCode := handleError(err, options, nil)
os.Exit(exitCode)
}
cmdGet := &cobra.Command{
Use: "get [packages]",
Short: "download and install packages and dependencies",
}
cmdGet.Flags().AddFlagSet(flagVerbose)
cmdGet.Flags().AddFlagSet(flagQuiet)
cmdGet.Flags().AddFlagSet(compilerFlags)
cmdGet.Run = cmdInstall.Run
cmdRun := &cobra.Command{
Use: "run [gofiles...] [arguments...]",
Short: "compile and run Go program",
}
cmdRun.Flags().AddFlagSet(flagVerbose)
cmdRun.Flags().AddFlagSet(flagQuiet)
cmdRun.Flags().AddFlagSet(compilerFlags)
cmdRun.Run = func(cmd *cobra.Command, args []string) {
err := func() error {
lastSourceArg := 0
for {
if lastSourceArg == len(args) || !(strings.HasSuffix(args[lastSourceArg], ".go") || strings.HasSuffix(args[lastSourceArg], ".inc.js")) {
break
}
lastSourceArg++
}
if lastSourceArg == 0 {
return fmt.Errorf("gopherjs run: no go files listed")
}
tempfile, err := ioutil.TempFile(currentDirectory, filepath.Base(args[0])+".")
if err != nil && strings.HasPrefix(currentDirectory, runtime.GOROOT()) {
tempfile, err = ioutil.TempFile("", filepath.Base(args[0])+".")
}
if err != nil {
return err
}
defer func() {
tempfile.Close()
os.Remove(tempfile.Name())
os.Remove(tempfile.Name() + ".map")
}()
s := gbuild.NewSession(options)
if err := s.BuildFiles(args[:lastSourceArg], tempfile.Name(), currentDirectory); err != nil {
return err
}
if err := runNode(tempfile.Name(), args[lastSourceArg:], "", options.Quiet); err != nil {
return err
}
return nil
}()
exitCode := handleError(err, options, nil)
os.Exit(exitCode)
}
cmdTest := &cobra.Command{
Use: "test [packages]",
Short: "test packages",
}
bench := cmdTest.Flags().String("bench", "", "Run benchmarks matching the regular expression. By default, no benchmarks run. To run all benchmarks, use '--bench=.'.")
benchtime := cmdTest.Flags().String("benchtime", "", "Run enough iterations of each benchmark to take t, specified as a time.Duration (for example, -benchtime 1h30s). The default is 1 second (1s).")
count := cmdTest.Flags().String("count", "", "Run each test and benchmark n times (default 1). Examples are always run once.")
run := cmdTest.Flags().String("run", "", "Run only those tests and examples matching the regular expression.")
short := cmdTest.Flags().Bool("short", false, "Tell long-running tests to shorten their run time.")
verbose := cmdTest.Flags().BoolP("verbose", "v", false, "Log all tests as they are run. Also print all text from Log and Logf calls even if the test succeeds.")
compileOnly := cmdTest.Flags().BoolP("compileonly", "c", false, "Compile the test binary to pkg.test.js but do not run it (where pkg is the last element of the package's import path). The file name can be changed with the -o flag.")
outputFilename := cmdTest.Flags().StringP("output", "o", "", "Compile the test binary to the named file. The test still runs (unless -c is specified).")
cmdTest.Flags().AddFlagSet(compilerFlags)
cmdTest.Run = func(cmd *cobra.Command, args []string) {
options.BuildTags = strings.Fields(tags)
err := func() error {
// Expand import path patterns.
patternContext := gbuild.NewBuildContext("", options.BuildTags)
args = (&gotool.Context{BuildContext: *patternContext}).ImportPaths(args)
pkgs := make([]*gbuild.PackageData, len(args))
for i, pkgPath := range args {
var err error
pkgs[i], err = gbuild.Import(pkgPath, 0, "", options.BuildTags)
if err != nil {
return err
}
}
var exitErr error
for _, pkg := range pkgs {
if len(pkg.TestGoFiles) == 0 && len(pkg.XTestGoFiles) == 0 {
fmt.Printf("? \t%s\t[no test files]\n", pkg.ImportPath)
continue
}
s := gbuild.NewSession(options)
tests := &testFuncs{Package: pkg.Package}
collectTests := func(testPkg *gbuild.PackageData, testPkgName string, needVar *bool) error {
if testPkgName == "_test" {
for _, file := range pkg.TestGoFiles {
if err := tests.load(filepath.Join(pkg.Package.Dir, file), testPkgName, &tests.ImportTest, &tests.NeedTest); err != nil {
return err
}
}
} else {
for _, file := range pkg.XTestGoFiles {
if err := tests.load(filepath.Join(pkg.Package.Dir, file), "_xtest", &tests.ImportXtest, &tests.NeedXtest); err != nil {
return err
}
}
}
_, err := s.BuildPackage(testPkg)
return err
}
if err := collectTests(&gbuild.PackageData{
Package: &build.Package{
ImportPath: pkg.ImportPath,
Dir: pkg.Dir,
GoFiles: append(pkg.GoFiles, pkg.TestGoFiles...),
Imports: append(pkg.Imports, pkg.TestImports...),
},
IsTest: true,
JSFiles: pkg.JSFiles,
}, "_test", &tests.NeedTest); err != nil {
return err
}
if err := collectTests(&gbuild.PackageData{
Package: &build.Package{
ImportPath: pkg.ImportPath + "_test",
Dir: pkg.Dir,
GoFiles: pkg.XTestGoFiles,
Imports: pkg.XTestImports,
},
IsTest: true,
}, "_xtest", &tests.NeedXtest); err != nil {
return err
}
buf := new(bytes.Buffer)
if err := testmainTmpl.Execute(buf, tests); err != nil {
return err
}
fset := token.NewFileSet()
mainFile, err := parser.ParseFile(fset, "_testmain.go", buf, 0)
if err != nil {
return err
}
importContext := &compiler.ImportContext{
Packages: s.Types,
Import: func(path string) (*compiler.Archive, error) {
if path == pkg.ImportPath || path == pkg.ImportPath+"_test" {
return s.Archives[path], nil
}
return s.BuildImportPath(path)
},
}
mainPkgArchive, err := compiler.Compile("main", []*ast.File{mainFile}, fset, importContext, options.Minify)
if err != nil {
return err
}
if *compileOnly && *outputFilename == "" {
*outputFilename = pkg.Package.Name + "_test.js"
}
var outfile *os.File
if *outputFilename != "" {
outfile, err = os.Create(*outputFilename)
if err != nil {
return err
}
} else {
outfile, err = ioutil.TempFile(currentDirectory, "test.")
if err != nil {
return err
}
}
defer func() {
outfile.Close()
if *outputFilename == "" {
os.Remove(outfile.Name())
os.Remove(outfile.Name() + ".map")
}
}()
if err := s.WriteCommandPackage(mainPkgArchive, outfile.Name()); err != nil {
return err
}
if *compileOnly {
continue
}
var args []string
if *bench != "" {
args = append(args, "-test.bench", *bench)
}
if *benchtime != "" {
args = append(args, "-test.benchtime", *benchtime)
}
if *count != "" {
args = append(args, "-test.count", *count)
}
if *run != "" {
args = append(args, "-test.run", *run)
}
if *short {
args = append(args, "-test.short")
}
if *verbose {
args = append(args, "-test.v")
}
status := "ok "
start := time.Now()
if err := runNode(outfile.Name(), args, pkg.Dir, options.Quiet); err != nil {
if _, ok := err.(*exec.ExitError); !ok {
return err
}
exitErr = err
status = "FAIL"
}
fmt.Printf("%s\t%s\t%.3fs\n", status, pkg.ImportPath, time.Since(start).Seconds())
}
return exitErr
}()
exitCode := handleError(err, options, nil)
os.Exit(exitCode)
}
cmdServe := &cobra.Command{
Use: "serve [root]",
Short: "compile on-the-fly and serve",
}
cmdServe.Flags().AddFlagSet(flagVerbose)
cmdServe.Flags().AddFlagSet(flagQuiet)
cmdServe.Flags().AddFlagSet(compilerFlags)
var addr string
cmdServe.Flags().StringVarP(&addr, "http", "", ":8080", "HTTP bind address to serve")
cmdServe.Run = func(cmd *cobra.Command, args []string) {
options.BuildTags = strings.Fields(tags)
dirs := append(filepath.SplitList(build.Default.GOPATH), build.Default.GOROOT)
var root string
if len(args) > 1 {
cmdServe.HelpFunc()(cmd, args)
os.Exit(1)
}
if len(args) == 1 {
root = args[0]
}
sourceFiles := http.FileServer(serveCommandFileSystem{
serveRoot: root,
options: options,
dirs: dirs,
sourceMaps: make(map[string][]byte),
})
ln, err := net.Listen("tcp", addr)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
if tcpAddr := ln.Addr().(*net.TCPAddr); tcpAddr.IP.Equal(net.IPv4zero) || tcpAddr.IP.Equal(net.IPv6zero) { // Any available addresses.
fmt.Printf("serving at http://localhost:%d and on port %d of any available addresses\n", tcpAddr.Port, tcpAddr.Port)
} else { // Specific address.
fmt.Printf("serving at http://%s\n", tcpAddr)
}
fmt.Fprintln(os.Stderr, http.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}, sourceFiles))
}
cmdVersion := &cobra.Command{
Use: "version",
Short: "print GopherJS compiler version",
}
cmdVersion.Run = func(cmd *cobra.Command, args []string) {
if len(args) > 0 {
cmdServe.HelpFunc()(cmd, args)
os.Exit(1)
}
fmt.Printf("GopherJS %s\n", compiler.Version)
}
rootCmd := &cobra.Command{
Use: "gopherjs",
Long: "GopherJS is a tool for compiling Go source code to JavaScript.",
}
rootCmd.AddCommand(cmdBuild, cmdGet, cmdInstall, cmdRun, cmdTest, cmdServe, cmdVersion, cmdDoc)
err := rootCmd.Execute()
if err != nil {
os.Exit(2)
}
}
// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
// connections. It's used by ListenAndServe and ListenAndServeTLS so
// dead TCP connections (e.g. closing laptop mid-download) eventually
// go away.
type tcpKeepAliveListener struct {
*net.TCPListener
}
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
tc, err := ln.AcceptTCP()
if err != nil {
return
}
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
}
type serveCommandFileSystem struct {
serveRoot string
options *gbuild.Options
dirs []string
sourceMaps map[string][]byte
}
func (fs serveCommandFileSystem) Open(requestName string) (http.File, error) {
name := path.Join(fs.serveRoot, requestName[1:]) // requestName[0] == '/'
dir, file := path.Split(name)
base := path.Base(dir) // base is parent folder name, which becomes the output file name.
isPkg := file == base+".js"
isMap := file == base+".js.map"
isIndex := file == "index.html"
if isPkg || isMap || isIndex {
// If we're going to be serving our special files, make sure there's a Go command in this folder.
s := gbuild.NewSession(fs.options)
pkg, err := gbuild.Import(path.Dir(name), 0, s.InstallSuffix(), fs.options.BuildTags)
if err != nil || pkg.Name != "main" {
isPkg = false
isMap = false
isIndex = false
}
switch {
case isPkg:
buf := new(bytes.Buffer)
browserErrors := new(bytes.Buffer)
err := func() error {
archive, err := s.BuildPackage(pkg)
if err != nil {
return err
}
sourceMapFilter := &compiler.SourceMapFilter{Writer: buf}
m := &sourcemap.Map{File: base + ".js"}
sourceMapFilter.MappingCallback = gbuild.NewMappingCallback(m, fs.options.GOROOT, fs.options.GOPATH, fs.options.MapToLocalDisk)
deps, err := compiler.ImportDependencies(archive, s.BuildImportPath)
if err != nil {
return err
}
if err := compiler.WriteProgramCode(deps, sourceMapFilter); err != nil {
return err
}
mapBuf := new(bytes.Buffer)
m.WriteTo(mapBuf)
buf.WriteString("//# sourceMappingURL=" + base + ".js.map\n")
fs.sourceMaps[name+".map"] = mapBuf.Bytes()
return nil
}()
handleError(err, fs.options, browserErrors)
if err != nil {
buf = browserErrors
}
return newFakeFile(base+".js", buf.Bytes()), nil
case isMap:
if content, ok := fs.sourceMaps[name]; ok {
return newFakeFile(base+".js.map", content), nil
}
}
}
for _, d := range fs.dirs {
dir := http.Dir(filepath.Join(d, "src"))
f, err := dir.Open(name)
if err == nil {
return f, nil
}
// source maps are served outside of serveRoot
f, err = dir.Open(requestName)
if err == nil {
return f, nil
}
}
if isIndex {
// If there was no index.html file in any dirs, supply our own.
return newFakeFile("index.html", []byte(`<html><head><meta charset="utf-8"><script src="`+base+`.js"></script></head><body></body></html>`)), nil
}
return nil, os.ErrNotExist
}
type fakeFile struct {
name string
size int
io.ReadSeeker
}
func newFakeFile(name string, content []byte) *fakeFile {
return &fakeFile{name: name, size: len(content), ReadSeeker: bytes.NewReader(content)}
}
func (f *fakeFile) Close() error {
return nil
}
func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) {
return nil, os.ErrInvalid
}
func (f *fakeFile) Stat() (os.FileInfo, error) {
return f, nil
}
func (f *fakeFile) Name() string {
return f.name
}
func (f *fakeFile) Size() int64 {
return int64(f.size)
}
func (f *fakeFile) Mode() os.FileMode {
return 0
}
func (f *fakeFile) ModTime() time.Time {
return time.Time{}
}
func (f *fakeFile) IsDir() bool {
return false
}
func (f *fakeFile) Sys() interface{} {
return nil
}
// handleError handles err and returns an appropriate exit code.
// If browserErrors is non-nil, errors are written for presentation in browser.
func handleError(err error, options *gbuild.Options, browserErrors *bytes.Buffer) int {
switch err := err.(type) {
case nil:
return 0
case compiler.ErrorList:
for _, entry := range err {
printError(entry, options, browserErrors)
}
return 1
case *exec.ExitError:
return err.Sys().(syscall.WaitStatus).ExitStatus()
default:
printError(err, options, browserErrors)
return 1
}
}
// printError prints err to Stderr with options. If browserErrors is non-nil, errors are also written for presentation in browser.
func printError(err error, options *gbuild.Options, browserErrors *bytes.Buffer) {
e := sprintError(err)
options.PrintError("%s\n", e)
if browserErrors != nil {
fmt.Fprintln(browserErrors, `console.error("`+template.JSEscapeString(e)+`");`)
}
}
// sprintError returns an annotated error string without trailing newline.
func sprintError(err error) string {
makeRel := func(name string) string {
if relname, err := filepath.Rel(currentDirectory, name); err == nil {
return relname
}
return name
}
switch e := err.(type) {
case *scanner.Error:
return fmt.Sprintf("%s:%d:%d: %s", makeRel(e.Pos.Filename), e.Pos.Line, e.Pos.Column, e.Msg)
case types.Error:
pos := e.Fset.Position(e.Pos)
return fmt.Sprintf("%s:%d:%d: %s", makeRel(pos.Filename), pos.Line, pos.Column, e.Msg)
default:
return fmt.Sprintf("%s", e)
}
}
func runNode(script string, args []string, dir string, quiet bool) error {
var allArgs []string
if b, _ := strconv.ParseBool(os.Getenv("SOURCE_MAP_SUPPORT")); os.Getenv("SOURCE_MAP_SUPPORT") == "" || b {
allArgs = []string{"--require", "source-map-support/register"}
if err := exec.Command("node", "--require", "source-map-support/register", "--eval", "").Run(); err != nil {
if !quiet {
fmt.Fprintln(os.Stderr, "gopherjs: Source maps disabled. Install source-map-support module for nice stack traces. See https://github.com/gopherjs/gopherjs#gopherjs-run-gopherjs-test.")
}
allArgs = []string{}
}
}
if runtime.GOOS != "windows" {
// We've seen issues with stack space limits causing
// recursion-heavy standard library tests to fail (e.g., see
// https://github.com/gopherjs/gopherjs/pull/669#issuecomment-319319483).
//
// There are two separate limits in non-Windows environments:
//
// - OS process limit
// - Node.js (V8) limit
//
// GopherJS fetches the current OS process limit, and sets the
// Node.js limit to the same value. So both limits are kept in sync
// and can be controlled by setting OS process limit. E.g.:
//
// ulimit -s 10000 && gopherjs test
//
cur, err := sysutil.RlimitStack()
if err != nil {
return fmt.Errorf("failed to get stack size limit: %v", err)
}
allArgs = append(allArgs, fmt.Sprintf("--stack_size=%v", cur/1000)) // Convert from bytes to KB.
}
allArgs = append(allArgs, script)
allArgs = append(allArgs, args...)
node := exec.Command("node", allArgs...)
node.Dir = dir
node.Stdin = os.Stdin
node.Stdout = os.Stdout
node.Stderr = os.Stderr
err := node.Run()
if _, ok := err.(*exec.ExitError); err != nil && !ok {
err = fmt.Errorf("could not run Node.js: %s", err.Error())
}
return err
}
type testFuncs struct {
Tests []testFunc
Benchmarks []testFunc
Examples []testFunc
TestMain *testFunc
Package *build.Package
ImportTest bool
NeedTest bool
ImportXtest bool
NeedXtest bool
}
type testFunc struct {
Package string // imported package name (_test or _xtest)
Name string // function name
Output string // output, for examples
Unordered bool // output is allowed to be unordered.
}
var testFileSet = token.NewFileSet()
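// load parses the named test file and records its Test, Benchmark, Example and TestMain functions in t.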
func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error {
f, err := parser.ParseFile(testFileSet, filename, nil, parser.ParseComments)
if err != nil {
return err
}
for _, d := range f.Decls {
n, ok := d.(*ast.FuncDecl)
if !ok {
continue
}
if n.Recv != nil {
continue
}
name := n.Name.String()
switch {
case isTestMain(n):
if t.TestMain != nil {
return errors.New("multiple definitions of TestMain")
}
t.TestMain = &testFunc{pkg, name, "", false}
*doImport, *seen = true, true
case isTest(name, "Test"):
t.Tests = append(t.Tests, testFunc{pkg, name, "", false})
*doImport, *seen = true, true
case isTest(name, "Benchmark"):
t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, "", false})
*doImport, *seen = true, true
}
}
ex := doc.Examples(f)
sort.Sort(byOrder(ex))
for _, e := range ex {
*doImport = true // import test file whether executed or not
if e.Output == "" && !e.EmptyOutput {
// Don't run examples with no output.
continue
}
t.Examples = append(t.Examples, testFunc{pkg, "Example" + e.Name, e.Output, e.Unordered})
*seen = true
}
return nil
}
type byOrder []*doc.Example
func (x byOrder) Len() int { return len(x) }
func (x byOrder) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byOrder) Less(i, j int) bool { return x[i].Order < x[j].Order }
// isTestMain tells whether fn is a TestMain(m *testing.M) function.
func isTestMain(fn *ast.FuncDecl) bool {
if fn.Name.String() != "TestMain" ||
fn.Type.Results != nil && len(fn.Type.Results.List) > 0 ||
fn.Type.Params == nil ||
len(fn.Type.Params.List) != 1 ||
len(fn.Type.Params.List[0].Names) > 1 {
return false
}
ptr, ok := fn.Type.Params.List[0].Type.(*ast.StarExpr)
if !ok {
return false
}
// We can't easily check that the type is *testing.M
// because we don't know how testing has been imported,
// but at least check that it's *M or *something.M.
if name, ok := ptr.X.(*ast.Ident); ok && name.Name == "M" {
return true
}
if sel, ok := ptr.X.(*ast.SelectorExpr); ok && sel.Sel.Name == "M" {
return true
}
return false
}
// isTest tells whether name looks like a test (or benchmark, according to prefix).
// It is a Test (say) if there is a character after Test that is not a lower-case letter.
// We don't want TesticularCancer.
func isTest(name, prefix string) bool {
if !strings.HasPrefix(name, prefix) {
return false
}
if len(name) == len(prefix) { // "Test" is ok
return true
}
rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
return !unicode.IsLower(rune)
}
var testmainTmpl = template.Must(template.New("main").Parse(`
package main
import (
{{if not .TestMain}}
"os"
{{end}}
"testing"
"testing/internal/testdeps"
{{if .ImportTest}}
{{if .NeedTest}}_test{{else}}_{{end}} {{.Package.ImportPath | printf "%q"}}
{{end}}
{{if .ImportXtest}}
{{if .NeedXtest}}_xtest{{else}}_{{end}} {{.Package.ImportPath | printf "%s_test" | printf "%q"}}
{{end}}
)
var tests = []testing.InternalTest{
{{range .Tests}}
{"{{.Name}}", {{.Package}}.{{.Name}}},
{{end}}
}
var benchmarks = []testing.InternalBenchmark{
{{range .Benchmarks}}
{"{{.Name}}", {{.Package}}.{{.Name}}},
{{end}}
}
var examples = []testing.InternalExample{
{{range .Examples}}
{"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}},
{{end}}
}
func main() {
m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples)
{{with .TestMain}}
{{.Package}}.{{.Name}}(m)
{{else}}
os.Exit(m.Run())
{{end}}
}
`))
|
[
"\"TERM\"",
"\"SOURCE_MAP_SUPPORT\"",
"\"SOURCE_MAP_SUPPORT\""
] |
[] |
[
"SOURCE_MAP_SUPPORT",
"TERM"
] |
[]
|
["SOURCE_MAP_SUPPORT", "TERM"]
|
go
| 2 | 0 | |
circle/install_cmake.py
|
"""
Usage::
import install_cmake
install_cmake.install()
"""
import os
import subprocess
import sys
import textwrap
from subprocess import CalledProcessError, check_output
DEFAULT_CMAKE_VERSION = "3.5.0"
def _log(*args):
script_name = os.path.basename(__file__)
print("[circle:%s] " % script_name + " ".join(args))
sys.stdout.flush()
def _check_executables_availability(executables):
"""Try to run each executable with the `--version` argument. If at least
one could not be executed, it raises :exception:`RuntimeError` suggesting
approaches to mitigate the problem.
"""
missing_executables = []
for executable_name in executables:
try:
subprocess.check_output([executable_name, "--version"])
except (OSError, CalledProcessError):
missing_executables.append(executable_name)
if missing_executables:
raise RuntimeError(textwrap.dedent(
"""
The following executables are required to install CMake:
{missing_executables}
A few options to address this:
(1) install the missing executables using the system package manager. For example:
sudo apt-get install {missing_executables}
(2) install CMake wheel using pip. For example:
pip install cmake
""".format(
missing_executables=" ".join(missing_executables),
)
))
def install(cmake_version=DEFAULT_CMAKE_VERSION):
"""Download and install CMake into ``/usr/local``."""
_check_executables_availability(["rsync", "tar", "wget"])
cmake_directory = "/usr/local"
cmake_exe = os.path.join(cmake_directory, 'bin/cmake')
if os.path.exists(cmake_exe):
output = check_output([cmake_exe, '--version']).decode("utf-8")
if output.strip() == cmake_version:
_log("Skipping download: Found %s (v%s)" % (
cmake_exe, cmake_version))
return
_log("Looking for cmake", cmake_version, "in PATH")
try:
output = check_output(
["cmake", "--version"]).decode("utf-8")
current_cmake_version = output.splitlines()[0]
if cmake_version in current_cmake_version:
_log(" ->", "found %s:" % current_cmake_version,
"skipping download: version matches expected one")
return
else:
_log(" ->", "found %s:" % current_cmake_version,
"not the expected version")
except (OSError, CalledProcessError):
_log(" ->", "not found")
pass
cmake_arch = "x86_64"
name = "cmake-{}-Linux-{}".format(cmake_version, cmake_arch)
cmake_package = "{}.tar.gz".format(name)
_log("Downloading", cmake_package)
download_dir = os.environ["HOME"] + "/downloads"
downloaded_package = os.path.join(download_dir, cmake_package)
if not os.path.exists(downloaded_package):
if not os.path.exists(download_dir):
os.makedirs(download_dir)
cmake_version_major = cmake_version.split(".")[0]
cmake_version_minor = cmake_version.split(".")[1]
try:
check_output([
"wget", "--no-check-certificate", "--progress=dot",
"https://cmake.org/files/v{}.{}/{}".format(cmake_version_major, cmake_version_minor, cmake_package),
"-O", downloaded_package
], stderr=subprocess.STDOUT)
except (OSError, CalledProcessError):
_check_executables_availability(['curl'])
check_output([
"curl", "--progress-bar", "-L",
"https://cmake.org/files/v{}.{}/{}".format(cmake_version_major, cmake_version_minor, cmake_package),
"-o", downloaded_package
], stderr=subprocess.STDOUT)
_log(" ->", "done")
else:
_log(" ->", "skipping download: found", downloaded_package)
_log("Extracting", downloaded_package)
check_output(["tar", "xzf", downloaded_package])
_log(" ->", "done")
_log("Installing", name, "into", cmake_directory)
check_output([
"sudo", "rsync", "-avz", name + "/", cmake_directory
])
_log(" ->", "done")
if __name__ == '__main__':
install(sys.argv[1] if len(sys.argv) > 1 else DEFAULT_CMAKE_VERSION)
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
plugins/org.jkiss.dbeaver.ui.app.standalone/src/org/jkiss/dbeaver/ui/app/standalone/DBeaverApplication.java
|
/*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2021 DBeaver Corp and others
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.ui.app.standalone;
import org.apache.commons.cli.CommandLine;
import org.eclipse.core.runtime.Platform;
import org.eclipse.equinox.app.IApplication;
import org.eclipse.equinox.app.IApplicationContext;
import org.eclipse.jface.window.Window;
import org.eclipse.osgi.service.datalocation.Location;
import org.eclipse.swt.SWT;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Listener;
import org.eclipse.swt.widgets.MessageBox;
import org.eclipse.swt.widgets.Shell;
import org.eclipse.ui.IWorkbench;
import org.eclipse.ui.IWorkbenchPreferenceConstants;
import org.eclipse.ui.PlatformUI;
import org.eclipse.ui.internal.WorkbenchPlugin;
import org.eclipse.ui.internal.ide.ChooseWorkspaceData;
import org.eclipse.ui.internal.ide.ChooseWorkspaceDialog;
import org.jkiss.code.NotNull;
import org.jkiss.dbeaver.DBeaverPreferences;
import org.jkiss.dbeaver.Log;
import org.jkiss.dbeaver.model.DBConstants;
import org.jkiss.dbeaver.model.app.DBASecureStorage;
import org.jkiss.dbeaver.model.app.DBPApplicationController;
import org.jkiss.dbeaver.model.impl.app.DefaultSecureStorage;
import org.jkiss.dbeaver.model.preferences.DBPPreferenceStore;
import org.jkiss.dbeaver.registry.BaseApplicationImpl;
import org.jkiss.dbeaver.registry.BaseWorkspaceImpl;
import org.jkiss.dbeaver.registry.updater.VersionDescriptor;
import org.jkiss.dbeaver.runtime.DBWorkbench;
import org.jkiss.dbeaver.ui.UIUtils;
import org.jkiss.dbeaver.ui.app.standalone.rpc.DBeaverInstanceServer;
import org.jkiss.dbeaver.ui.app.standalone.rpc.IInstanceController;
import org.jkiss.dbeaver.ui.app.standalone.rpc.InstanceClient;
import org.jkiss.dbeaver.ui.app.standalone.update.VersionUpdateDialog;
import org.jkiss.dbeaver.utils.GeneralUtils;
import org.jkiss.dbeaver.utils.RuntimeUtils;
import org.jkiss.dbeaver.utils.SystemVariablesResolver;
import org.jkiss.utils.ArrayUtils;
import org.jkiss.utils.CommonUtils;
import org.jkiss.utils.IOUtils;
import org.jkiss.utils.StandardConstants;
import org.osgi.framework.Version;
import java.io.*;
import java.lang.reflect.Field;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* This class controls all aspects of the application's execution
*/
public class DBeaverApplication extends BaseApplicationImpl implements DBPApplicationController {
private static final Log log = Log.getLog(DBeaverApplication.class);
public static final String APPLICATION_PLUGIN_ID = "org.jkiss.dbeaver.ui.app.standalone";
public static final String WORKSPACE_DIR_LEGACY = "${user.home}/.dbeaver"; //$NON-NLS-1$
public static final String WORKSPACE_DIR_4 = "${user.home}/.dbeaver4"; //$NON-NLS-1$
public static final String WORKSPACE_DIR_6; //$NON-NLS-1$
private static final Path FILE_WITH_WORKSPACES;
public static final String DBEAVER_DATA_DIR = "DBeaverData";
public static final String WORKSPACE_DIR_CURRENT;
public static final String[] WORKSPACE_DIR_PREVIOUS = {
WORKSPACE_DIR_4,
WORKSPACE_DIR_LEGACY};
static final String VERSION_PROP_PRODUCT_NAME = "product-name";
static final String VERSION_PROP_PRODUCT_VERSION = "product-version";
private static final String PROP_EXIT_DATA = IApplicationContext.EXIT_DATA_PROPERTY; //$NON-NLS-1$
private static final String PROP_EXIT_CODE = "eclipse.exitcode"; //$NON-NLS-1$
static boolean WORKSPACE_MIGRATED = false;
static DBeaverApplication instance;
private boolean exclusiveMode = false;
private boolean reuseWorkspace = false;
private boolean primaryInstance = true;
private boolean headlessMode = false;
private IInstanceController instanceServer;
private OutputStream debugWriter;
private PrintStream oldSystemOut;
private PrintStream oldSystemErr;
private Display display = null;
private boolean resetUIOnRestart, resetWorkspaceOnRestart;
private long lastUserActivityTime = -1;
static {
// Explicitly set UTF-8 as default file encoding
// In some places Eclipse reads this property directly.
//System.setProperty(StandardConstants.ENV_FILE_ENCODING, GeneralUtils.UTF8_ENCODING);
// Detect default workspace location
// Since 6.1.3 it is different for different OSes
// Windows: %AppData%/DBeaverData
// MacOS: ~/Library/DBeaverData
// Linux: $XDG_DATA_HOME/DBeaverData
String osName = (System.getProperty("os.name")).toUpperCase();
String workingDirectory;
if (osName.contains("WIN")) {
String appData = System.getenv("AppData");
if (appData == null) {
appData = System.getProperty("user.home");
}
workingDirectory = appData + "\\" + DBEAVER_DATA_DIR;
} else if (osName.contains("MAC")) {
workingDirectory = System.getProperty("user.home") + "/Library/" + DBEAVER_DATA_DIR;
} else {
// Linux
String dataHome = System.getProperty("XDG_DATA_HOME");
if (dataHome == null) {
dataHome = System.getProperty("user.home") + "/.local/share";
}
String badWorkingDir = dataHome + "/." + DBEAVER_DATA_DIR;
String goodWorkingDir = dataHome + "/" + DBEAVER_DATA_DIR;
if (!new File(goodWorkingDir).exists() && new File(badWorkingDir).exists()) {
// Let's use bad working dir if it exists (#6316)
workingDirectory = badWorkingDir;
} else {
workingDirectory = goodWorkingDir;
}
}
// Workspace dir
WORKSPACE_DIR_6 = new File(workingDirectory, "workspace6").getAbsolutePath();
WORKSPACE_DIR_CURRENT = WORKSPACE_DIR_6;
FILE_WITH_WORKSPACES = Paths.get(workingDirectory, ".workspaces"); //$NON-NLS-1$
}
/**
* Gets singleton instance of DBeaver application
* @return application or null if application wasn't started or was stopped.
*/
public static DBeaverApplication getInstance() {
return instance;
}
@Override
public long getLastUserActivityTime() {
return lastUserActivityTime;
}
@Override
public Object start(IApplicationContext context) {
instance = this;
Location instanceLoc = Platform.getInstanceLocation();
CommandLine commandLine = DBeaverCommandLine.getCommandLine();
{
String defaultHomePath = getDefaultInstanceLocation();
if (DBeaverCommandLine.handleCommandLine(commandLine, defaultHomePath)) {
System.err.println("Commands processed. Exit " + GeneralUtils.getProductName() + ".");
return IApplication.EXIT_OK;
}
}
boolean ideWorkspaceSet = setIDEWorkspace(instanceLoc);
{
// Lock the workspace
try {
if (!instanceLoc.isSet()) {
if (!setDefaultWorkspacePath(instanceLoc)) {
return IApplication.EXIT_OK;
}
} else if (instanceLoc.isLocked() && !ideWorkspaceSet && !isExclusiveMode()) {
// Check for locked workspace
if (!setDefaultWorkspacePath(instanceLoc)) {
return IApplication.EXIT_OK;
}
}
if (isExclusiveMode()) {
markLocationReadOnly(instanceLoc);
} else {
// Lock the workspace
if (!instanceLoc.isLocked()) {
instanceLoc.lock();
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
// Custom parameters
try {
headlessMode = true;
if (DBeaverCommandLine.handleCustomParameters(commandLine)) {
return IApplication.EXIT_OK;
}
} finally {
headlessMode = false;
}
if (isExclusiveMode()) {
// In exclusive mode we mustn't run the UI
return IApplication.EXIT_OK;
}
initDebugWriter();
updateSplashHandler();
final Runtime runtime = Runtime.getRuntime();
// Init Core plugin and mark it as standalone version
log.debug(GeneralUtils.getProductName() + " " + GeneralUtils.getProductVersion() + " is starting"); //$NON-NLS-1$
log.debug("OS: " + System.getProperty(StandardConstants.ENV_OS_NAME) + " " + System.getProperty(StandardConstants.ENV_OS_VERSION) + " (" + System.getProperty(StandardConstants.ENV_OS_ARCH) + ")");
log.debug("Java version: " + System.getProperty(StandardConstants.ENV_JAVA_VERSION) + " by " + System.getProperty(StandardConstants.ENV_JAVA_VENDOR) + " (" + System.getProperty(StandardConstants.ENV_JAVA_ARCH) + "bit)");
log.debug("Install path: '" + SystemVariablesResolver.getInstallPath() + "'"); //$NON-NLS-1$ //$NON-NLS-2$
log.debug("Instance path: '" + instanceLoc.getURL() + "'"); //$NON-NLS-1$ //$NON-NLS-2$
log.debug("Memory available " + (runtime.totalMemory() / (1024 * 1024)) + "Mb/" + (runtime.maxMemory() / (1024 * 1024)) + "Mb");
// Write version info
writeWorkspaceInfo();
initializeApplication();
// Run instance server
instanceServer = DBeaverInstanceServer.startInstanceServer(commandLine, createInstanceController());
// Prefs default
PlatformUI.getPreferenceStore().setDefault(
IWorkbenchPreferenceConstants.KEY_CONFIGURATION_ID,
ApplicationWorkbenchAdvisor.DBEAVER_SCHEME_NAME);
try {
log.debug("Run workbench");
getDisplay();
int returnCode = PlatformUI.createAndRunWorkbench(display, createWorkbenchAdvisor());
if (resetUIOnRestart || resetWorkspaceOnRestart) {
resetUISettings(instanceLoc);
}
if (resetWorkspaceOnRestart) {
// FIXME: ???
}
// Copy-pasted from IDEApplication
// Magic with exit codes to let Eclipse starter switch workspace
// the workbench doesn't support relaunch yet (bug 61809) so
// for now restart is used, and exit data properties are checked
// here to substitute in the relaunch return code if needed
if (returnCode != PlatformUI.RETURN_RESTART) {
return EXIT_OK;
}
// if the exit code property has been set to the relaunch code, then
// return that code now, otherwise this is a normal restart
return EXIT_RELAUNCH.equals(Integer.getInteger(PROP_EXIT_CODE)) ? EXIT_RELAUNCH
: EXIT_RESTART;
} catch (Throwable e) {
log.debug("Internal error in workbench lifecycle", e);
return IApplication.EXIT_OK;
} finally {
shutdown();
/*
try {
Job.getJobManager().join(null, new NullProgressMonitor());
}
catch (InterruptedException e) {
e.printStackTrace();
}
*/
display.dispose();
display = null;
}
}
private void markLocationReadOnly(Location instanceLoc) {
try {
Field isReadOnlyField = instanceLoc.getClass().getDeclaredField("isReadOnly");
isReadOnlyField.setAccessible(true);
isReadOnlyField.set(instanceLoc, true);
} catch (Throwable e) {
// ignore
e.printStackTrace();
}
}
private static boolean setIDEWorkspace(@NotNull Location instanceLoc) {
if (instanceLoc.isSet()) {
return false;
}
Collection<String> recentWorkspaces = getRecentWorkspaces(instanceLoc);
if (recentWorkspaces.isEmpty()) {
return false;
}
String lastWorkspace = recentWorkspaces.iterator().next();
if (!CommonUtils.isEmpty(lastWorkspace) && !WORKSPACE_DIR_CURRENT.equals(lastWorkspace)) {
try {
final URL selectedWorkspaceURL = new URL(
"file", //$NON-NLS-1$
null,
lastWorkspace);
instanceLoc.set(selectedWorkspaceURL, true);
return true;
} catch (Exception e) {
System.err.println("Can't set IDE workspace to '" + lastWorkspace + "'");
e.printStackTrace();
}
}
return false;
}
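// Merges the recent workspaces persisted by Eclipse with the list backed up in the DBeaver data directory,
// ensuring the current workspace directory ends up in the resulting list.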
@NotNull
private static Collection<String> getRecentWorkspaces(@NotNull Location instanceLoc) {
ChooseWorkspaceData launchData = new ChooseWorkspaceData(instanceLoc.getDefault());
String[] arrayOfRecentWorkspaces = launchData.getRecentWorkspaces();
Collection<String> recentWorkspaces;
int maxSize;
if (arrayOfRecentWorkspaces == null) {
maxSize = 0;
recentWorkspaces = new ArrayList<>();
} else {
maxSize = arrayOfRecentWorkspaces.length;
recentWorkspaces = new ArrayList<>(Arrays.asList(arrayOfRecentWorkspaces));
}
recentWorkspaces.removeIf(Objects::isNull);
Collection<String> backedUpWorkspaces = getBackedUpWorkspaces();
if (recentWorkspaces.equals(backedUpWorkspaces) && backedUpWorkspaces.contains(WORKSPACE_DIR_CURRENT)) {
return backedUpWorkspaces;
}
List<String> workspaces = Stream.concat(recentWorkspaces.stream(), backedUpWorkspaces.stream())
.distinct()
.limit(maxSize)
.collect(Collectors.toList());
if (!recentWorkspaces.contains(WORKSPACE_DIR_CURRENT)) {
if (recentWorkspaces.size() < maxSize) {
recentWorkspaces.add(WORKSPACE_DIR_CURRENT);
} else if (maxSize > 1) {
workspaces.set(recentWorkspaces.size() - 1, WORKSPACE_DIR_CURRENT);
}
}
launchData.setRecentWorkspaces(Arrays.copyOf(workspaces.toArray(new String[0]), maxSize));
launchData.writePersistedData();
saveWorkspacesToBackup(workspaces);
return workspaces;
}
@NotNull
private static Collection<String> getBackedUpWorkspaces() {
if (!Files.exists(FILE_WITH_WORKSPACES)) {
return Collections.emptyList();
}
try {
return Files.readAllLines(FILE_WITH_WORKSPACES);
} catch (IOException e) {
System.err.println("Unable to read backed up workspaces"); //$NON-NLS-1$
e.printStackTrace();
return Collections.emptyList();
}
}
private static void saveWorkspacesToBackup(@NotNull Iterable<? extends CharSequence> workspaces) {
try {
Files.write(FILE_WITH_WORKSPACES, workspaces, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
} catch (IOException e) {
System.err.println("Unable to save backed up workspaces"); //$NON-NLS-1$
e.printStackTrace();
}
}
private String getDefaultInstanceLocation() {
String defaultHomePath = WORKSPACE_DIR_CURRENT;
Location instanceLoc = Platform.getInstanceLocation();
if (instanceLoc.isSet()) {
defaultHomePath = instanceLoc.getURL().getFile();
}
return defaultHomePath;
}
private void updateSplashHandler() {
if (ArrayUtils.contains(Platform.getApplicationArgs(), "-nosplash")) {
return;
}
try {
getDisplay();
// look and see if there's a splash shell we can parent off of
Shell shell = WorkbenchPlugin.getSplashShell(display);
if (shell != null) {
// should set the icon and message for this shell to be the
// same as the chooser dialog - this will be the guy that lives in
// the task bar and without these calls you'd have the default icon
// with no message.
shell.setText(ChooseWorkspaceDialog.getWindowTitle());
shell.setImages(Window.getDefaultImages());
}
} catch (Throwable e) {
e.printStackTrace(System.err);
System.err.println("Error updating splash shell");
}
Log.addListener((message, t) -> DBeaverSplashHandler.showMessage(CommonUtils.toString(message)));
}
protected IInstanceController createInstanceController() {
return new DBeaverInstanceServer();
}
private void resetUISettings(Location instanceLoc) {
try {
File instanceDir = new File(instanceLoc.getURL().toURI());
if (instanceDir.exists()) {
File settingsFile = new File(instanceDir, ".metadata/.plugins/org.eclipse.e4.workbench/workbench.xmi");
if (settingsFile.exists()) {
settingsFile.deleteOnExit();
}
//markFoldertoDelete(new File(instanceDir, ".metadata/.plugins/org.eclipse.core.resources/.root"));
//markFoldertoDelete(new File(instanceDir, ".metadata/.plugins/org.eclipse.core.resources/.safetable"));
}
} catch (Throwable e) {
log.error("Error resetting UI settings", e);
}
}
private void markFoldertoDelete(File folder) {
if (!folder.exists()) {
return;
}
File[] files = folder.listFiles();
if (files != null) {
for (File file : files) {
if (file.isDirectory()) {
markFoldertoDelete(file);
} else {
log.debug("Delete resource file " + file.getAbsolutePath());
file.deleteOnExit();
}
}
}
folder.deleteOnExit();
}
/**
* May be overridden by implementors
*/
protected void initializeApplication() {
}
private Display getDisplay() {
if (display == null) {
log.debug("Create display");
// Set display name at the very beginning (#609)
// This doesn't initialize display - just sets default title
Display.setAppName(GeneralUtils.getProductName());
display = Display.getCurrent();
if (display == null) {
display = PlatformUI.createDisplay();
}
addIdleListeners();
}
return display;
}
private void addIdleListeners() {
int [] events = {SWT.KeyDown, SWT.KeyUp, SWT.MouseDown, SWT.MouseMove, SWT.MouseUp, SWT.MouseWheel};
Listener idleListener = event -> lastUserActivityTime = System.currentTimeMillis();
for (int event : events) {
display.addFilter(event, idleListener);
}
}
private boolean setDefaultWorkspacePath(Location instanceLoc) {
String defaultHomePath = WORKSPACE_DIR_CURRENT;
final Path homeDir = Path.of(defaultHomePath);
try {
if (!Files.exists(homeDir) || Files.list(homeDir).count() == 0) {
Path previousVersionWorkspaceDir = null;
for (String oldDir : WORKSPACE_DIR_PREVIOUS) {
oldDir = GeneralUtils.replaceSystemPropertyVariables(oldDir);
final Path oldWorkspaceDir = Path.of(oldDir);
if (Files.exists(oldWorkspaceDir) &&
Files.exists(GeneralUtils.getMetadataFolder(oldWorkspaceDir))) {
previousVersionWorkspaceDir = oldWorkspaceDir;
break;
}
}
if (previousVersionWorkspaceDir != null) {
DBeaverSettingsImporter importer = new DBeaverSettingsImporter(this, getDisplay());
if (!importer.migrateFromPreviousVersion(previousVersionWorkspaceDir.toFile(), homeDir.toFile())) {
return false;
}
}
}
} catch (Throwable e) {
log.error("Error migrating old workspace version", e);
}
try {
// Make URL manually because file.toURI().toURL() produces bad path (with %20).
final URL defaultHomeURL = new URL(
"file", //$NON-NLS-1$
null,
defaultHomePath);
boolean keepTrying = true;
while (keepTrying) {
if (instanceLoc.isLocked() || !instanceLoc.set(defaultHomeURL, true)) {
if (exclusiveMode || reuseWorkspace) {
instanceLoc.set(defaultHomeURL, false);
keepTrying = false;
primaryInstance = false;
} else {
// Can't lock specified path
int msgResult = showMessageBox(
"DBeaver - Can't lock workspace",
"Can't lock workspace at " + defaultHomePath + ".\n" +
"It seems that you have another DBeaver instance running.\n" +
"You may ignore it and work without lock but it is recommended to shutdown previous instance otherwise you may corrupt workspace data.",
SWT.ICON_WARNING | SWT.IGNORE | SWT.RETRY | SWT.ABORT);
switch (msgResult) {
case SWT.ABORT:
return false;
case SWT.IGNORE:
instanceLoc.set(defaultHomeURL, false);
keepTrying = false;
primaryInstance = false;
break;
case SWT.RETRY:
break;
}
}
} else {
break;
}
}
} catch (Throwable e) {
// Just skip it
// Error may occur if -data parameter was specified at startup
System.err.println("Can't switch workspace to '" + defaultHomePath + "' - " + e.getMessage()); //$NON-NLS-1$ //$NON-NLS-2$
}
return true;
}
public static void writeWorkspaceInfo() {
final Path metadataFolder = GeneralUtils.getMetadataFolder();
Properties props = BaseWorkspaceImpl.readWorkspaceInfo(metadataFolder);
props.setProperty(VERSION_PROP_PRODUCT_NAME, GeneralUtils.getProductName());
props.setProperty(VERSION_PROP_PRODUCT_VERSION, GeneralUtils.getProductVersion().toString());
BaseWorkspaceImpl.writeWorkspaceInfo(metadataFolder, props);
}
@NotNull
protected ApplicationWorkbenchAdvisor createWorkbenchAdvisor() {
return new ApplicationWorkbenchAdvisor();
}
@Override
public void stop() {
final IWorkbench workbench = PlatformUI.getWorkbench();
if (workbench == null)
return;
final Display display = workbench.getDisplay();
display.syncExec(() -> {
if (!display.isDisposed())
workbench.close();
});
}
private void shutdown() {
log.debug("DBeaver is stopping"); //$NON-NLS-1$
try {
instanceServer = null;
RuntimeUtils.runTask(monitor -> {
DBeaverInstanceServer.stopInstanceServer();
}, "Stop RMI", 1000);
} catch (Throwable e) {
log.error(e);
} finally {
instance = null;
log.debug("DBeaver shutdown completed"); //$NON-NLS-1$
stopDebugWriter();
}
}
private void initDebugWriter() {
DBPPreferenceStore preferenceStore = DBWorkbench.getPlatform().getPreferenceStore();
if (!preferenceStore.getBoolean(DBeaverPreferences.LOGS_DEBUG_ENABLED)) {
return;
}
String logLocation = preferenceStore.getString(DBeaverPreferences.LOGS_DEBUG_LOCATION);
if (CommonUtils.isEmpty(logLocation)) {
logLocation = GeneralUtils.getMetadataFolder().resolve(DBConstants.DEBUG_LOG_FILE_NAME).toAbsolutePath().toString(); //$NON-NLS-1$
}
logLocation = GeneralUtils.replaceVariables(logLocation, new SystemVariablesResolver());
File debugLogFile = new File(logLocation);
if (debugLogFile.exists()) {
if (!debugLogFile.delete()) {
System.err.println("Can't delete debug log file"); //$NON-NLS-1$
return;
}
}
try {
debugWriter = new FileOutputStream(debugLogFile);
oldSystemOut = System.out;
oldSystemErr = System.err;
System.setOut(new PrintStream(new ProxyPrintStream(debugWriter, oldSystemOut)));
System.setErr(new PrintStream(new ProxyPrintStream(debugWriter, oldSystemErr)));
} catch (IOException e) {
e.printStackTrace(System.err);
}
}
private void stopDebugWriter() {
if (oldSystemOut != null) System.setOut(oldSystemOut);
if (oldSystemErr != null) System.setErr(oldSystemErr);
if (debugWriter != null) {
IOUtils.close(debugWriter);
debugWriter = null;
}
}
public IInstanceController getInstanceServer() {
return instanceServer;
}
public IInstanceController createInstanceClient() {
return InstanceClient.createClient(getDefaultInstanceLocation());
}
private static File getDefaultWorkspaceLocation(String path) {
return new File(
System.getProperty(StandardConstants.ENV_USER_HOME),
path);
}
@Override
public boolean isStandalone() {
return true;
}
@Override
public boolean isPrimaryInstance() {
return primaryInstance;
}
@Override
public boolean isHeadlessMode() {
return headlessMode;
}
@Override
public boolean isExclusiveMode() {
return exclusiveMode;
}
public void setExclusiveMode(boolean exclusiveMode) {
this.exclusiveMode = exclusiveMode;
}
public boolean isReuseWorkspace() {
return reuseWorkspace;
}
public void setReuseWorkspace(boolean reuseWorkspace) {
this.reuseWorkspace = reuseWorkspace;
}
@Override
public void setHeadlessMode(boolean headlessMode) {
this.headlessMode = headlessMode;
}
@NotNull
@Override
public DBASecureStorage getSecureStorage() {
return DefaultSecureStorage.INSTANCE;
}
@Override
public String getInfoDetails() {
return null;
}
@Override
public String getDefaultProjectName() {
return "General";
}
private int showMessageBox(String title, String message, int style) {
// Can't lock specified path
Shell shell = new Shell(getDisplay(), SWT.ON_TOP);
shell.setText(GeneralUtils.getProductTitle());
MessageBox messageBox = new MessageBox(shell, style);
messageBox.setText(title);
messageBox.setMessage(message);
int msgResult = messageBox.open();
shell.dispose();
return msgResult;
}
public void notifyVersionUpgrade(@NotNull Version currentVersion, @NotNull VersionDescriptor newVersion, boolean showSkip) {
VersionUpdateDialog dialog = new VersionUpdateDialog(
UIUtils.getActiveWorkbenchShell(),
currentVersion,
newVersion,
showSkip);
dialog.open();
}
public void setResetUIOnRestart(boolean resetUIOnRestart) {
this.resetUIOnRestart = resetUIOnRestart;
}
public void setResetWorkspaceOnRestart(boolean resetWorkspaceOnRestart) {
this.resetWorkspaceOnRestart = resetWorkspaceOnRestart;
}
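// ProxyPrintStream mirrors everything written to System.out/System.err into the debug log file.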
private class ProxyPrintStream extends OutputStream {
private final OutputStream debugWriter;
private final OutputStream stdOut;
ProxyPrintStream(OutputStream debugWriter, OutputStream stdOut) {
this.debugWriter = debugWriter;
this.stdOut = stdOut;
}
@Override
public void write(@NotNull byte[] b) throws IOException {
debugWriter.write(b);
stdOut.write(b);
}
@Override
public void write(@NotNull byte[] b, int off, int len) throws IOException {
debugWriter.write(b, off, len);
stdOut.write(b, off, len);
}
@Override
public void write(int b) throws IOException {
debugWriter.write(b);
stdOut.write(b);
}
@Override
public void flush() throws IOException {
debugWriter.flush();
stdOut.flush();
}
}
}
|
[
"\"AppData\""
] |
[] |
[
"AppData"
] |
[]
|
["AppData"]
|
java
| 1 | 0 | |
conbench/entities/commit.py
|
import functools
import json
import os
import dateutil.parser
import flask as f
import requests
import sqlalchemy as s
from ..entities._entity import (
Base,
EntityMixin,
EntitySerializer,
NotNull,
Nullable,
generate_uuid,
)
class Commit(Base, EntityMixin):
__tablename__ = "commit"
id = NotNull(s.String(50), primary_key=True, default=generate_uuid)
sha = NotNull(s.String(50))
parent = Nullable(s.String(50))
repository = NotNull(s.String(100))
message = NotNull(s.String(250))
author_name = NotNull(s.String(100))
author_login = Nullable(s.String(50))
author_avatar = Nullable(s.String(100))
timestamp = Nullable(s.DateTime(timezone=False))
@staticmethod
def create_no_context():
commit = Commit.first(sha="", repository="")
if not commit:
commit = Commit.create(
{
"sha": "",
"repository": "",
"parent": None,
"timestamp": None,
"message": "",
"author_name": "",
}
)
return commit
@staticmethod
def create_unknown_context(sha, repository):
return Commit.create(
{
"sha": sha,
"repository": repository,
"parent": None,
"timestamp": None,
"message": "",
"author_name": "",
}
)
@staticmethod
def create_github_context(sha, repository, github):
return Commit.create(
{
"sha": sha,
"repository": repository,
"parent": github["parent"],
"timestamp": github["date"],
"message": github["message"],
"author_name": github["author_name"],
"author_login": github["author_login"],
"author_avatar": github["author_avatar"],
}
)
s.Index(
"commit_index",
Commit.sha,
Commit.repository,
unique=True,
)
class _Serializer(EntitySerializer):
def _dump(self, commit):
url = None
if commit.repository and commit.sha:
url = f"{commit.repository}/commit/{commit.sha}"
timestamp = commit.timestamp.isoformat() if commit.timestamp else None
return {
"id": commit.id,
"sha": commit.sha,
"url": url,
"parent_sha": commit.parent,
"repository": commit.repository,
"message": commit.message,
"author_name": commit.author_name,
"author_login": commit.author_login,
"author_avatar": commit.author_avatar,
"timestamp": timestamp,
"links": {
"list": f.url_for("api.commits", _external=True),
"self": f.url_for("api.commit", commit_id=commit.id, _external=True),
},
}
class CommitSerializer:
one = _Serializer()
many = _Serializer(many=True)
GITHUB = "https://api.github.com"
this_dir = os.path.abspath(os.path.dirname(__file__))
def repository_to_name(repository):
if not repository:
return ""
name = repository
if "github.com/" in repository:
name = repository.split("github.com/")[1]
elif "[email protected]:" in repository:
name = repository.split("[email protected]:")[1]
return name
def repository_to_url(repository):
name = repository_to_name(repository)
return f"https://github.com/{name}" if name else ""
def get_github_commit(repository, sha):
if not repository or not sha:
return {}
github = GitHub()
name = repository_to_name(repository)
commit = github.get_commit(name, sha)
if commit is None:
return {}
parent = commit["parent"]
commits = github.get_commits(name, parent)
if parent in commits:
return commit
else:
# This is a pull request, find the parent of the first commit.
# TODO: This will fail if the pull request has more than 50 commits.
# It will also give up if it can't find the parent after 50 tries
# (which could happen for a really old pull request).
parent = commit["parent"]
for _ in range(50):
other = github.get_commit(name, parent)
if other["parent"] in commits:
commit["parent"] = other["parent"]
return commit
else:
parent = other["parent"]
return {}
class GitHub:
def __init__(self):
self.test_shas = {
"02addad336ba19a654f9c857ede546331be7b631": "github_child.json",
"4beb514d071c9beec69b8917b5265e77ade22fb3": "github_parent.json",
"6d703c4c7b15be630af48d5e9ef61628751674b2": "github_grandparent.json",
}
self.test_commits = [
"02addad336ba19a654f9c857ede546331be7b631",
"4beb514d071c9beec69b8917b5265e77ade22fb3",
"6d703c4c7b15be630af48d5e9ef61628751674b2",
"81e9417eb68171e03a304097ae86e1fd83307130",
]
def get_commits(self, name, sha):
if sha in self.test_commits:
return self.test_commits
commits = []
# Grabs the last 1000 commits to the main branch. TODO: If the pull
# request is old, the parent may not be in the last 1000 commits.
for branch in ["master", "main"]:
url = f"{GITHUB}/repos/{name}/commits?sha={branch}&per_page=100"
response = self._get_response(url)
if response:
commits = self._parse_commits(response)
if sha in commits:
return commits
for page in range(2, 11):
url = f"{GITHUB}/repos/{name}/commits?sha={branch}&per_page=100&page={page}"
response = self._get_response(url)
if response:
commits.extend(self._parse_commits(response))
if sha in commits:
return commits
if commits:
break
return commits
def get_commit(self, name, sha):
if sha in self.test_commits:
response = self.test_commit(sha)
else:
url = f"{GITHUB}/repos/{name}/commits/{sha}"
response = self._get_response(url)
return self._parse_commit(response) if response else None
@functools.cached_property
def session(self):
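# Returns an authenticated requests.Session when GITHUB_API_TOKEN is set, otherwise None
# (callers then fall back to unauthenticated requests with lower rate limits).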
token, session = os.getenv("GITHUB_API_TOKEN"), None
if token:
session = requests.Session()
session.headers = {"Authorization": f"Bearer {token}"}
return session
def test_commit(self, sha):
fixture = f"../tests/entities/{self.test_shas[sha]}"
path = os.path.join(this_dir, fixture)
with open(path) as fixture:
return json.load(fixture)
@staticmethod
def _parse_commits(commits):
return [commit["sha"] for commit in commits]
@staticmethod
def _parse_commit(commit):
author = commit.get("author")
commit_author = commit["commit"]["author"]
return {
"parent": commit["parents"][0]["sha"],
"date": dateutil.parser.isoparse(commit_author["date"]),
"message": commit["commit"]["message"].split("\n")[0],
"author_name": commit_author["name"],
"author_login": author["login"] if author else None,
"author_avatar": author["avatar_url"] if author else None,
}
def _get_response(self, url):
response = self.session.get(url) if self.session else requests.get(url)
if response.status_code != 200:
print(response.json())
return None
return response.json()
|
[] |
[] |
[
"GITHUB_API_TOKEN"
] |
[]
|
["GITHUB_API_TOKEN"]
|
python
| 1 | 0 | |
test/extended/util/test.go
|
package util
import (
"flag"
"fmt"
"os"
"path"
"path/filepath"
"regexp"
"sort"
"strings"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/types"
"github.com/onsi/gomega"
"k8s.io/klog"
kapiv1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
kclientset "k8s.io/client-go/kubernetes"
rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/generated"
// this appears to inexplicably auto-register global flags.
_ "k8s.io/kubernetes/test/e2e/storage/drivers"
projectv1 "github.com/openshift/api/project/v1"
securityv1client "github.com/openshift/client-go/security/clientset/versioned"
"github.com/openshift/openshift-tests/pkg/version"
)
var (
reportFileName string
syntheticSuite string
quiet bool
)
var TestContext *e2e.TestContextType = &e2e.TestContext
func InitStandardFlags() {
e2e.RegisterCommonFlags(flag.CommandLine)
e2e.RegisterClusterFlags(flag.CommandLine)
// replaced by a bare import above.
//e2e.RegisterStorageFlags()
}
func InitTest(dryRun bool) error {
InitDefaultEnvironmentVariables()
// interpret synthetic input in `--ginkgo.focus` and/or `--ginkgo.skip`
ginkgo.BeforeEach(checkSyntheticInput)
TestContext.DeleteNamespace = os.Getenv("DELETE_NAMESPACE") != "false"
TestContext.VerifyServiceAccount = true
testfiles.AddFileSource(testfiles.BindataFileSource{
Asset: generated.Asset,
AssetNames: generated.AssetNames,
})
TestContext.KubectlPath = "kubectl"
TestContext.KubeConfig = KubeConfigPath()
os.Setenv("KUBECONFIG", TestContext.KubeConfig)
// "debian" is used when not set. At least GlusterFS tests need "custom".
// (There is no option for "rhel" or "centos".)
TestContext.NodeOSDistro = "custom"
TestContext.MasterOSDistro = "custom"
// load and set the host variable for kubectl
if !dryRun {
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: TestContext.KubeConfig}, &clientcmd.ConfigOverrides{})
cfg, err := clientConfig.ClientConfig()
if err != nil {
return err
}
TestContext.Host = cfg.Host
}
reportFileName = os.Getenv("TEST_REPORT_FILE_NAME")
if reportFileName == "" {
reportFileName = "junit"
}
quiet = os.Getenv("TEST_OUTPUT_QUIET") == "true"
// Ensure that Kube tests run privileged (like they do upstream)
TestContext.CreateTestingNS = createTestingNS
klog.V(2).Infof("Extended test version %s", version.Get().String())
return nil
}
func ExecuteTest(t ginkgo.GinkgoTestingT, suite string) {
var r []ginkgo.Reporter
if dir := os.Getenv("TEST_REPORT_DIR"); len(dir) > 0 {
TestContext.ReportDir = dir
}
if TestContext.ReportDir != "" {
if err := os.MkdirAll(TestContext.ReportDir, 0755); err != nil {
klog.Errorf("Failed creating report directory: %v", err)
}
defer e2e.CoreDump(TestContext.ReportDir)
}
if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
config.GinkgoConfig.SkipString = "Skipped"
}
gomega.RegisterFailHandler(ginkgo.Fail)
if TestContext.ReportDir != "" {
r = append(r, reporters.NewJUnitReporter(path.Join(TestContext.ReportDir, fmt.Sprintf("%s_%02d.xml", reportFileName, config.GinkgoConfig.ParallelNode))))
}
AnnotateTestSuite()
if quiet {
r = append(r, NewSimpleReporter())
ginkgo.RunSpecsWithCustomReporters(t, suite, r)
} else {
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, suite, r)
}
}
func AnnotateTestSuite() {
testRenamer := newGinkgoTestRenamerFromGlobals(e2e.TestContext.Provider, getNetworkSkips())
ginkgo.WalkTests(testRenamer.maybeRenameTest)
}
func getNetworkSkips() []string {
out, err := e2e.KubectlCmd("get", "network.operator.openshift.io", "cluster", "--template", "{{.spec.defaultNetwork.type}}{{if .spec.defaultNetwork.openshiftSDNConfig}} {{.spec.defaultNetwork.type}}/{{.spec.defaultNetwork.openshiftSDNConfig.mode}}{{end}}").CombinedOutput()
if err != nil {
e2e.Logf("Could not get network operator configuration: not adding any plugin-specific skips")
return nil
}
return strings.Split(string(out), " ")
}
func newGinkgoTestRenamerFromGlobals(provider string, networkSkips []string) *ginkgoTestRenamer {
var allLabels []string
matches := make(map[string]*regexp.Regexp)
stringMatches := make(map[string][]string)
excludes := make(map[string]*regexp.Regexp)
for label, items := range testMaps {
sort.Strings(items)
allLabels = append(allLabels, label)
var remain []string
for _, item := range items {
re := regexp.MustCompile(item)
if p, ok := re.LiteralPrefix(); ok {
stringMatches[label] = append(stringMatches[label], p)
} else {
remain = append(remain, item)
}
}
if len(remain) > 0 {
matches[label] = regexp.MustCompile(strings.Join(remain, `|`))
}
}
for label, items := range labelExcludes {
sort.Strings(items)
excludes[label] = regexp.MustCompile(strings.Join(items, `|`))
}
sort.Strings(allLabels)
if provider != "" {
excludedTests = append(excludedTests, fmt.Sprintf(`\[Skipped:%s\]`, provider))
}
for _, network := range networkSkips {
excludedTests = append(excludedTests, fmt.Sprintf(`\[Skipped:Network/%s\]`, network))
}
klog.V(4).Infof("openshift-tests excluded test regex is %q", strings.Join(excludedTests, `|`))
excludedTestsFilter := regexp.MustCompile(strings.Join(excludedTests, `|`))
return &ginkgoTestRenamer{
allLabels: allLabels,
stringMatches: stringMatches,
matches: matches,
excludes: excludes,
excludedTestsFilter: excludedTestsFilter,
}
}
type ginkgoTestRenamer struct {
allLabels []string
stringMatches map[string][]string
matches map[string]*regexp.Regexp
excludes map[string]*regexp.Regexp
excludedTestsFilter *regexp.Regexp
}
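// maybeRenameTest repeatedly appends any matching labels to the test name, then tags the test
// with the appropriate [Suite:...] annotations unless it matches the exclusion filter.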
func (r *ginkgoTestRenamer) maybeRenameTest(name string, node types.TestNode) {
labels := ""
for {
count := 0
for _, label := range r.allLabels {
if strings.Contains(name, label) {
continue
}
var hasLabel bool
for _, segment := range r.stringMatches[label] {
hasLabel = strings.Contains(name, segment)
if hasLabel {
break
}
}
if !hasLabel {
if re := r.matches[label]; re != nil {
hasLabel = r.matches[label].MatchString(name)
}
}
if hasLabel {
// TODO: remove when we no longer need it
if re, ok := r.excludes[label]; ok && re.MatchString(name) {
continue
}
count++
labels += " " + label
name += " " + label
}
}
if count == 0 {
break
}
}
if !r.excludedTestsFilter.MatchString(name) {
isSerial := strings.Contains(name, "[Serial]")
isConformance := strings.Contains(name, "[Conformance]")
switch {
case isSerial && isConformance:
node.SetText(node.Text() + " [Suite:openshift/conformance/serial/minimal]")
case isSerial:
node.SetText(node.Text() + " [Suite:openshift/conformance/serial]")
case isConformance:
node.SetText(node.Text() + " [Suite:openshift/conformance/parallel/minimal]")
default:
node.SetText(node.Text() + " [Suite:openshift/conformance/parallel]")
}
}
if strings.Contains(node.CodeLocation().FileName, "/origin/test/") && !strings.Contains(node.Text(), "[Suite:openshift") {
node.SetText(node.Text() + " [Suite:openshift]")
}
if strings.Contains(node.CodeLocation().FileName, "/kubernetes/test/e2e/") {
node.SetText(node.Text() + " [Suite:k8s]")
}
node.SetText(node.Text() + labels)
}
// InitDefaultEnvironmentVariables makes sure certain required env vars are available in the case
// that extended tests are invoked directly via calls to ginkgo/extended.test
func InitDefaultEnvironmentVariables() {
if ad := os.Getenv("ARTIFACT_DIR"); len(strings.TrimSpace(ad)) == 0 {
os.Setenv("ARTIFACT_DIR", filepath.Join(os.TempDir(), "artifacts"))
}
}
// TODO: Use either explicit tags (k8s.io) or https://github.com/onsi/ginkgo/pull/228 to implement this.
// isPackage determines whether the test is in a package. Ideally would be implemented in ginkgo.
func isPackage(pkg string) bool {
return strings.Contains(ginkgo.CurrentGinkgoTestDescription().FileName, pkg)
}
// TODO: For both is*Test functions, use either explicit tags (k8s.io) or https://github.com/onsi/ginkgo/pull/228
func isOriginTest() bool {
return isPackage("/origin/test/")
}
func isKubernetesE2ETest() bool {
return isPackage("/kubernetes/test/e2e/")
}
func testNameContains(name string) bool {
return strings.Contains(ginkgo.CurrentGinkgoTestDescription().FullTestText, name)
}
func skipTestNamespaceCustomization() bool {
return (isPackage("/kubernetes/test/e2e/namespace.go") && (testNameContains("should always delete fast") || testNameContains("should delete fast enough")))
}
// createTestingNS ensures that kubernetes e2e tests have their service accounts in the privileged and anyuid SCCs
func createTestingNS(baseName string, c kclientset.Interface, labels map[string]string) (*kapiv1.Namespace, error) {
if !strings.HasPrefix(baseName, "e2e-") {
baseName = "e2e-" + baseName
}
ns, err := e2e.CreateTestingNS(baseName, c, labels)
if err != nil {
return ns, err
}
// Add anyuid and privileged permissions for upstream tests
if strings.HasPrefix(baseName, "e2e-k8s-") || (isKubernetesE2ETest() && !skipTestNamespaceCustomization()) {
clientConfig, err := getClientConfig(KubeConfigPath())
if err != nil {
return ns, err
}
securityClient, err := securityv1client.NewForConfig(clientConfig)
if err != nil {
return ns, err
}
e2e.Logf("About to run a Kube e2e test, ensuring namespace is privileged")
// add the "privileged" scc to ensure pods that explicitly
// request extra capabilities are not rejected
addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "privileged")
// add the "anyuid" scc to ensure pods that don't specify a
// uid don't get forced into a range (mimics upstream
// behavior)
addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "anyuid")
// add the "hostmount-anyuid" scc to ensure pods using hostPath
// can execute tests
addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "hostmount-anyuid")
// The intra-pod test requires that the service account have
// permission to retrieve service endpoints.
rbacClient, err := rbacv1client.NewForConfig(clientConfig)
if err != nil {
return ns, err
}
addRoleToE2EServiceAccounts(rbacClient, []kapiv1.Namespace{*ns}, "view")
// in practice too many kube tests ignore scheduling constraints
allowAllNodeScheduling(c, ns.Name)
}
return ns, err
}
var (
testMaps = map[string][]string{
// tests that require a local host
"[Local]": {
// Doesn't work on scaled up clusters
`\[Feature:ImagePrune\]`,
},
// alpha features that are not gated
"[Disabled:Alpha]": {
`\[Feature:Initializers\]`, // admission controller disabled
`\[Feature:TTLAfterFinished\]`, // flag gate is off
`\[Feature:GPUDevicePlugin\]`, // GPU node needs to be available
`\[sig-scheduling\] GPUDevicePluginAcrossRecreate \[Feature:Recreate\]`, // GPU node needs to be available
`\[Feature:ExpandCSIVolumes\]`, // off by default . sig-storage
`\[Feature:DynamicAudit\]`, // off by default. sig-master
`\[NodeAlphaFeature:VolumeSubpathEnvExpansion\]`, // flag gate is off
`\[Feature:IPv6DualStack.*\]`,
`version v1 should create Endpoints and EndpointSlices for Pods matching a Service`, // off by default.
},
// tests for features that are not implemented in openshift
"[Disabled:Unimplemented]": {
`\[Feature:Networking-IPv6\]`, // openshift-sdn doesn't support yet
`Monitoring`, // Not installed, should be
`Cluster level logging`, // Not installed yet
`Kibana`, // Not installed
`Ubernetes`, // Can't set zone labels today
`kube-ui`, // Not installed by default
`Kubernetes Dashboard`, // Not installed by default (also probably slow image pull)
`\[Feature:ServiceLoadBalancer\]`, // Not enabled yet
`\[Feature:RuntimeClass\]`, // disable runtimeclass tests in 4.1 (sig-pod/[email protected])
`NetworkPolicy.*egress`, // not supported
`NetworkPolicy.*named port`, // not yet implemented
`enforce egress policy`, // not supported
`should proxy to cadvisor`, // we don't expose cAdvisor port directly for security reasons
},
// tests that rely on special configuration that we do not yet support
"[Disabled:SpecialConfig]": {
`\[Feature:ImageQuota\]`, // Quota isn't turned on by default, we should do that and then reenable these tests
`\[Feature:Audit\]`, // Needs special configuration
`\[Feature:LocalStorageCapacityIsolation\]`, // relies on a separate daemonset?
`\[sig-cluster-lifecycle\]`, // cluster lifecycle test require a different kind of upgrade hook.
`\[sig-cloud-provider-gcp\]`, // these test require a different configuration - note that GCE tests from the sig-cluster-lifecycle were moved to the sig-cloud-provider-gcpcluster lifecycle see https://github.com/kubernetes/kubernetes/commit/0b3d50b6dccdc4bbd0b3e411c648b092477d79ac#diff-3b1910d08fb8fd8b32956b5e264f87cb
`\[Feature:StatefulUpgrade\]`, // related to cluster lifecycle (in e2e/lifecycle package) and requires an upgrade hook we don't use
`kube-dns-autoscaler`, // Don't run kube-dns
`should check if Kubernetes master services is included in cluster-info`, // Don't run kube-dns
`DNS configMap`, // this tests dns federation configuration via configmap, which we don't support yet
`authentication: OpenLDAP`, // needs separate setup and bucketing for openldap bootstrapping
`NodeProblemDetector`, // requires a non-master node to run on
`Advanced Audit should audit API calls`, // expects to be able to call /logs
`Firewall rule should have correct firewall rules for e2e cluster`, // Upstream-install specific
},
// tests that are known broken and need to be fixed upstream or in openshift
// always add an issue here
"[Disabled:Broken]": {
`mount an API token into pods`, // We add 6 secrets, not 1
`ServiceAccounts should ensure a single API token exists`, // We create lots of secrets
`unchanging, static URL paths for kubernetes api services`, // the test needs to exclude URLs that are not part of conformance (/logs)
`Simple pod should handle in-cluster config`, // kubectl cp is not preserving executable bit
`Services should be able to up and down services`, // we don't have wget installed on nodes
`Network should set TCP CLOSE_WAIT timeout`, // possibly some difference between ubuntu and fedora
`Services should be able to create a functioning NodePort service`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711603
`\[NodeFeature:Sysctls\]`, // needs SCC support
`should check kube-proxy urls`, // previously this test was skipped b/c we reported -1 as the number of nodes, now we report proper number and test fails
`SSH`, // TRIAGE
`should implement service.kubernetes.io/service-proxy-name`, // this is an optional test that requires SSH. sig-network
`should idle the service and DeploymentConfig properly`, // idling with a single service and DeploymentConfig [Conformance]
`should answer endpoint and wildcard queries for the cluster`, // currently not supported by dns operator https://github.com/openshift/cluster-dns-operator/issues/43
`should allow ingress access on one named port`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711602
`ClusterDns \[Feature:Example\] should create pod that uses dns`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711601
`PreemptionExecutionPath runs ReplicaSets to verify preemption running path`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711606
`TaintBasedEvictions`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711608
`recreate nodes and ensure they function upon restart`, // https://bugzilla.redhat.com/show_bug.cgi?id=1756428
`\[Driver: iscsi\]`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711627
`\[Feature:VolumeSnapshotDataSource\]`, // Alpha (disabled by default) in Kubernetes 1.16
// TODO(workloads): reenable
`SchedulerPreemption`,
// requires a 1.14 kubelet, enable when rhcos is built for 4.2
"when the NodeLease feature is enabled",
"RuntimeClass should reject",
// TODO(node): configure the cri handler for the runtime class to make this work
"should run a Pod requesting a RuntimeClass with a configured handler",
"should reject a Pod requesting a RuntimeClass with conflicting node selector",
"should run a Pod requesting a RuntimeClass with scheduling",
// TODO(sdn): reenable when openshift/sdn is rebased to 1.16
`Services should implement service.kubernetes.io/headless`,
// TODO(sdn): test pod fails to connect in 1.16
`should allow ingress access from updated pod`,
},
// tests that may work, but we don't support them
"[Disabled:Unsupported]": {
`\[Driver: rbd\]`, // OpenShift 4.x does not support Ceph RBD (use CSI instead)
`\[Driver: ceph\]`, // OpenShift 4.x does not support CephFS (use CSI instead)
},
// tests too slow to be part of conformance
"[Slow]": {
`\[sig-scalability\]`, // disable from the default set for now
`should create and stop a working application`, // Inordinately slow tests
`\[Feature:PerformanceDNS\]`, // very slow
`should ensure that critical pod is scheduled in case there is no resources available`, // should be tagged disruptive, consumes 100% of cluster CPU
`validates that there exists conflict between pods with same hostPort and protocol but one using 0\.0\.0\.0 hostIP`, // 5m, really?
},
// tests that are known flaky
"[Flaky]": {
`Job should run a job to completion when tasks sometimes fail and are not locally restarted`, // seems flaky, also may require too many resources
`openshift mongodb replication creating from a template`, // flaking on deployment
// TODO(node): test works when run alone, but not in the suite in CI
`\[Feature:HPA\] Horizontal pod autoscaling \(scale resource: CPU\) \[sig-autoscaling\] ReplicationController light Should scale from 1 pod to 2 pods`,
},
// tests that must be run without competition
"[Serial]": {
`\[Disruptive\]`,
`\[Feature:Performance\]`, // requires isolation
`\[Feature:ManualPerformance\]`, // requires isolation
`\[Feature:HighDensityPerformance\]`, // requires no other namespaces
`Service endpoints latency`, // requires low latency
`Clean up pods on node`, // schedules up to max pods per node
`should allow starting 95 pods per node`,
`DynamicProvisioner should test that deleting a claim before the volume is provisioned deletes the volume`, // test is very disruptive to other tests
`Multi-AZ Clusters should spread the pods`, // spreading is a priority, not a predicate, and if the node is temporarily full the priority will be ignored
`Should be able to support the 1\.7 Sample API Server using the current Aggregator`, // down apiservices break other clients today https://bugzilla.redhat.com/show_bug.cgi?id=1623195
`\[Feature:HPA\] Horizontal pod autoscaling \(scale resource: CPU\) \[sig-autoscaling\] ReplicationController light Should scale from 1 pod to 2 pods`,
},
"[Skipped:azure]": {
"Networking should provide Internet connection for containers", // Azure does not allow ICMP traffic to internet.
// openshift-tests cannot access Azure API to create in-line or pre-provisioned volumes, https://bugzilla.redhat.com/show_bug.cgi?id=1723603
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV`,
},
"[Skipped:gce]": {
// Requires creation of a different compute instance in a different zone and is not compatible with volumeBindingMode of WaitForFirstConsumer which we use in 4.x
`\[sig-scheduling\] Multi-AZ Cluster Volumes \[sig-storage\] should only be allowed to provision PDs in zones where nodes exist`,
// The following tests try to ssh directly to a node. None of our nodes have external IPs
`\[k8s.io\] \[sig-node\] crictl should be able to run crictl on the node`,
`\[sig-storage\] Flexvolumes should be mountable`,
`\[sig-storage\] Detaching volumes should not work when mount is in progress`,
// We are using openshift-sdn to conceal metadata
`\[sig-auth\] Metadata Concealment should run a check-metadata-concealment job to completion`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1740959
`\[sig-api-machinery\] AdmissionWebhook Should be able to deny pod and configmap creation`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1745720
`\[sig-storage\] CSI Volumes \[Driver: pd.csi.storage.gke.io\]\[Serial\]`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1749882
`\[sig-storage\] CSI Volumes CSI Topology test using GCE PD driver \[Serial\]`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1751367
`gce-localssd-scsi-fs`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1750851
// should be serial if/when it's re-enabled
`\[HPA\] Horizontal pod autoscaling \(scale resource: Custom Metrics from Stackdriver\)`,
},
"[Skipped:openstack]": {
// https://bugzilla.redhat.com/show_bug.cgi?id=1763936
`\[sig-network\] Networking Granular Checks: Services should function for node-Service`,
`\[sig-network\] Networking Granular Checks: Services should function for pod-Service`,
`\[sig-network\] Networking Granular Checks: Services should function for endpoint-Service`,
},
// tests that don't pass under openshift-sdn but that are expected to pass
// with other network plugins (particularly ovn-kubernetes)
"[Skipped:Network/OpenShiftSDN]": {
`NetworkPolicy between server and client should allow egress access on one named port`, // not yet implemented
},
// tests that don't pass under openshift-sdn multitenant mode
"[Skipped:Network/OpenShiftSDN/Multitenant]": {
`\[Feature:NetworkPolicy\]`, // not compatible with multitenant mode
`\[sig-network\] Services should preserve source pod IP for traffic thru service cluster IP`, // known bug, not planned to be fixed
},
// tests that don't pass under OVN Kubernetes
"[Skipped:Network/OVNKubernetes]": {
// https://jira.coreos.com/browse/SDN-510: OVN-K doesn't support session affinity
`\[sig-network\] Networking Granular Checks: Services should function for client IP based session affinity: http`,
`\[sig-network\] Networking Granular Checks: Services should function for client IP based session affinity: udp`,
`\[sig-network\] Services should be able to switch session affinity for NodePort service`,
`\[sig-network\] Services should be able to switch session affinity for service with type clusterIP`,
`\[sig-network\] Services should have session affinity work for NodePort service`,
`\[sig-network\] Services should have session affinity work for service with type clusterIP`,
// SDN-587: OVN-Kubernetes doesn't support hairpin services
`\[sig-network\] Services should allow pods to hairpin back to themselves through services`,
`\[sig-network\] Networking Granular Checks: Services should function for endpoint-Service`,
// https://github.com/ovn-org/ovn-kubernetes/issues/928
`\[sig-network\] Services should be rejected when no endpoints exist`,
},
"[Suite:openshift/scalability]": {},
// tests that replace the old test-cmd script
"[Suite:openshift/test-cmd]": {
`\[Suite:openshift/test-cmd\]`,
},
"[Suite:openshift/csi]": {
`External Storage \[Driver:`,
},
}
// labelExcludes temporarily block tests out of a specific suite
labelExcludes = map[string][]string{}
excludedTests = []string{
`\[Disabled:`,
`\[Disruptive\]`,
`\[Skipped\]`,
`\[Slow\]`,
`\[Flaky\]`,
`\[local\]`,
`\[Suite:openshift/test-cmd\]`,
}
)
// checkSyntheticInput selects tests based on synthetic skips or focuses
func checkSyntheticInput() {
checkSuiteSkips()
}
// checkSuiteSkips ensures Origin/Kubernetes synthetic skip labels are applied
// DEPRECATED: remove in a future release
func checkSuiteSkips() {
switch {
case isOriginTest():
if strings.Contains(config.GinkgoConfig.SkipString, "Synthetic Origin") {
ginkgo.Skip("skipping all openshift/origin tests")
}
case isKubernetesE2ETest():
if strings.Contains(config.GinkgoConfig.SkipString, "Synthetic Kubernetes") {
ginkgo.Skip("skipping all k8s.io/kubernetes tests")
}
}
}
var longRetry = wait.Backoff{Steps: 100}
// allowAllNodeScheduling sets the annotation on the namespace that allows pods to be scheduled onto all nodes.
func allowAllNodeScheduling(c kclientset.Interface, namespace string) {
err := retry.RetryOnConflict(longRetry, func() error {
ns, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
if err != nil {
return err
}
if ns.Annotations == nil {
ns.Annotations = make(map[string]string)
}
ns.Annotations[projectv1.ProjectNodeSelector] = ""
_, err = c.CoreV1().Namespaces().Update(ns)
return err
})
if err != nil {
FatalErr(err)
}
}
func addE2EServiceAccountsToSCC(securityClient securityv1client.Interface, namespaces []kapiv1.Namespace, sccName string) {
// Because updates can race, we need to set the backoff retries to be greater than the number of possible
// parallel jobs starting at once. Set very high to allow future high parallelism.
err := retry.RetryOnConflict(longRetry, func() error {
scc, err := securityClient.SecurityV1().SecurityContextConstraints().Get(sccName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
return nil
}
return err
}
for _, ns := range namespaces {
if isE2ENamespace(ns.Name) {
scc.Groups = append(scc.Groups, fmt.Sprintf("system:serviceaccounts:%s", ns.Name))
}
}
if _, err := securityClient.SecurityV1().SecurityContextConstraints().Update(scc); err != nil {
return err
}
return nil
})
if err != nil {
FatalErr(err)
}
}
func isE2ENamespace(ns string) bool {
return true
//return strings.HasPrefix(ns, "e2e-") ||
// strings.HasPrefix(ns, "aggregator-") ||
// strings.HasPrefix(ns, "csi-") ||
// strings.HasPrefix(ns, "deployment-") ||
// strings.HasPrefix(ns, "disruption-") ||
// strings.HasPrefix(ns, "gc-") ||
// strings.HasPrefix(ns, "kubectl-") ||
// strings.HasPrefix(ns, "proxy-") ||
// strings.HasPrefix(ns, "provisioning-") ||
// strings.HasPrefix(ns, "statefulset-") ||
// strings.HasPrefix(ns, "services-")
}
func addRoleToE2EServiceAccounts(rbacClient rbacv1client.RbacV1Interface, namespaces []kapiv1.Namespace, roleName string) {
err := retry.RetryOnConflict(longRetry, func() error {
for _, ns := range namespaces {
if isE2ENamespace(ns.Name) && ns.Status.Phase != kapiv1.NamespaceTerminating {
_, err := rbacClient.RoleBindings(ns.Name).Create(&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{GenerateName: "default-" + roleName, Namespace: ns.Name},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: roleName,
},
Subjects: []rbacv1.Subject{
{Name: "default", Namespace: ns.Name, Kind: rbacv1.ServiceAccountKind},
},
})
if err != nil {
e2e.Logf("Warning: Failed to add role to e2e service account: %v", err)
}
}
}
return nil
})
if err != nil {
FatalErr(err)
}
}
|
[
"\"DELETE_NAMESPACE\"",
"\"TEST_REPORT_FILE_NAME\"",
"\"TEST_OUTPUT_QUIET\"",
"\"TEST_REPORT_DIR\"",
"\"ARTIFACT_DIR\""
] |
[] |
[
"DELETE_NAMESPACE",
"TEST_REPORT_DIR",
"TEST_REPORT_FILE_NAME",
"TEST_OUTPUT_QUIET",
"ARTIFACT_DIR"
] |
[]
|
["DELETE_NAMESPACE", "TEST_REPORT_DIR", "TEST_REPORT_FILE_NAME", "TEST_OUTPUT_QUIET", "ARTIFACT_DIR"]
|
go
| 5 | 0 | |
modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.testframework;
import java.io.File;
import java.io.FileInputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.MulticastSocket;
import java.net.ServerSocket;
import java.nio.file.attribute.PosixFilePermission;
import java.security.GeneralSecurityException;
import java.security.KeyStore;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Queue;
import java.util.Random;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.cache.CacheException;
import javax.cache.configuration.Factory;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteException;
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.annotations.QuerySqlFunction;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.IgniteFutureCancelledCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.IgniteInterruptedCheckedException;
import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.internal.client.ssl.GridSslBasicContextFactory;
import org.apache.ignite.internal.client.ssl.GridSslContextFactory;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheAdapter;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology;
import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheAdapter;
import org.apache.ignite.internal.processors.cache.verify.IdleVerifyResultV2;
import org.apache.ignite.internal.processors.odbc.ClientListenerProcessor;
import org.apache.ignite.internal.processors.port.GridPortRecord;
import org.apache.ignite.internal.util.GridBusyLock;
import org.apache.ignite.internal.util.future.GridFutureAdapter;
import org.apache.ignite.internal.util.lang.GridAbsClosure;
import org.apache.ignite.internal.util.lang.GridAbsPredicate;
import org.apache.ignite.internal.util.lang.IgnitePair;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.G;
import org.apache.ignite.internal.util.typedef.T2;
import org.apache.ignite.internal.util.typedef.X;
import org.apache.ignite.internal.util.typedef.internal.LT;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.lang.IgniteFuture;
import org.apache.ignite.lang.IgniteInClosure;
import org.apache.ignite.lang.IgnitePredicate;
import org.apache.ignite.lang.IgniteRunnable;
import org.apache.ignite.plugin.extensions.communication.Message;
import org.apache.ignite.spi.discovery.DiscoverySpiCustomMessage;
import org.apache.ignite.spi.discovery.DiscoverySpiListener;
import org.apache.ignite.ssl.SslContextFactory;
import org.apache.ignite.testframework.config.GridTestProperties;
import org.apache.ignite.testframework.junits.GridAbstractTest;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import static org.junit.Assert.assertNotNull;
/**
* Utility class for tests.
*/
public final class GridTestUtils {
/** Default busy wait sleep interval in milliseconds. */
public static final long DFLT_BUSYWAIT_SLEEP_INTERVAL = 200;
/** */
public static final long DFLT_TEST_TIMEOUT = 5 * 60 * 1000;
/** */
static final String ALPHABETH = "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890_";
/**
* Hook object that intervenes in discovery message handling
* and thus allows making assertions or performing other actions, such as skipping certain discovery messages.
*/
public static class DiscoveryHook {
/**
* @param msg Message.
*/
public void handleDiscoveryMessage(DiscoverySpiCustomMessage msg) {
}
/**
* @param ignite Ignite.
*/
public void ignite(IgniteEx ignite) {
// No-op.
}
}
/**
* Injects {@link DiscoveryHook} into handling logic.
*/
public static final class DiscoverySpiListenerWrapper implements DiscoverySpiListener {
/** */
private final DiscoverySpiListener delegate;
/** */
private final DiscoveryHook hook;
/**
* @param delegate Delegate.
* @param hook Hook.
*/
private DiscoverySpiListenerWrapper(DiscoverySpiListener delegate, DiscoveryHook hook) {
this.hook = hook;
this.delegate = delegate;
}
/** {@inheritDoc} */
@Override public IgniteFuture<?> onDiscovery(int type, long topVer, ClusterNode node, Collection<ClusterNode> topSnapshot, @Nullable Map<Long, Collection<ClusterNode>> topHist, @Nullable DiscoverySpiCustomMessage spiCustomMsg) {
hook.handleDiscoveryMessage(spiCustomMsg);
return delegate.onDiscovery(type, topVer, node, topSnapshot, topHist, spiCustomMsg);
}
/** {@inheritDoc} */
@Override public void onLocalNodeInitialized(ClusterNode locNode) {
delegate.onLocalNodeInitialized(locNode);
}
/**
* @param delegate Delegate.
* @param discoveryHook Discovery hook.
*/
public static DiscoverySpiListener wrap(DiscoverySpiListener delegate, DiscoveryHook discoveryHook) {
return new DiscoverySpiListenerWrapper(delegate, discoveryHook);
}
}
/** Test parameters scale factor util. */
public static final class SF extends ScaleFactorUtil {
}
/** */
private static final Map<Class<?>, String> addrs = new HashMap<>();
/** */
private static final Map<Class<? extends GridAbstractTest>, Integer> mcastPorts = new HashMap<>();
/** */
private static final Map<Class<? extends GridAbstractTest>, Integer> discoPorts = new HashMap<>();
/** */
private static final Map<Class<? extends GridAbstractTest>, Integer> commPorts = new HashMap<>();
/** */
private static int[] addr;
/** */
private static final int default_mcast_port = 50000;
/** */
private static final int max_mcast_port = 54999;
/** */
private static final int default_comm_port = 45000;
/** */
private static final int max_comm_port = 49999;
/** */
private static final int default_disco_port = 55000;
/** */
private static final int max_disco_port = 59999;
/** */
private static int mcastPort = default_mcast_port;
/** */
private static int discoPort = default_disco_port;
/** */
private static int commPort = default_comm_port;
/** */
private static final GridBusyLock busyLock = new GridBusyLock();
/** */
public static final ConcurrentMap<IgnitePair<UUID>, IgnitePair<Queue<Message>>> msgMap = new ConcurrentHashMap<>();
/**
* Enforce non-instantiability of this utility class.
*/
private GridTestUtils() {
// No-op.
}
/**
* @param from From node ID.
* @param to To node ID.
* @param msg Message.
* @param sent {@code true} if the message was sent, {@code false} if it was received.
*/
public static void addMessage(UUID from, UUID to, Message msg, boolean sent) {
IgnitePair<UUID> key = new IgnitePair<>(from, to);
IgnitePair<Queue<Message>> val = msgMap.get(key);
if (val == null) {
IgnitePair<Queue<Message>> old = msgMap.putIfAbsent(key,
val = new IgnitePair<Queue<Message>>(
new ConcurrentLinkedQueue<Message>(), new ConcurrentLinkedQueue<Message>()));
if (old != null)
val = old;
}
(sent ? val.get1() : val.get2()).add(msg);
}
/**
* Dumps all messages tracked with {@link #addMessage(UUID, UUID, Message, boolean)} to std out.
*/
public static void dumpMessages() {
for (Map.Entry<IgnitePair<UUID>, IgnitePair<Queue<Message>>> entry : msgMap.entrySet()) {
U.debug("\n" + entry.getKey().get1() + " [sent to] " + entry.getKey().get2());
for (Message message : entry.getValue().get1())
U.debug("\t" + message);
U.debug(entry.getKey().get2() + " [received from] " + entry.getKey().get1());
for (Message message : entry.getValue().get2())
U.debug("\t" + message);
}
}
// static {
// new Thread(new Runnable() {
// @Override public void run() {
// JOptionPane.showMessageDialog(null, "Close this to dump messages.");
//
// dumpMessages();
// }
// }).start();
// }
/**
* Checks whether callable throws expected exception or not.
*
* @param log Logger (optional).
* @param call Callable.
* @param cls Exception class.
* @param msg Exception message (optional). If provided, the message of the
* thrown exception must contain it.
* @return Thrown throwable.
*/
public static Throwable assertThrows(@Nullable IgniteLogger log, Callable<?> call,
Class<? extends Throwable> cls, @Nullable String msg) {
assert call != null;
assert cls != null;
try {
call.call();
}
catch (Throwable e) {
if (cls != e.getClass() && !cls.isAssignableFrom(e.getClass())) {
if (e.getClass() == CacheException.class && e.getCause() != null && e.getCause().getClass() == cls)
e = e.getCause();
else {
U.error(log, "Unexpected exception.", e);
fail("Exception class is not as expected [expected=" + cls + ", actual=" + e.getClass() + ']', e);
}
}
if (msg != null && (e.getMessage() == null || !e.getMessage().contains(msg))) {
U.error(log, "Unexpected exception message.", e);
fail("Exception message is not as expected [expected=" + msg + ", actual=" + e.getMessage() + ']', e);
}
if (log != null) {
if (log.isInfoEnabled())
log.info("Caught expected exception: " + e.getMessage());
}
else
X.println("Caught expected exception: " + e.getMessage());
return e;
}
throw new AssertionError("Exception has not been thrown.");
}
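// Usage sketch (illustrative; 'log' is assumed to be the test's logger, and the failing callable
// and its message are assumptions chosen purely for demonstration):
//
//     Throwable t = assertThrows(log, new Callable<Object>() {
//         @Override public Object call() throws Exception {
//             throw new IllegalStateException("Node is stopped");
//         }
//     }, IllegalStateException.class, "Node is stopped");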
/**
* Checks whether callable throws an exception with specified cause.
*
* @param log Logger (optional).
* @param call Callable.
* @param cls Exception class.
* @param msg Exception message (optional). If provided, the message of the
* matching exception in the cause chain must contain it.
* @return Thrown throwable.
*/
public static Throwable assertThrowsAnyCause(@Nullable IgniteLogger log, Callable<?> call,
Class<? extends Throwable> cls, @Nullable String msg) {
assert call != null;
assert cls != null;
try {
call.call();
}
catch (Throwable e) {
Throwable t = e;
while (t != null) {
if (cls == t.getClass() && (msg == null || (t.getMessage() != null && t.getMessage().contains(msg)))) {
if (log != null && log.isInfoEnabled())
log.info("Caught expected exception: " + t.getMessage());
return t;
}
t = t.getCause();
}
fail("Unexpected exception", e);
}
throw new AssertionError("Exception has not been thrown.");
}
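// Usage sketch (illustrative; the wrapped IllegalArgumentException below is an assumption for
// demonstration - any exception in the cause chain of the expected class with a matching message
// satisfies the check):
//
//     assertThrowsAnyCause(log, new Callable<Object>() {
//         @Override public Object call() throws Exception {
//             throw new IgniteException("Operation failed", new IllegalArgumentException("Bad key"));
//         }
//     }, IllegalArgumentException.class, "Bad key");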
/**
* Checks whether the callable throws the expected exception or one of its subclasses.
*
* @param log Logger (optional).
* @param call Callable.
* @param cls Exception class.
* @param msg Exception message (optional). If provided, the message of the
* thrown exception must start with it.
* @return Thrown throwable.
*/
@Nullable public static Throwable assertThrowsInherited(@Nullable IgniteLogger log, Callable<?> call,
Class<? extends Throwable> cls, @Nullable String msg) {
assert call != null;
assert cls != null;
try {
call.call();
}
catch (Throwable e) {
if (!cls.isAssignableFrom(e.getClass()))
fail("Exception class is not as expected [expected=" + cls + ", actual=" + e.getClass() + ']', e);
if (msg != null && (e.getMessage() == null || !e.getMessage().startsWith(msg)))
fail("Exception message is not as expected [expected=" + msg + ", actual=" + e.getMessage() + ']', e);
if (log != null) {
if (log.isDebugEnabled())
log.debug("Caught expected exception: " + e.getMessage());
}
else
X.println("Caught expected exception: " + e.getMessage());
return e;
}
throw new AssertionError("Exception has not been thrown.");
}
/**
* Checks whether callable throws exception, which is itself of a specified
* class, or has a cause of the specified class.
*
* @param runnable Runnable.
* @param cls Expected class.
* @return Thrown throwable.
*/
@Nullable public static Throwable assertThrowsWithCause(Runnable runnable, Class<? extends Throwable> cls) {
return assertThrowsWithCause(new Callable<Integer>() {
@Override public Integer call() throws Exception {
runnable.run();
return 0;
}
}, cls);
}
/**
* Checks whether callable throws exception, which is itself of a specified
* class, or has a cause of the specified class.
*
* @param call Callable.
* @param cls Expected class.
* @return Thrown throwable.
*/
@Nullable public static Throwable assertThrowsWithCause(Callable<?> call, Class<? extends Throwable> cls) {
assert call != null;
assert cls != null;
try {
call.call();
}
catch (Throwable e) {
if (!X.hasCause(e, cls))
fail("Exception is neither of a specified class, nor has a cause of the specified class: " + cls, e);
return e;
}
throw new AssertionError("Exception has not been thrown.");
}
/**
* Checks whether closure throws exception, which is itself of a specified
* class, or has a cause of the specified class.
*
* @param call Closure.
* @param p Parameter passed to closure.
* @param cls Expected class.
* @return Thrown throwable.
*/
public static <P> Throwable assertThrowsWithCause(IgniteInClosure<P> call, P p, Class<? extends Throwable> cls) {
assert call != null;
assert cls != null;
try {
call.apply(p);
}
catch (Throwable e) {
if (!X.hasCause(e, cls))
fail("Exception is neither of a specified class, nor has a cause of the specified class: " + cls, e);
return e;
}
throw new AssertionError("Exception has not been thrown.");
}
/**
* Asserts that the specified runnable completes within the specified timeout.
*
* @param msg Assertion message in case of timeout.
* @param timeout Timeout.
* @param timeUnit Timeout {@link TimeUnit}.
* @param runnable {@link Runnable} to check.
* @throws Exception In case of any exception distinct from {@link TimeoutException}.
*/
public static void assertTimeout(String msg, long timeout, TimeUnit timeUnit, Runnable runnable) throws Exception {
ExecutorService executorSvc = Executors.newSingleThreadExecutor();
Future<?> fut = executorSvc.submit(runnable);
try {
fut.get(timeout, timeUnit);
}
catch (TimeoutException ignored) {
fail(msg, null);
}
finally {
executorSvc.shutdownNow();
}
}
/**
* Asserts that the specified runnable completes within the specified timeout.
*
* @param timeout Timeout.
* @param timeUnit Timeout {@link TimeUnit}.
* @param runnable {@link Runnable} to check.
* @throws Exception In case of any exception distinct from {@link TimeoutException}.
*/
public static void assertTimeout(long timeout, TimeUnit timeUnit, Runnable runnable) throws Exception {
assertTimeout("Timeout occurred.", timeout, timeUnit, runnable);
}
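// Usage sketch (illustrative; the timeout and the runnable body are assumptions - the runnable
// must finish before the timeout, otherwise the assertion fails):
//
//     assertTimeout(2, TimeUnit.SECONDS, new Runnable() {
//         @Override public void run() {
//             // Work that is expected to complete well before the 2 second limit.
//         }
//     });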
/**
* Throw assertion error with specified error message and initialized cause.
*
* @param msg Error message.
* @param cause Error cause.
* @return Assertion error.
*/
private static AssertionError fail(String msg, @Nullable Throwable cause) {
AssertionError e = new AssertionError(msg);
if (cause != null)
e.initCause(cause);
throw e;
}
/**
* Checks whether object's method call throws expected exception or not.
*
* @param log Logger (optional).
* @param cls Exception class.
* @param msg Exception message (optional). If provided, the message of the
* thrown exception must contain it.
* @param obj Object to invoke method for.
* @param mtd Object's method to invoke.
* @param params Method parameters.
* @return Thrown throwable.
*/
@Nullable public static Throwable assertThrows(@Nullable IgniteLogger log, Class<? extends Throwable> cls,
@Nullable String msg, final Object obj, final String mtd, final Object... params) {
return assertThrows(log, new Callable() {
@Override public Object call() throws Exception {
return invoke(obj, mtd, params);
}
}, cls, msg);
}
/**
* Asserts that the elements of the iterable are in one-to-one correspondence
* with the given predicates.
*
* @param it Input iterable of elements.
* @param ps Array of predicates (one per element in the iterable).
*/
public static <T> void assertOneToOne(Iterable<T> it, IgnitePredicate<T>... ps) {
Collection<IgnitePredicate<T>> ps0 = new ArrayList<>(Arrays.asList(ps));
Collection<T2<IgnitePredicate<T>, T>> passed = new ArrayList<>();
for (T elem : it) {
for (T2<IgnitePredicate<T>, T> p : passed) {
if (p.get1().apply(elem))
throw new AssertionError("Two elements match one predicate [elem1=" + p.get2() +
", elem2=" + elem + ", pred=" + p.get1() + ']');
}
IgnitePredicate<T> matched = null;
for (IgnitePredicate<T> p : ps0) {
if (p.apply(elem)) {
if (matched != null)
throw new AssertionError("Element matches more than one predicate [elem=" + elem +
", pred1=" + p + ", pred2=" + matched + ']');
matched = p;
}
}
if (matched == null) // None matched.
throw new AssertionError("The element does not match [elem=" + elem +
", numRemainingPreds=" + ps0.size() + ']');
ps0.remove(matched);
passed.add(new T2<>(matched, elem));
}
}
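// Usage sketch (illustrative; the integers and predicates below are assumptions - each element
// must match exactly one predicate, and each predicate exactly one element):
//
//     assertOneToOne(
//         Arrays.asList(1, 2),
//         new IgnitePredicate<Integer>() {
//             @Override public boolean apply(Integer x) { return x == 1; }
//         },
//         new IgnitePredicate<Integer>() {
//             @Override public boolean apply(Integer x) { return x == 2; }
//         });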
/**
* Returns a multicast port for the given test class. The same port is never
* reused for a different test case; repeated calls for the same class return the cached port.
*
* @param cls Class.
* @return Next multicast port.
*/
public static synchronized int getNextMulticastPort(Class<? extends GridAbstractTest> cls) {
Integer portRet = mcastPorts.get(cls);
if (portRet != null)
return portRet;
int startPort = mcastPort;
while (true) {
if (mcastPort >= max_mcast_port)
mcastPort = default_mcast_port;
else
mcastPort++;
if (startPort == mcastPort)
break;
portRet = mcastPort;
MulticastSocket sock = null;
try {
sock = new MulticastSocket(portRet);
break;
}
catch (IOException ignored) {
// No-op.
}
finally {
U.closeQuiet(sock);
}
}
// Cache port to be reused by the same test.
mcastPorts.put(cls, portRet);
return portRet;
}
/**
* Returns a communication port for the given test class. The same port is never
* reused for a different test case; repeated calls for the same class return the cached port.
*
* @param cls Class.
* @return Next communication port.
*/
public static synchronized int getNextCommPort(Class<? extends GridAbstractTest> cls) {
Integer portRet = commPorts.get(cls);
if (portRet != null)
return portRet;
if (commPort >= max_comm_port)
commPort = default_comm_port;
else
// Reserve 10 ports per test.
commPort += 10;
portRet = commPort;
// Cache port to be reused by the same test.
commPorts.put(cls, portRet);
return portRet;
}
/**
* Returns a discovery port for the given test class. The same port is never
* reused for a different test case; repeated calls for the same class return the cached port.
*
* @param cls Class.
* @return Next discovery port.
*/
public static synchronized int getNextDiscoPort(Class<? extends GridAbstractTest> cls) {
Integer portRet = discoPorts.get(cls);
if (portRet != null)
return portRet;
if (discoPort >= max_disco_port)
discoPort = default_disco_port;
else
discoPort += 10;
portRet = discoPort;
// Cache port to be reused by the same test.
discoPorts.put(cls, portRet);
return portRet;
}
/**
* @return Free communication port number on localhost.
* @throws IOException If unable to find a free port.
*/
public static int getFreeCommPort() throws IOException {
for (int port = default_comm_port; port < max_comm_port; port++) {
try (ServerSocket sock = new ServerSocket(port)) {
return sock.getLocalPort();
}
catch (IOException ignored) {
// No-op.
}
}
throw new IOException("Unable to find a free communication port.");
}
/**
* Returns a multicast group for the given test class. The same group is never
* reused for a different test case; repeated calls for the same class return the cached group.
*
* @param cls Class.
* @return Next multicast group.
*/
public static synchronized String getNextMulticastGroup(Class<?> cls) {
String addrStr = addrs.get(cls);
if (addrStr != null)
return addrStr;
// Increment address.
if (addr[3] == 255) {
if (addr[2] == 255)
assert false;
else {
addr[2] += 1;
addr[3] = 1;
}
}
else
addr[3] += 1;
// Convert address to string.
StringBuilder b = new StringBuilder(15);
for (int i = 0; i < addr.length; i++) {
b.append(addr[i]);
if (i < addr.length - 1)
b.append('.');
}
addrStr = b.toString();
// Cache address to be reused by the same test.
addrs.put(cls, addrStr);
return addrStr;
}
/**
* Runs runnable object in specified number of threads.
*
* @param run Target runnable.
* @param threadNum Number of threads.
* @param threadName Thread name.
* @return Execution time in milliseconds.
* @throws Exception Thrown if at least one runnable execution failed.
*/
public static long runMultiThreaded(Runnable run, int threadNum, String threadName) throws Exception {
return runMultiThreaded(makeCallable(run, null), threadNum, threadName);
}
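// Usage sketch (illustrative; the thread count, thread name and runnable body are assumptions):
//
//     long execTimeMs = runMultiThreaded(new Runnable() {
//         @Override public void run() {
//             // Per-thread test body, e.g. concurrent cache operations against a started grid.
//         }
//     }, 4, "load-worker");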
/**
* Runs runnable object in specified number of threads.
*
* @param run Target runnable.
* @param threadNum Number of threads.
* @param threadName Thread name.
* @return Future for the run. Future returns execution time in milliseconds.
*/
public static IgniteInternalFuture<Long> runMultiThreadedAsync(Runnable run, int threadNum, String threadName) {
return runMultiThreadedAsync(makeCallable(run, null), threadNum, threadName);
}
/**
* Runs callable object in specified number of threads.
*
* @param call Callable.
* @param threadNum Number of threads.
* @param threadName Thread names.
* @return Execution time in milliseconds.
* @throws Exception If failed.
*/
public static long runMultiThreaded(Callable<?> call, int threadNum, String threadName) throws Exception {
List<Callable<?>> calls = Collections.<Callable<?>>nCopies(threadNum, call);
return runMultiThreaded(calls, threadName);
}
/**
* @param call Closure that receives thread index.
* @param threadNum Number of threads.
* @param threadName Thread names.
* @return Execution time in milliseconds.
* @throws Exception If failed.
*/
public static long runMultiThreaded(final IgniteInClosure<Integer> call, int threadNum, String threadName)
throws Exception {
List<Callable<?>> calls = new ArrayList<>(threadNum);
for (int i = 0; i < threadNum; i++) {
final int idx = i;
calls.add(new Callable<Void>() {
@Override public Void call() throws Exception {
call.apply(idx);
return null;
}
});
}
return runMultiThreaded(calls, threadName);
}
/**
* Runs callable object in specified number of threads.
*
* @param call Callable.
* @param threadNum Number of threads.
* @param threadName Thread names.
* @return Future for the run. Future returns execution time in milliseconds.
*/
public static IgniteInternalFuture<Long> runMultiThreadedAsync(Callable<?> call, int threadNum, final String threadName) {
final List<Callable<?>> calls = Collections.<Callable<?>>nCopies(threadNum, call);
final GridTestSafeThreadFactory threadFactory = new GridTestSafeThreadFactory(threadName);
IgniteInternalFuture<Long> runFut = runAsync(() -> runMultiThreaded(calls, threadFactory));
GridFutureAdapter<Long> resFut = new GridFutureAdapter<Long>() {
@Override public boolean cancel() throws IgniteCheckedException {
super.cancel();
if (isDone())
return false;
runFut.cancel();
threadFactory.interruptAllThreads();
return onCancelled();
}
};
runFut.listen(fut -> {
try {
resFut.onDone(fut.get());
}
catch (IgniteFutureCancelledCheckedException e) {
resFut.onCancelled();
}
catch (Throwable e) {
resFut.onDone(e);
}
});
return resFut;
}
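// Usage sketch (illustrative; the callable body and parameters are assumptions - the returned
// future completes when all worker threads finish and propagates the first failure, if any):
//
//     IgniteInternalFuture<Long> workersFut = runMultiThreadedAsync(new Callable<Object>() {
//         @Override public Object call() throws Exception {
//             // Concurrent test body.
//             return null;
//         }
//     }, 8, "async-worker");
//
//     // ... perform main-thread checks while the workers run ...
//
//     workersFut.get();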
/**
* Runs callable tasks each in separate threads.
*
* @param calls Callable tasks.
* @param threadName Thread name.
* @return Execution time in milliseconds.
* @throws Exception If failed.
*/
public static long runMultiThreaded(Iterable<Callable<?>> calls, String threadName) throws Exception {
return runMultiThreaded(calls, new GridTestSafeThreadFactory(threadName));
}
/**
* Runs callable tasks each in separate threads.
*
* @param calls Callable tasks.
* @param threadFactory Thread factory.
* @return Execution time in milliseconds.
* @throws Exception If failed.
*/
public static long runMultiThreaded(Iterable<Callable<?>> calls, GridTestSafeThreadFactory threadFactory)
throws Exception {
if (!busyLock.enterBusy())
throw new IllegalStateException("Failed to start new threads (test is being stopped).");
Collection<Thread> threads = new ArrayList<>();
long time;
try {
for (Callable<?> call : calls)
threads.add(threadFactory.newThread(call));
time = System.currentTimeMillis();
for (Thread t : threads)
t.start();
}
finally {
busyLock.leaveBusy();
}
// Wait for the threads to finish their job.
try {
for (Thread t : threads)
t.join();
} catch (InterruptedException e) {
for (Thread t : threads)
t.interrupt();
throw e;
}
time = System.currentTimeMillis() - time;
// Check that no errors happened in the started threads.
threadFactory.checkError();
return time;
}
/**
* Runs a runnable task asynchronously.
*
* @param task Runnable.
* @return Future with task result.
*/
public static IgniteInternalFuture runAsync(final Runnable task) {
return runAsync(task,"async-runnable-runner");
}
/**
* Runs a runnable task asynchronously.
*
* @param task Runnable.
* @return Future with task result.
*/
public static IgniteInternalFuture runAsync(final Runnable task, String threadName) {
return runAsync(() -> {
task.run();
return null;
}, threadName);
}
/**
* Runs a callable task asynchronously.
*
* @param task Callable.
* @return Future with task result.
*/
public static <T> IgniteInternalFuture<T> runAsync(final Callable<T> task) {
return runAsync(task, "async-callable-runner");
}
/**
* Runs a callable task asynchronously.
*
* @param task Callable.
* @param threadName Thread name.
* @return Future with task result.
*/
public static <T> IgniteInternalFuture<T> runAsync(final Callable<T> task, String threadName) {
if (!busyLock.enterBusy())
throw new IllegalStateException("Failed to start new threads (test is being stopped).");
try {
final GridTestSafeThreadFactory thrFactory = new GridTestSafeThreadFactory(threadName);
final GridFutureAdapter<T> fut = new GridFutureAdapter<T>() {
@Override public boolean cancel() throws IgniteCheckedException {
super.cancel();
if (isDone())
return false;
thrFactory.interruptAllThreads();
try {
get();
return false;
}
catch (IgniteFutureCancelledCheckedException e) {
return true;
}
catch (IgniteCheckedException e) {
return false;
}
}
};
thrFactory.newThread(() -> {
try {
// Execute task.
T res = task.call();
fut.onDone(res);
}
catch (InterruptedException e) {
fut.onCancelled();
}
catch (Throwable e) {
fut.onDone(e);
}
}).start();
return fut;
}
finally {
busyLock.leaveBusy();
}
}
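// Usage sketch (illustrative; the callable and the expected value are assumptions):
//
//     IgniteInternalFuture<Integer> fut = runAsync(new Callable<Integer>() {
//         @Override public Integer call() throws Exception {
//             return 42;
//         }
//     });
//
//     assert fut.get() == 42;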
/**
* Wait for all passed futures to complete even if they fail.
*
* @param futs Futures.
* @throws AssertionError If one or several futures failed; the underlying exceptions are attached as suppressed exceptions.
*/
public static void waitForAllFutures(IgniteInternalFuture<?>... futs) {
AssertionError err = null;
for (IgniteInternalFuture<?> fut : futs) {
try {
fut.get();
}
catch (Throwable t) {
if (err == null)
err = new AssertionError("One or several futures threw the exception.");
err.addSuppressed(t);
}
}
if (err != null)
throw err;
}
/**
* Interrupts and waits for termination of all the threads started
* so far by current test.
*
* @param log Logger.
*/
public static void stopThreads(IgniteLogger log) {
busyLock.block();
try {
GridTestSafeThreadFactory.stopAllThreads(log);
}
finally {
busyLock.unblock();
}
}
/**
* @return Ignite home.
* @throws Exception If failed.
*/
@SuppressWarnings({"ProhibitedExceptionThrown"})
public static String getIgniteHome() throws Exception {
String ggHome = System.getProperty("IGNITE_HOME");
if (ggHome == null)
ggHome = System.getenv("IGNITE_HOME");
if (ggHome == null)
throw new Exception("IGNITE_HOME parameter must be set either as system or environment variable.");
File dir = new File(ggHome);
if (!dir.exists())
throw new Exception("Ignite home does not exist [ignite-home=" + dir.getAbsolutePath() + ']');
if (!dir.isDirectory())
throw new Exception("Ignite home is not a directory [ignite-home=" + dir.getAbsolutePath() + ']');
return ggHome;
}
/**
* @param <T> Type.
* @param cls Class.
* @param annCls Annotation class.
* @return Annotation.
*/
@Nullable public static <T extends Annotation> T getAnnotation(Class<?> cls, Class<T> annCls) {
for (Class<?> cls0 = cls; cls0 != null; cls0 = cls0.getSuperclass()) {
T ann = cls0.getAnnotation(annCls);
if (ann != null)
return ann;
}
return null;
}
/**
* Initializes address.
*/
static {
InetAddress locHost = null;
try {
locHost = U.getLocalHost();
}
catch (IOException e) {
assert false : "Unable to get local address. This leads to the same multicast addresses " +
"in the local network.";
}
if (locHost != null) {
int thirdByte = locHost.getAddress()[3];
if (thirdByte < 0)
thirdByte += 256;
// To get different addresses for different machines.
addr = new int[] {229, thirdByte, 1, 1};
}
else
addr = new int[] {229, 1, 1, 1};
}
/**
* @param path Path.
* @param startFilter Start filter.
* @param endFilter End filter.
* @return List of JARs that corresponds to the filters.
* @throws IOException If failed.
*/
private static Collection<String> getFiles(String path, @Nullable final String startFilter,
@Nullable final String endFilter) throws IOException {
Collection<String> res = new ArrayList<>();
File file = new File(path);
assert file.isDirectory();
File[] jars = file.listFiles(new FilenameFilter() {
/**
* @see FilenameFilter#accept(File, String)
*/
@SuppressWarnings({"UnnecessaryJavaDocLink"})
@Override public boolean accept(File dir, String name) {
// Exclude spring.jar because it tries to load META-INF/spring-handlers.xml from
// all available JARs and, for example, create instances of classes from there.
// Exclude commons-logging as it is used by Spring and cast to the Log interface.
// Exclude log4j because of its design - one instance per VM.
if (name.startsWith("spring") || name.startsWith("log4j") ||
name.startsWith("commons-logging") || name.startsWith("junit") ||
name.startsWith("ignite-tests"))
return false;
boolean ret = true;
if (startFilter != null)
ret = name.startsWith(startFilter);
if (ret && endFilter != null)
ret = name.endsWith(endFilter);
return ret;
}
});
for (File jar : jars)
res.add(jar.getCanonicalPath());
return res;
}
/**
* Silently stops the grid.
* The method doesn't throw any exception.
*
* @param ignite Grid to stop.
* @param log Logger.
*/
public static void close(Ignite ignite, IgniteLogger log) {
if (ignite != null)
try {
G.stop(ignite.name(), false);
}
catch (Throwable e) {
U.error(log, "Failed to stop grid: " + ignite.name(), e);
}
}
/**
* Silently stops the grid.
* The method doesn't throw any exception.
*
* @param igniteInstanceName Ignite instance name.
* @param log Logger.
*/
public static void stopGrid(String igniteInstanceName, IgniteLogger log) {
try {
G.stop(igniteInstanceName, false);
}
catch (Throwable e) {
U.error(log, "Failed to stop grid: " + igniteInstanceName, e);
}
}
/**
* Gets file representing the path passed in. First the check is made if path is absolute.
* If not, then the check is made if path is relative to ${IGNITE_HOME}. If both checks fail,
* then {@code null} is returned, otherwise file representing path is returned.
* <p>
* See {@link #getIgniteHome()} for information on how {@code IGNITE_HOME} is retrieved.
*
* @param path Path to resolve.
* @return Resolved path, or {@code null} if file cannot be resolved.
* @see #getIgniteHome()
*/
@Nullable public static File resolveIgnitePath(String path) {
return resolvePath(null, path);
}
/**
* @param igniteHome Optional ignite home path.
* @param path Path to resolve.
* @return Resolved path, or {@code null} if file cannot be resolved.
*/
@Nullable private static File resolvePath(@Nullable String igniteHome, String path) {
File file = new File(path).getAbsoluteFile();
if (!file.exists()) {
String home = igniteHome != null ? igniteHome : U.getIgniteHome();
if (home == null)
return null;
file = new File(home, path);
return file.exists() ? file : null;
}
return file;
}
/**
* @param cache Cache.
* @return Cache context.
*/
public static <K, V> GridCacheContext<K, V> cacheContext(IgniteCache<K, V> cache) {
return ((IgniteKernal)cache.unwrap(Ignite.class)).<K, V>internalCache(cache.getName()).context();
}
/**
* @param cache Cache.
* @return Near cache.
*/
public static <K, V> GridNearCacheAdapter<K, V> near(IgniteCache<K, V> cache) {
return cacheContext(cache).near();
}
/**
* @param cache Cache.
* @return DHT cache.
*/
public static <K, V> GridDhtCacheAdapter<K, V> dht(IgniteCache<K, V> cache) {
return near(cache).dht();
}
/**
* @param cacheName Cache name.
* @param backups Number of backups.
* @param log Logger.
* @throws Exception If failed.
*/
@SuppressWarnings("BusyWait")
public static <K, V> void waitTopologyUpdate(@Nullable String cacheName, int backups, IgniteLogger log)
throws Exception {
for (Ignite g : Ignition.allGrids()) {
IgniteCache<K, V> cache = ((IgniteEx)g).cache(cacheName);
GridDhtPartitionTopology top = dht(cache).topology();
while (true) {
boolean wait = false;
for (int p = 0; p < g.affinity(cacheName).partitions(); p++) {
Collection<ClusterNode> nodes = top.nodes(p, AffinityTopologyVersion.NONE);
if (nodes.size() > backups + 1) {
LT.warn(log, "Partition map was not updated yet (will wait) [igniteInstanceName=" + g.name() +
", p=" + p + ", nodes=" + F.nodeIds(nodes) + ']');
wait = true;
break;
}
}
if (wait)
Thread.sleep(20);
else
break; // While.
}
}
}
/**
* Converts a runnable task into a callable one.
*
* @param run Runnable task to convert into callable one.
* @param res Callable result.
* @param <T> The result type of method <tt>call</tt>; the callable simply returns {@code res}.
* @return Callable task around the specified runnable one.
*/
public static <T> Callable<T> makeCallable(final Runnable run, @Nullable final T res) {
return new Callable<T>() {
@Override public T call() throws Exception {
run.run();
return res;
}
};
}
/**
* Get object field value via reflection.
*
* @param obj Object or class to get field value from.
* @param cls Class.
* @param fieldName Field name to get value for.
* @param <T> Expected field class.
* @return Field value.
* @throws IgniteException In case of error.
*/
public static <T> T getFieldValue(Object obj, Class cls, String fieldName) throws IgniteException {
assert obj != null;
assert fieldName != null;
try {
return (T)findField(cls, obj, fieldName);
}
catch (NoSuchFieldException | IllegalAccessException e) {
throw new IgniteException("Failed to get object field [obj=" + obj +
", fieldName=" + fieldName + ']', e);
}
}
/**
* Get object field value via reflection.
*
* @param obj Object or class to get field value from.
* @param fieldNames Field names to get value for: obj->field1->field2->...->fieldN.
* @param <T> Expected field class.
* @return Field value.
* @throws IgniteException In case of error.
*/
public static <T> T getFieldValue(Object obj, String... fieldNames) throws IgniteException {
assert obj != null;
assert fieldNames != null;
assert fieldNames.length >= 1;
try {
for (String fieldName : fieldNames) {
Class<?> cls = obj instanceof Class ? (Class)obj : obj.getClass();
try {
obj = findField(cls, obj, fieldName);
}
catch (NoSuchFieldException e) {
// Resolve inner class, if not an inner field.
Class<?> innerCls = getInnerClass(cls, fieldName);
if (innerCls == null)
throw new IgniteException("Failed to get object field [obj=" + obj +
", fieldNames=" + Arrays.toString(fieldNames) + ']', e);
obj = innerCls;
}
}
return (T)obj;
}
catch (IllegalAccessException e) {
throw new IgniteException("Failed to get object field [obj=" + obj +
", fieldNames=" + Arrays.toString(fieldNames) + ']', e);
}
}
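// Usage sketch (illustrative; 'ignite' and the field names "ctx" and "gw" are hypothetical and
// used only to show the field-chain syntax obj->field1->field2):
//
//     Object gateway = getFieldValue(ignite, "ctx", "gw");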
/**
* Get object field value via reflection (including superclasses).
*
* @param obj Object or class to get field value from.
* @param fieldNames Field names to get value for: obj->field1->field2->...->fieldN.
* @param <T> Expected field class.
* @return Field value.
* @throws IgniteException In case of error.
*/
public static <T> T getFieldValueHierarchy(Object obj, String... fieldNames) throws IgniteException {
assert obj != null;
assert fieldNames != null;
assert fieldNames.length >= 1;
try {
for (String fieldName : fieldNames) {
Class<?> cls = obj instanceof Class ? (Class)obj : obj.getClass();
while (cls != null) {
try {
obj = findField(cls, obj, fieldName);
break;
}
catch (NoSuchFieldException e) {
cls = cls.getSuperclass();
}
}
}
return (T)obj;
}
catch (IllegalAccessException e) {
throw new IgniteException("Failed to get object field [obj=" + obj +
", fieldNames=" + Arrays.toString(fieldNames) + ']', e);
}
}
/**
* @param cls Class for searching.
* @param obj Target object.
* @param fieldName Field name for search.
* @return Field from object if it was found.
*/
private static Object findField(Class<?> cls, Object obj,
String fieldName) throws NoSuchFieldException, IllegalAccessException {
// Resolve inner field.
Field field = cls.getDeclaredField(fieldName);
boolean accessible = field.isAccessible();
if (!accessible)
field.setAccessible(true);
return field.get(obj);
}
/**
* Get inner class by its name from the enclosing class.
*
* @param parentCls Parent class to resolve inner class for.
* @param innerClsName Name of the inner class.
* @return Inner class.
*/
@Nullable public static <T> Class<T> getInnerClass(Class<?> parentCls, String innerClsName) {
for (Class<?> cls : parentCls.getDeclaredClasses())
if (innerClsName.equals(cls.getSimpleName()))
return (Class<T>)cls;
return null;
}
/**
* Set object field value via reflection.
*
* @param obj Object to set field value to.
* @param fieldName Field name to set value for.
* @param val New field value.
* @throws IgniteException In case of error.
*/
public static void setFieldValue(Object obj, String fieldName, Object val) throws IgniteException {
assert obj != null;
assert fieldName != null;
try {
Class<?> cls = obj instanceof Class ? (Class)obj : obj.getClass();
Field field = cls.getDeclaredField(fieldName);
boolean accessible = field.isAccessible();
if (!accessible)
field.setAccessible(true);
field.set(obj, val);
}
catch (NoSuchFieldException | IllegalAccessException e) {
throw new IgniteException("Failed to set object field [obj=" + obj + ", field=" + fieldName + ']', e);
}
}
/**
* Set object field value via reflection.
*
* @param obj Object to set field value to.
* @param cls Class to get field from.
* @param fieldName Field name to set value for.
* @param val New field value.
* @throws IgniteException In case of error.
*/
public static void setFieldValue(Object obj, Class cls, String fieldName, Object val) throws IgniteException {
assert fieldName != null;
try {
Field field = cls.getDeclaredField(fieldName);
boolean accessible = field.isAccessible();
if (!accessible)
field.setAccessible(true);
boolean isFinal = (field.getModifiers() & Modifier.FINAL) != 0;
if (isFinal) {
Field modifiersField = Field.class.getDeclaredField("modifiers");
modifiersField.setAccessible(true);
modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
}
field.set(obj, val);
}
catch (NoSuchFieldException | IllegalAccessException e) {
throw new IgniteException("Failed to set object field [obj=" + obj + ", field=" + fieldName + ']', e);
}
}
/**
* Invoke method on an object.
*
* @param obj Object to call method on.
* @param mtd Method to invoke.
* @param params Parameters of the method.
* @return Method invocation result.
* @throws Exception If failed.
*/
@Nullable public static <T> T invoke(Object obj, String mtd, Object... params) throws Exception {
Class<?> cls = obj.getClass();
do {
// We cannot resolve the method by parameter classes because some of the parameters can be null.
// Search for the correct method among all declared methods.
for (Method m : cls.getDeclaredMethods()) {
// Filter methods by name.
if (!m.getName().equals(mtd))
continue;
if (!areCompatible(params, m.getParameterTypes()))
continue;
try {
boolean accessible = m.isAccessible();
if (!accessible)
m.setAccessible(true);
return (T)m.invoke(obj, params);
}
catch (IllegalAccessException e) {
throw new RuntimeException("Failed to access method" +
" [obj=" + obj + ", mtd=" + mtd + ", params=" + Arrays.toString(params) + ']', e);
}
catch (InvocationTargetException e) {
Throwable cause = e.getCause();
if (cause instanceof Error)
throw (Error) cause;
if (cause instanceof Exception)
throw (Exception) cause;
throw new RuntimeException("Failed to invoke method)" +
" [obj=" + obj + ", mtd=" + mtd + ", params=" + Arrays.toString(params) + ']', e);
}
}
cls = cls.getSuperclass();
} while (cls != Object.class);
throw new RuntimeException("Failed to find method" +
" [obj=" + obj + ", mtd=" + mtd + ", params=" + Arrays.toString(params) + ']');
}
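// Usage sketch (illustrative; 'target' and the method name "onTimeout" are hypothetical - the
// helper searches declared methods up the class hierarchy and makes them accessible before invoking):
//
//     Object res = invoke(target, "onTimeout", 500L);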
/**
* Check objects and corresponding types are compatible.
*
* @param objs Objects array.
* @param types Classes array.
* @return {@code true} if the objects in the array can be cast to the corresponding types.
*/
private static boolean areCompatible(Object[] objs, Class[] types) {
if (objs.length != types.length)
return false;
for (int i = 0, size = objs.length; i < size; i++) {
Object o = objs[i];
if (o != null && !types[i].isInstance(o))
return false;
}
return true;
}
/**
* Tries a few times to perform some assertion. In the worst case the
* {@code assertion} closure will be executed {@code retries} + 1 times and the
* thread will sleep approximately {@code retries} * {@code retryInterval} milliseconds in total.
*
* @param log Log.
* @param retries Number of retries.
* @param retryInterval Interval between retries in milliseconds.
* @param c Closure with assertion. All {@link AssertionError}s thrown
* from this closure will be ignored {@code retries} times.
* @throws org.apache.ignite.internal.IgniteInterruptedCheckedException If interrupted.
*/
@SuppressWarnings("ErrorNotRethrown")
public static void retryAssert(@Nullable IgniteLogger log, int retries, long retryInterval, GridAbsClosure c)
throws IgniteInterruptedCheckedException {
for (int i = 0; i < retries; i++) {
try {
c.apply();
return;
}
catch (AssertionError e) {
U.warn(log, "Check failed (will retry in " + retryInterval + "ms).", e);
U.sleep(retryInterval);
}
}
// Apply the last time without guarding try.
c.apply();
}
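// Usage sketch (illustrative; 'ignite', 'log' and the cache name are assumptions - the assertion
// is retried up to 10 times with a 500 ms pause between attempts):
//
//     retryAssert(log, 10, 500, new GridAbsClosure() {
//         @Override public void apply() {
//             assertNotNull(ignite.cache("partitioned"));
//         }
//     });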
/**
* Reads entire file into byte array.
*
* @param file File to read.
* @return Content of file in byte array.
* @throws IOException If failed.
*/
public static byte[] readFile(File file) throws IOException {
assert file.exists();
assert file.length() < Integer.MAX_VALUE;
byte[] bytes = new byte[(int) file.length()];
try (FileInputStream fis = new FileInputStream(file)) {
int readBytesCnt = fis.read(bytes);
assert readBytesCnt == bytes.length;
}
return bytes;
}
/**
* Sleeps and increments an integer.
* <p>
* Allows for loops like the following:
* <pre>{@code
* for (int i = 0; i < 20 && !condition; i = sleepAndIncrement(200, i)) {
* ...
* }
* }</pre>
* for busy-waiting limited number of iterations.
*
* @param sleepDur Sleep duration in milliseconds.
* @param i Integer to increment.
* @return Incremented value.
* @throws org.apache.ignite.internal.IgniteInterruptedCheckedException If sleep was interrupted.
*/
public static int sleepAndIncrement(int sleepDur, int i) throws IgniteInterruptedCheckedException {
U.sleep(sleepDur);
return i + 1;
}
/**
* Waits for condition, polling in busy wait loop.
*
* @param cond Condition to wait for.
* @param timeout Max time to wait in milliseconds.
* @return {@code true} if condition was achieved, {@code false} otherwise.
* @throws org.apache.ignite.internal.IgniteInterruptedCheckedException If interrupted.
*/
public static boolean waitForCondition(GridAbsPredicate cond, long timeout) throws IgniteInterruptedCheckedException {
long curTime = U.currentTimeMillis();
long endTime = curTime + timeout;
if (endTime < 0)
endTime = Long.MAX_VALUE;
while (curTime < endTime) {
if (cond.apply())
return true;
U.sleep(DFLT_BUSYWAIT_SLEEP_INTERVAL);
curTime = U.currentTimeMillis();
}
return false;
}
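// Usage sketch (illustrative; 'ignite' and the expected cluster size are assumptions - the
// condition is polled until it holds or the timeout elapses):
//
//     boolean reached = waitForCondition(new GridAbsPredicate() {
//         @Override public boolean apply() {
//             return ignite.cluster().nodes().size() == 3;
//         }
//     }, 10000);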
/**
* Creates an SSL context from test key store with disabled trust manager.
*
* @return Initialized context.
* @throws GeneralSecurityException In case if context could not be initialized.
* @throws IOException If keystore cannot be accessed.
*/
public static SSLContext sslContext() throws GeneralSecurityException, IOException {
SSLContext ctx = SSLContext.getInstance("TLS");
char[] storePass = keyStorePassword().toCharArray();
KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance("SunX509");
KeyStore keyStore = KeyStore.getInstance("JKS");
keyStore.load(new FileInputStream(U.resolveIgnitePath(GridTestProperties.getProperty("ssl.keystore.path"))),
storePass);
keyMgrFactory.init(keyStore, storePass);
ctx.init(keyMgrFactory.getKeyManagers(),
new TrustManager[]{GridSslBasicContextFactory.getDisabledTrustManager()}, null);
return ctx;
}
/**
* Creates test-purposed SSL context factory from test key store with disabled trust manager.
*
* @return SSL context factory used in test.
*/
public static GridSslContextFactory sslContextFactory() {
GridSslBasicContextFactory factory = new GridSslBasicContextFactory();
factory.setKeyStoreFilePath(
U.resolveIgnitePath(GridTestProperties.getProperty("ssl.keystore.path")).getAbsolutePath());
factory.setKeyStorePassword(keyStorePassword().toCharArray());
factory.setTrustManagers(GridSslBasicContextFactory.getDisabledTrustManager());
return factory;
}
/**
* Creates test-purposed SSL context factory from test key store with disabled trust manager.
*
* @return SSL context factory used in test.
*/
public static Factory<SSLContext> sslFactory() {
SslContextFactory factory = new SslContextFactory();
factory.setKeyStoreFilePath(
U.resolveIgnitePath(GridTestProperties.getProperty("ssl.keystore.path")).getAbsolutePath());
factory.setKeyStorePassword(keyStorePassword().toCharArray());
factory.setTrustManagers(SslContextFactory.getDisabledTrustManager());
return factory;
}
/**
* Creates test-purposed SSL context factory from specified key store and trust store.
*
* @param keyStore Key store name.
* @param trustStore Trust store name.
* @return SSL context factory used in test.
*/
public static Factory<SSLContext> sslTrustedFactory(String keyStore, String trustStore) {
SslContextFactory factory = new SslContextFactory();
factory.setKeyStoreFilePath(keyStorePath(keyStore));
factory.setKeyStorePassword(keyStorePassword().toCharArray());
factory.setTrustStoreFilePath(keyStorePath(trustStore));
factory.setTrustStorePassword(keyStorePassword().toCharArray());
return factory;
}
public static String keyStorePassword() {
return GridTestProperties.getProperty("ssl.keystore.password");
}
@NotNull public static String keyStorePath(String keyStore) {
return U.resolveIgnitePath(GridTestProperties.getProperty(
"ssl.keystore." + keyStore + ".path")).getAbsolutePath();
}
/**
* @param o1 Object 1.
* @param o2 Object 2.
* @return Equals or not.
*/
public static boolean deepEquals(@Nullable Object o1, @Nullable Object o2) {
if (o1 == o2)
return true;
else if (o1 == null || o2 == null)
return false;
else if (o1.getClass() != o2.getClass())
return false;
else {
Class<?> cls = o1.getClass();
assert o2.getClass() == cls;
for (Field f : cls.getDeclaredFields()) {
f.setAccessible(true);
Object v1;
Object v2;
try {
v1 = f.get(o1);
v2 = f.get(o2);
}
catch (IllegalAccessException e) {
throw new AssertionError(e);
}
if (!Objects.deepEquals(v1, v2))
return false;
}
return true;
}
}
/**
* Converts integer permission mode into set of {@link PosixFilePermission}.
*
* @param mode File mode.
* @return Set of {@link PosixFilePermission}.
*/
public static Set<PosixFilePermission> modeToPermissionSet(int mode) {
Set<PosixFilePermission> res = EnumSet.noneOf(PosixFilePermission.class);
if ((mode & 0400) > 0)
res.add(PosixFilePermission.OWNER_READ);
if ((mode & 0200) > 0)
res.add(PosixFilePermission.OWNER_WRITE);
if ((mode & 0100) > 0)
res.add(PosixFilePermission.OWNER_EXECUTE);
if ((mode & 040) > 0)
res.add(PosixFilePermission.GROUP_READ);
if ((mode & 020) > 0)
res.add(PosixFilePermission.GROUP_WRITE);
if ((mode & 010) > 0)
res.add(PosixFilePermission.GROUP_EXECUTE);
if ((mode & 04) > 0)
res.add(PosixFilePermission.OTHERS_READ);
if ((mode & 02) > 0)
res.add(PosixFilePermission.OTHERS_WRITE);
if ((mode & 01) > 0)
res.add(PosixFilePermission.OTHERS_EXECUTE);
return res;
}
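// Usage sketch (illustrative): octal mode 0700 maps to owner read/write/execute only.
//
//     Set<PosixFilePermission> perms = modeToPermissionSet(0700);
//
//     assert perms.equals(EnumSet.of(
//         PosixFilePermission.OWNER_READ,
//         PosixFilePermission.OWNER_WRITE,
//         PosixFilePermission.OWNER_EXECUTE));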
/**
* @param name Name.
* @param run Run.
*/
public static void benchmark(@Nullable String name, @NotNull Runnable run) {
benchmark(name, 8000, 10000, run);
}
/**
* @param name Name.
     * @param warmup Warm-up duration in milliseconds.
     * @param executionTime Measured execution duration in milliseconds.
* @param run Run.
*/
public static void benchmark(@Nullable String name, long warmup, long executionTime, @NotNull Runnable run) {
final AtomicBoolean stop = new AtomicBoolean();
class Stopper extends TimerTask {
@Override public void run() {
stop.set(true);
}
}
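        // Warm-up phase: run the task repeatedly until the warm-up timer fires; these iterations are not measured.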
new Timer(true).schedule(new Stopper(), warmup);
while (!stop.get())
run.run();
stop.set(false);
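        // Measured phase: run the task until the execution timer fires, counting iterations to compute throughput and latency.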
new Timer(true).schedule(new Stopper(), executionTime);
long startTime = System.currentTimeMillis();
int cnt = 0;
do {
run.run();
cnt++;
}
while (!stop.get());
double dur = (System.currentTimeMillis() - startTime) / 1000d;
        System.out.printf("%s:\n operations:%d, duration=%fs, op/s=%d, latency=%fms\n", name, cnt, dur,
            (long)(cnt / dur), dur / cnt * 1000);
}
/**
     * Prompts the garbage collector to run.
     * {@code System.gc();} is not guaranteed to trigger a collection, so this method also tries to fill memory
     * in order to crowd out dead objects.
*/
public static void runGC() {
System.gc();
ReferenceQueue<byte[]> queue = new ReferenceQueue<>();
Collection<SoftReference<byte[]>> refs = new ArrayList<>();
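        // Allocate soft-referenced buffers until one of them is reclaimed (appears on the reference queue),
        // i.e. until memory pressure forces a collection.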
while (true) {
byte[] bytes = new byte[128 * 1024];
refs.add(new SoftReference<>(bytes, queue));
if (queue.poll() != null)
break;
}
System.gc();
}
/**
* @return Path to apache ignite.
*/
public static String apacheIgniteTestPath() {
return System.getProperty("IGNITE_TEST_PATH", U.getIgniteHome() + "/target/ignite");
}
/**
     * {@link Class#getSimpleName()} does not include the outer class name prefix for inner classes; for example,
     * getSimpleName() returns "RegularDiscovery" instead of "GridDiscoveryManagerSelfTest$RegularDiscovery".
     * This method returns the correct simple name for inner classes.
*
* @param cls Class
* @return Simple name with outer class prefix.
*/
public static String fullSimpleName(@NotNull Class cls) {
if (cls.getEnclosingClass() != null)
return cls.getEnclosingClass().getSimpleName() + "." + cls.getSimpleName();
else
return cls.getSimpleName();
}
/**
* Adds test class to the list only if it's not in {@code ignoredTests} set.
*
* @param suite List where to place the test class.
* @param test Test.
     * @param ignoredTests Tests to ignore. If the test is contained in the collection, it is not added to the suite.
*/
public static void addTestIfNeeded(@NotNull final List<Class<?>> suite, @NotNull final Class<?> test,
@Nullable final Collection<Class> ignoredTests) {
if (ignoredTests != null && ignoredTests.contains(test))
return;
suite.add(test);
}
/**
* Generate random alphabetical string.
*
* @param rnd Random object.
* @param maxLen Maximal length of string
* @return Random string object.
*/
public static String randomString(Random rnd, int maxLen) {
int len = rnd.nextInt(maxLen);
StringBuilder b = new StringBuilder(len);
for (int i = 0; i < len; i++)
b.append(ALPHABETH.charAt(rnd.nextInt(ALPHABETH.length())));
return b.toString();
}
/**
* @param node Node.
* @param topVer Ready exchange version to wait for before trying to merge exchanges.
*/
public static void mergeExchangeWaitVersion(Ignite node, long topVer) {
((IgniteEx)node).context().cache().context().exchange().mergeExchangesTestWaitVersion(
new AffinityTopologyVersion(topVer, 0), null);
}
/**
* @param node Node.
* @param topVer Ready exchange version to wait for before trying to merge exchanges.
*/
public static void mergeExchangeWaitVersion(Ignite node, long topVer, List mergedEvts) {
((IgniteEx)node).context().cache().context().exchange().mergeExchangesTestWaitVersion(
new AffinityTopologyVersion(topVer, 0), mergedEvts);
}
/** Test parameters scale factor util. */
private static class ScaleFactorUtil {
/** Test speed scale factor property name. */
private static final String TEST_SCALE_FACTOR_PROPERTY = "TEST_SCALE_FACTOR";
/** Min test scale factor value. */
private static final double MIN_TEST_SCALE_FACTOR_VALUE = 0.1;
/** Max test scale factor value. */
private static final double MAX_TEST_SCALE_FACTOR_VALUE = 1.0;
/** Test speed scale factor. */
private static final double TEST_SCALE_FACTOR_VALUE = readScaleFactor();
/** */
private static double readScaleFactor() {
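            // Reads TEST_SCALE_FACTOR from the system properties and clamps it to
            // [MIN_TEST_SCALE_FACTOR_VALUE, MAX_TEST_SCALE_FACTOR_VALUE].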
double scaleFactor = Double.parseDouble(System.getProperty(TEST_SCALE_FACTOR_PROPERTY, "1.0"));
scaleFactor = Math.max(scaleFactor, MIN_TEST_SCALE_FACTOR_VALUE);
scaleFactor = Math.min(scaleFactor, MAX_TEST_SCALE_FACTOR_VALUE);
return scaleFactor;
}
/** */
public static int apply(int val) {
return (int)Math.round(TEST_SCALE_FACTOR_VALUE * val);
}
/** */
public static int apply(int val, int lowerBound, int upperBound) {
return applyUB(applyLB(val, lowerBound), upperBound);
}
/** Apply scale factor with lower bound */
public static int applyLB(int val, int lowerBound) {
return Math.max(apply(val), lowerBound);
}
/** Apply scale factor with upper bound */
public static int applyUB(int val, int upperBound) {
return Math.min(apply(val), upperBound);
}
}
/**
* @param node Node to connect to.
* @param params Connection parameters.
* @return Thin JDBC connection to specified node.
*/
public static Connection connect(IgniteEx node, String params) throws SQLException {
Collection<GridPortRecord> recs = node.context().ports().records();
GridPortRecord cliLsnrRec = null;
for (GridPortRecord rec : recs) {
if (rec.clazz() == ClientListenerProcessor.class) {
cliLsnrRec = rec;
break;
}
}
assertNotNull(cliLsnrRec);
String connStr = "jdbc:ignite:thin://127.0.0.1:" + cliLsnrRec.port();
if (!F.isEmpty(params))
connStr += "/?" + params;
return DriverManager.getConnection(connStr);
}
/**
* Removes idle_verify log files created in tests.
*/
public static void cleanIdleVerifyLogFiles() {
File dir = new File(".");
for (File f : dir.listFiles(n -> n.getName().startsWith(IdleVerifyResultV2.IDLE_VERIFY_FILE_PREFIX)))
f.delete();
}
public static class SqlTestFunctions {
/** Sleep milliseconds. */
public static volatile long sleepMs;
/** Fail flag. */
public static volatile boolean fail;
/**
         * Sleeps for {@code sleepMs} milliseconds.
         *
         * @return Number of milliseconds slept.
*/
@QuerySqlFunction
@SuppressWarnings("BusyWait")
public static long sleep() {
long end = System.currentTimeMillis() + sleepMs;
            long remainTime = sleepMs;
do {
try {
Thread.sleep(remainTime);
}
catch (InterruptedException ignored) {
// No-op
}
}
while ((remainTime = end - System.currentTimeMillis()) > 0);
return sleepMs;
}
/**
         * Fails if {@code fail} is {@code true}, otherwise returns 0.
         *
         * @return 0 if {@code fail} is {@code false}; throws {@link IllegalArgumentException} otherwise.
*/
@QuerySqlFunction
public static int can_fail() {
if (fail)
throw new IllegalArgumentException();
else
return 0;
}
/**
         * Sleeps for {@code sleepMs} milliseconds and then fails if {@code fail} is {@code true}.
         *
         * @return Number of milliseconds slept if {@code fail} is {@code false}; fails otherwise.
*/
@QuerySqlFunction
public static long sleep_and_can_fail() {
long sleep = sleep();
can_fail();
return sleep;
}
}
/**
* Runnable that can throw exceptions.
*/
@FunctionalInterface
public interface RunnableX extends Runnable {
/**
* Runnable body.
*
* @throws Exception If failed.
*/
void runx() throws Exception;
        /** {@inheritDoc} */
@Override default void run() {
try {
runx();
}
catch (Exception e) {
throw new IgniteException(e);
}
}
}
/**
* IgniteRunnable that can throw exceptions.
*/
@FunctionalInterface
public interface IgniteRunnableX extends IgniteRunnable {
/**
* Runnable body.
*
* @throws Exception If failed.
*/
void runx() throws Exception;
        /** {@inheritDoc} */
@Override default void run() {
try {
runx();
}
catch (Exception e) {
throw new IgniteException(e);
}
}
}
}
|
[
"\"IGNITE_HOME\""
] |
[] |
[
"IGNITE_HOME"
] |
[]
|
["IGNITE_HOME"]
|
java
| 1 | 0 | |
vendor/github.com/mitchellh/go-homedir/homedir.go
|
package homedir
import (
"bytes"
"errors"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
)
// DisableCache will disable caching of the home directory. Caching is enabled
// by default.
var DisableCache bool
var homedirCache string
var cacheLock sync.RWMutex
// Dir returns the home directory for the executing user.
//
// This uses an OS-specific method for discovering the home directory.
// An error is returned if a home directory cannot be detected.
func Dir() (string, error) {
if !DisableCache {
cacheLock.RLock()
cached := homedirCache
cacheLock.RUnlock()
if cached != "" {
return cached, nil
}
}
cacheLock.Lock()
defer cacheLock.Unlock()
var result string
var err error
if runtime.GOOS == "windows" {
result, err = dirWindows()
} else {
// Unix-like system, so just assume Unix
result, err = dirUnix()
}
if err != nil {
return "", err
}
homedirCache = result
return result, nil
}
// Expand expands the path to include the home directory if the path
// is prefixed with `~`. If it isn't prefixed with `~`, the path is
// returned as-is.
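//
// For example, Expand("~/.config") joins the user's home directory with ".config".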
func Expand(path string) (string, error) {
if len(path) == 0 {
return path, nil
}
if path[0] != '~' {
return path, nil
}
if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
return "", errors.New("cannot expand user-specific home dir")
}
dir, err := Dir()
if err != nil {
return "", err
}
return filepath.Join(dir, path[1:]), nil
}
func dirUnix() (string, error) {
// First prefer the HOME environmental variable
if home := os.Getenv("HOME"); home != "" {
return home, nil
}
// If that fails, try getent
var stdout bytes.Buffer
cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
// If "getent" is missing, ignore it
if err == exec.ErrNotFound {
return "", err
}
} else {
if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
// username:password:uid:gid:gecos:home:shell
passwdParts := strings.SplitN(passwd, ":", 7)
if len(passwdParts) > 5 {
return passwdParts[5], nil
}
}
}
// If all else fails, try the shell
stdout.Reset()
cmd = exec.Command("sh", "-c", "cd && pwd")
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return "", err
}
result := strings.TrimSpace(stdout.String())
if result == "" {
return "", errors.New("blank output when reading home directory")
}
return result, nil
}
func dirWindows() (string, error) {
drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH")
home := drive + path
if drive == "" || path == "" {
home = os.Getenv("USERPROFILE")
}
if home == "" {
return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
}
return home, nil
}
|
[
"\"HOME\"",
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\""
] |
[] |
[
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
] |
[]
|
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
|
go
| 4 | 0 | |
V2RaycSpider0825/Panel/all_airPort.py
|
import csv
import easygui
import requests
import webbrowser
from bs4 import BeautifulSoup
from config import *
"""*************************菜单设置*************************"""
# Menu settings
home_list = ['[1]白嫖机场', '[2]高端机场', '[3]机场汇总', '[4]返回', '[5]退出']
func_list = ['[1]查看', '[2]保存', '[3]返回']
"""*************************INIT*************************"""
# Initialize the local document tree
def INIT_docTree():
if not os.path.exists(SYS_LOCAL_fPATH):
os.mkdir(SYS_LOCAL_fPATH)
INIT_docTree()
"""###########################################################"""
# Save data to a local file
def out_flow(dataFlow, reFP=''):
try:
with open(SYS_LOCAL_aPATH, 'w', encoding='utf-8', newline='') as f:
writer = csv.writer(f)
for x in dataFlow:
writer.writerow(x)
except PermissionError:
easygui.exceptionbox('系统监测到您正在占用核心文件,请解除该文件的资源占用:{}'.format(SYS_LOCAL_aPATH))
# Display data through the front-end panel
dataList = []
def show_response():
"""
:return:
"""
usr_c = easygui.choicebox(msg='选中即可跳转目标网址,部分机场需要代理才能访问', title=TITLE, choices=dataList)
if usr_c:
if 'http' in usr_c:
url = usr_c.split(' ')[-1][1:-1]
webbrowser.open(url)
else:
easygui.msgbox('机场网址失效或操作有误', title=TITLE, ok_button='返回')
show_response()
elif usr_c is None:
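        # Dialog closed without a selection; tell the caller to re-render its menu.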
return 'present'
"""###########################################################"""
class sAirportSpider(object):
def __init__(self):
        # Home page listing free airports
self.airHome = 'https://52bp.org'
        # Auto-start
# self.Home()
def Home(self):
"""GUI导航"""
usr_c = easygui.choicebox('功能列表', TITLE, home_list, preselect=0)
resp = True
try:
if '[1]' in usr_c:
resp = self.slaver(self.airHome + '/free-airport.html', )
elif '[2]' in usr_c:
resp = self.slaver(self.airHome + '/vip-airport.html', )
elif '[3]' in usr_c:
resp = self.slaver(self.airHome + '/airport.html', )
elif '[4]' in usr_c:
# __retrace__('返回')
return resp
else:
# __retrace__('退出')
resp = False
except TypeError:
return False
finally:
if resp == 'present':
return self.Home()
else:
return resp
@staticmethod
def slaver(url, ):
        # Check network status
def layer():
try:
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}
res = requests.get(url, headers=headers)
res.raise_for_status()
return res.text
except Exception as e:
print(e)
return False
        # Get the navigation text
def h3Log(target):
own = target.find_all('span', class_='fake-TITLE')
souls = []
for soul in own:
try:
soul = soul.text.split('.')[-1].strip()
souls.append(soul)
except TypeError as e:
print(e)
return souls
        # Strip invitation and registration codes from links and return clean URLs
def href_cleaner(hrefTarget, ):
if isinstance(hrefTarget, list):
clean_href = []
                for href in hrefTarget:
if '?' in href:
href = href.split('?')[0]
clean_href.append(href)
else:
clean_href.append(href)
return clean_href
elif isinstance(hrefTarget, str):
return hrefTarget.split('?')[0]
def show_data(show=True):
            # Use the global variable to pass data to the front end
global dataList
Out_flow = ['序号 机场名 官网链接']
if show:
dataList = Out_flow + ['【{}】 【{}】 【{}】'.format(i + 1, list(x)[0], list(x)[-1]) for i, x in
enumerate(zip(names, hrefs)) if 'http' in list(x)[-1]]
                # Front-end display API
return show_response()
else:
return [['序号', '机场名', '官网连接'], ] + \
[[i + 1, list(x)[0], list(x)[-1]] for i, x in
enumerate(zip(names, hrefs)) if 'http' in list(x)[-1]]
# func_list = ['[1]查看', '[2]保存', '[3]返回']
usr_d = easygui.choicebox(title=TITLE, choices=func_list)
if '返回' in usr_d:
return 'present'
response = layer()
if response:
soup = BeautifulSoup(response, 'html.parser')
            # Locate the navigation text
# barInfo = h3Log(soup)
            # Locate the list items
items = soup.find_all('li', class_='link-item')
            # Airport names
names = [item.find('span', class_='sitename').text.strip() for item in items]
            # Get airport links with invitation codes removed
hrefs = [item.find('a')['href'] for item in items]
hrefs = href_cleaner(hrefs)
if '保存' in usr_d:
                # Save locally
out_flow(show_data(show=False))
                # Open the file automatically
os.startfile(SYS_LOCAL_aPATH)
elif '查看' in usr_d:
                # Display on the front end
return show_data()
"""###########################################################"""
if __name__ == '__main__':
sAirportSpider()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
tools/aev-benchmark-size.py
|
import time
import torch
import torchani
import pynvml
import gc
import os
from ase.io import read
import argparse
summary = '\n'
runcounter = 0
N = 200
last_py_speed = None
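# Holds the latest pure-PyTorch AEV timing so that the following cuaev run can report its speed-up.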
def checkgpu(device=None):
i = device if device else torch.cuda.current_device()
t = torch.cuda.get_device_properties(i).total_memory
c = torch.cuda.memory_reserved(i)
name = torch.cuda.get_device_properties(i).name
print(' GPU Memory Cached (pytorch) : {:7.1f}MB / {:.1f}MB ({})'.format(c / 1024 / 1024, t / 1024 / 1024, name))
real_i = int(os.environ['CUDA_VISIBLE_DEVICES'][0]) if 'CUDA_VISIBLE_DEVICES' in os.environ else i
pynvml.nvmlInit()
h = pynvml.nvmlDeviceGetHandleByIndex(real_i)
info = pynvml.nvmlDeviceGetMemoryInfo(h)
name = pynvml.nvmlDeviceGetName(h)
print(' GPU Memory Used (nvidia-smi): {:7.1f}MB / {:.1f}MB ({})'.format(info.used / 1024 / 1024, info.total / 1024 / 1024, name.decode()))
return f'{(info.used / 1024 / 1024):.1f}MB'
def alert(text):
print('\033[91m{}\33[0m'.format(text)) # red
def info(text):
print('\033[32m{}\33[0m'.format(text)) # green
def format_time(t):
if t < 1:
t = f'{t * 1000:.1f} ms'
else:
t = f'{t:.3f} sec'
return t
def addSummaryLine(items=None, init=False):
if init:
addSummaryEmptyLine()
items = ['RUN', 'PDB', 'Size', 'forward', 'backward', 'Others', 'Total', f'Total({N})', 'Speedup', 'GPU']
global summary
summary += items[0].ljust(20) + items[1].ljust(13) + items[2].ljust(13) + items[3].ljust(13) + items[4].ljust(13) + items[5].ljust(13) + \
items[6].ljust(13) + items[7].ljust(13) + items[8].ljust(13) + items[9].ljust(13) + '\n'
def addSummaryEmptyLine():
global summary
summary += f"{'-'*20}".ljust(20) + f"{'-'*13}".ljust(13) + f"{'-'*13}".ljust(13) + f"{'-'*13}".ljust(13) + f"{'-'*13}".ljust(13) + f"{'-'*13}".ljust(13) + \
f"{'-'*13}".ljust(13) + f"{'-'*13}".ljust(13) + f"{'-'*13}".ljust(13) + f"{'-'*13}".ljust(13) + '\n'
def benchmark(speciesPositions, aev_comp, runbackward=False, mol_info=None, verbose=True):
global runcounter
global last_py_speed
runname = f"{'cu' if aev_comp.use_cuda_extension else 'py'} aev fd{'+bd' if runbackward else''}"
items = [f'{(runcounter+1):02} {runname}', f"{mol_info['name']}", f"{mol_info['atoms']}", '-', '-', '-', '-', '-', '-', '-']
forward_time = 0
force_time = 0
torch.cuda.empty_cache()
gc.collect()
torch.cuda.synchronize()
start = time.time()
aev = None
force = None
gpumem = None
for i in range(N):
species, coordinates = speciesPositions
coordinates = coordinates.requires_grad_(runbackward)
torch.cuda.synchronize()
forward_start = time.time()
try:
_, aev = aev_comp((species, coordinates))
except Exception as e:
alert(f" AEV faild: {str(e)[:50]}...")
addSummaryLine(items)
runcounter += 1
return None, None, None
torch.cuda.synchronize()
forward_time += time.time() - forward_start
if runbackward: # backward
force_start = time.time()
try:
force = -torch.autograd.grad(aev.sum(), coordinates, create_graph=True, retain_graph=True)[0]
except Exception as e:
alert(f" Force faild: {str(e)[:50]}...")
addSummaryLine(items)
runcounter += 1
return None, None, None
torch.cuda.synchronize()
force_time += time.time() - force_start
if i == 2 and verbose:
gpumem = checkgpu()
torch.cuda.synchronize()
total_time = (time.time() - start) / N
force_time = force_time / N
forward_time = forward_time / N
others_time = total_time - force_time - forward_time
if verbose:
if aev_comp.use_cuda_extension:
if last_py_speed is not None:
speed_up = last_py_speed / total_time
speed_up = f'{speed_up:.2f}'
else:
speed_up = '-'
last_py_speed = None
else:
last_py_speed = total_time
speed_up = '-'
if verbose:
print(f' Duration: {total_time * N:.2f} s')
print(f' Speed: {total_time*1000:.2f} ms/it')
if runcounter == 0:
addSummaryLine(init=True)
addSummaryEmptyLine()
if runcounter >= 0:
items = [f'{(runcounter+1):02} {runname}',
f"{mol_info['name']}",
f"{mol_info['atoms']}",
f'{format_time(forward_time)}',
f'{format_time(force_time)}',
f'{format_time(others_time)}',
f'{format_time(total_time)}',
f'{format_time(total_time * N)}',
f'{speed_up}',
f'{gpumem}']
addSummaryLine(items)
runcounter += 1
return aev, total_time, force
def check_speedup_error(aev, aev_ref, speed, speed_ref):
if (speed_ref is not None) and (speed is not None) and (aev is not None) and (aev_ref is not None):
speedUP = speed_ref / speed
if speedUP > 1:
info(f' Speed up: {speedUP:.2f} X\n')
else:
alert(f' Speed up (slower): {speedUP:.2f} X\n')
aev_error = torch.max(torch.abs(aev - aev_ref))
assert aev_error < 0.02, f' Error: {aev_error:.1e}\n'
def run(file, nnp_ref, nnp_cuaev, runbackward, maxatoms=10000):
filepath = os.path.join(path, f'../dataset/pdb/{file}')
mol = read(filepath)
species = torch.tensor([mol.get_atomic_numbers()], device=device)
positions = torch.tensor([mol.get_positions()], dtype=torch.float32, requires_grad=False, device=device)
spelist = list(torch.unique(species.flatten()).cpu().numpy())
species = species[:, :maxatoms]
positions = positions[:, :maxatoms, :]
speciesPositions = nnp_ref.species_converter((species, positions))
print(f'File: {file}, Molecule size: {species.shape[-1]}, Species: {spelist}\n')
if args.nsight:
torch.cuda.nvtx.range_push(file)
print('Original TorchANI:')
mol_info = {'name': file, 'atoms': species.shape[-1]}
aev_ref, delta_ref, force_ref = benchmark(speciesPositions, nnp_ref.aev_computer, runbackward, mol_info)
print()
print('CUaev:')
# warm up
_, _, _ = benchmark(speciesPositions, nnp_cuaev.aev_computer, runbackward, mol_info, verbose=False)
# run
aev, delta, force_cuaev = benchmark(speciesPositions, nnp_cuaev.aev_computer, runbackward, mol_info)
if args.nsight:
torch.cuda.nvtx.range_pop()
check_speedup_error(aev, aev_ref, delta, delta_ref)
print('-' * 70 + '\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--nsight',
action='store_true',
help='use nsight profile')
parser.add_argument('-b', '--backward',
action='store_true',
help='benchmark double backward')
parser.add_argument('-n', '--N',
help='Number of Repeat',
default=200, type=int)
parser.set_defaults(backward=0)
args = parser.parse_args()
path = os.path.dirname(os.path.realpath(__file__))
N = args.N
device = torch.device('cuda')
files = ['small.pdb', '1hz5.pdb', '6W8H.pdb']
# files = ['small.pdb']
nnp_ref = torchani.models.ANI2x(periodic_table_index=True, model_index=None).to(device)
nnp_cuaev = torchani.models.ANI2x(periodic_table_index=True, model_index=None).to(device)
nnp_cuaev.aev_computer.use_cuda_extension = True
if args.nsight:
N = 3
torch.cuda.profiler.start()
for file in files:
run(file, nnp_ref, nnp_cuaev, runbackward=False)
for maxatom in [6000, 10000]:
file = '1C17.pdb'
run(file, nnp_ref, nnp_cuaev, runbackward=False, maxatoms=maxatom)
addSummaryEmptyLine()
info('Add Backward\n')
for file in files:
run(file, nnp_ref, nnp_cuaev, runbackward=True)
for maxatom in [6000, 10000]:
file = '1C17.pdb'
run(file, nnp_ref, nnp_cuaev, runbackward=True, maxatoms=maxatom)
addSummaryEmptyLine()
print(summary)
if args.nsight:
torch.cuda.profiler.stop()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
Webots_Projects/controllers/calibration_recorder/calibration_recorder.py
|
"""Defines calibration_recorder controller
----------------------------------------------------------------------------------------------------------
This file is part of Sim-ATAV project and licensed under MIT license.
Copyright (c) 2018 Cumhur Erkan Tuncali, Georgios Fainekos, Danil Prokhorov, Hisahiro Ito, James Kapinski.
For questions please contact:
C. Erkan Tuncali (etuncali [at] asu.edu)
----------------------------------------------------------------------------------------------------------
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import numpy as np
import cv2
FILE_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(FILE_PATH + "/../../../")
from Sim_ATAV.vehicle_control.base_controller.base_controller import BaseCarController
#LIBRARY_PATH = os.environ.get("WEBOTS_HOME") + "/projects/automobile/libraries/python"
try:
LIBRARY_PATH = os.environ.get("WEBOTS_HOME") + "/lib/python36"
except TypeError:  # WEBOTS_HOME is not set
LIBRARY_PATH = 'C:/Program Files/Webots/lib/python36'
LIBRARY_PATH = LIBRARY_PATH.replace('/', os.sep)
sys.path.append(LIBRARY_PATH)
# **********************************************************************************************
# This controller only takes a snapshot of the scene and saves to file for calibration purposes
# **********************************************************************************************
class CalibrationRecorder(BaseCarController):
"""CalibrationRecorder class is a car controller class for Webots.
This controller is used to record camera images for distance calculation calibration."""
def __init__(self, controller_parameters):
(car_model, calibration_id) = controller_parameters
BaseCarController.__init__(self, car_model)
self.camera_name = "camera"
self.camera = None
self.calibration_id = calibration_id
print("CalibrationRecorder Initialized: {}, id: {}".format(car_model, self.calibration_id))
def run(self):
"""Runs the Controller.
Only takes a snapshot of the scene and saves to file for calibration purposes.
"""
# Start camera and the car engine:
self.camera = self.getCamera(self.camera_name)
if self.camera is not None:
self.camera.enable(16)
self.start_car()
self.step()
image_array_rgb = self.camera.getImageArray()
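        # Convert the nested list returned by Webots into a row-major RGB array,
        # then reverse the channel order to BGR for OpenCV.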
image_array_rgb = np.array(image_array_rgb)
image_array_rgb = np.rot90(image_array_rgb, -1)
image_array_rgb = np.fliplr(image_array_rgb)
image_array_bgr = image_array_rgb[..., ::-1]
file_name = "..\\..\\calibration_images\\cal_" + \
self.car_model + "_" + self.calibration_id + ".png"
cv2.imwrite(file_name, image_array_bgr)
print('Camera image saved to {}'.format(file_name))
self.step()
def main():
"""For running the controller directly from Webots
without passing it as a parameter to vehicle_controller"""
controller = CalibrationRecorder(sys.argv)
controller.run()
if __name__ == "__main__":
main()
|
[] |
[] |
[
"WEBOTS_HOME"
] |
[]
|
["WEBOTS_HOME"]
|
python
| 1 | 0 | |
instauto/api/actions/request.py
|
import os
import requests
import time
import json
import urllib.parse
import logging
from typing import Dict, Callable, Union
from instauto.api.structs import DeviceProfile, IGProfile, State, Method
from instauto.api.constants import API_BASE_URL
from instauto.api.exceptions import WrongMethodException, IncorrectLoginDetails, InvalidUserId, BadResponse, AuthorizationError
logger = logging.getLogger(__name__)
logging.captureWarnings(True)
class RequestMixin:
ig_profile: IGProfile
device_profile: DeviceProfile
state: State
_user_agent: str
_encode_password: Callable
_session: requests.Session
_request_finished_callbacks: list
_handle_challenge: Callable
def _build_user_agent(self) -> str:
"""Builds a user agent, making use from all required values in `self.ig_profile`, `self.device_profile` and
`self.state`.
Returns
-------
s : str
The user agent
"""
s = f"Instagram {self.ig_profile.version} Android ({self.device_profile.android_sdk_version}/" \
f"{self.device_profile.android_release}; {self.device_profile.dpi}dpi;" \
f" {self.device_profile.resolution[0]}x{self.device_profile.resolution[1]}; " \
f"{self.device_profile.manufacturer}; {self.device_profile.device}; {self.device_profile.device};" \
f" {self.device_profile.chipset}; {self.state.app_locale}; {self.ig_profile.build_number})"
return s
def _build_default_headers(self) -> Dict[str, str]:
"""Builds a dictionary that contains all header values required for all other requests sent.
Returns
-------
d : dict:
Dictionary containing the mappings
"""
return {
'x-cm-bandwidth-kbps': '-1.000',
'x-cm-latency': '-1.000',
'x-ads-opt-out': str(int(self.state.ads_opt_out)),
'x-ig-app-locale': self.state.app_locale,
'x-ig-app-startup-country': self.state.startup_country,
'x-ig-device-locale': self.state.device_locale,
'x-ig-mapped-locale': self.state.device_locale,
'x-ig-connection-speed': self.state.connection_speed,
'x-ig-bandwidth-speed-kbps': self.state.bandwidth_speed_kbps,
'x-ig-bandwidth-totalbytes-b': self.state.bandwidth_totalbytes_b,
'x-ig-bandwidth-totaltime-ms': self.state.bandwidth_totaltime_ms,
'x-ig-www-claim': self.state.www_claim,
'x-ig-device-id': self.state.device_id,
'x-ig-android-id': self.state.android_id,
'x-ig-connection-type': self.state.connection_type,
'x-ig-capabilities': self.ig_profile.capabilities,
'x-ig-app-id': self.ig_profile.id,
'user-agent': self._user_agent,
'accept-language': self.state.accept_language,
'x-mid': self.state.mid,
'ig-u-rur': self.state.rur,
'accept-encoding': self.state.accept_encoding,
'x-fb-http-engine': self.ig_profile.http_engine,
'authorization': self.state.authorization,
'connection': 'close',
'x-pigeon-session-id': self.state.pigeon_session_id,
'x-pigeon-rawclienttime': str(round(time.time(), 3)),
'x-bloks-version-id': self.state.bloks_version_id,
'x-bloks-is-layout-rtl': self.state.bloks_is_layout_rtl,
'host': 'i.instagram.com'
}
def _update_state_from_headers(self, headers: Dict[str, str]) -> None:
"""Updates self.state with values received from ig-set-* headers.
        In most cases, the assignments are redundant, because the previous and new values are the same, but we'd rather
        be a little too cautious than too little. Sending back a wrong header value to Instagram would probably
result in undefined behaviour.
Parameters
----------
headers : Dict[str, str]
Mapping of header names to header values
"""
www_claim = headers.get('ig-set-www-claim')
if www_claim is not None: self.state.www_claim = www_claim
authorization = headers.get('ig-set-authorization')
if authorization is not None: self.state.authorization = authorization
user_id = headers.get('ig-set-ig-u-ds-user-id')
if user_id is not None: self.state.user_id = user_id
direct_region_hint = headers.get('ig-set-ig-u-direct_region_hint')
if direct_region_hint is not None: self.state.direct_region_hint = direct_region_hint
shbid = headers.get('ig-set-ig-u-shbid')
if shbid is not None: self.state.shbid = shbid
shbts = headers.get('ig-set-ig-u-shbts')
if shbts is not None: self.state.shbts = shbts
target = headers.get('ig-set-ig-u-target')
if target is not None: self.state.target = target
rur = headers.get('ig-set-ig-u-rur')
if rur is not None: self.state.rur = rur
mid = headers.get('ig-set-x-mid')
if mid is not None: self.state.mid = mid
public_api_key_id = headers.get('ig-set-password-encryption-key-id')
if public_api_key_id is not None: self.state.public_api_key_id = public_api_key_id
public_api_key = headers.get('ig-set-password-encryption-pub-key')
if public_api_key is not None: self.state.public_api_key = public_api_key; self._encode_password()
def _request(self, endpoint: str, method: Method, query: dict = None, data: Union[dict, bytes] = None, headers: Dict[str, str]
= None, default_headers: bool = None, signed: bool = None) -> requests.Response:
"""Creates and sends a request to the specified endpoint.
Parameters
----------
endpoint : str
The endpoint that the request should be send to. `endpoint` should start with a non '/' character.
`endpoint` should end with a '/'.
method : {Method.Post, Method.Get}
Specifies which method to use for sending the HTTP request.
query : dict, optional
A dictionary that contains all key-value pairs that should be added to the final url, as a query string.
data : dict, optional
            A dictionary that contains all key-value pairs that should be sent along with a POST request.
headers : dict, optional
A dictionary that contains all key-value pairs of the headers that should be sent along with the HTTP
request. Header values from this argument take priority over the default headers. Default headers are
overwritten if header values co-exist in both dict's.
Returns
-------
resp : requests.Response
The response that is returned by Instagram API.
Other Parameters
-------
default_headers : bool, optional
Defaults to True. If set to True, the default headers, `from ApiClient._build_default_headers`,
will be included in the request. This argument is pretty much only used in the initial request,
where we don't want to send headers, which we shouldn't know about at that point.
Raises
-------
WrongMethodException
When the `data` argument is provided, but the `method` argument is set to `Method.GET`. POST data cannot
            be sent along with GET requests. It is most likely that it either was mistaken for the `query` argument,
or that the method should be set to POST.
"""
if query is None: query = {}
if data is None: data = {}
if default_headers is None: default_headers = True
if headers is None: headers = {}
if signed is None: signed = False
if endpoint.startswith('/'):
logger.warning("Are you sure that the endpoint starts with a slash?")
# This isn't the cleanest, but it works. This makes sure we can just pass in the full url for endpoints that
# do not start with /api/v1 (pretty much only for uploading pictures/videos), without adding an extra boolean
# or method to the function/class.
if 'https://' not in endpoint:
url = API_BASE_URL.format(endpoint)
else:
url = endpoint
if query:
url += f"?{urllib.parse.urlencode(query)}"
if default_headers:
h = self._build_default_headers()
h.update(headers)
headers = h
if method == Method.GET and data:
raise WrongMethodException("Conflicting information. ApiClient._request was called with the method set to"
"Method.GET, but was also provided the `data` argument. Data can only be used "
"with GET request, did you want to use Method.POST instead?")
if method == Method.POST:
headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
            # pretty sure everything will work just fine if this is removed, but I haven't tested it yet. TODO:
if signed:
as_json = json.dumps(data)
data = {
'ig_sig_key_version': self.ig_profile.signature_key_version,
'signed_body': f'SIGNATURE.{as_json}'
}
try:
if method == Method.POST:
resp = self._session.post(url, data, headers=headers)
elif method == Method.GET:
resp = self._session.get(url, headers=headers)
else:
raise ValueError("Request method should be POST or GET")
logger.debug(f"Sent request to {url}, method: {resp.request.method} with data: \n {data}")
except ValueError as e:
raise e
except Exception as e: # todo: narrow down
logger.exception(f"Exception while sending request to {url} with data: \n {data}")
raise e
logger.debug(
f'{"*" * 20} START REQUEST {"*" * 20}\n'
f'METHOD: {resp.request.method}\n'
f'URL: {url}\n'
f'DATA: {data}\n'
f'HEADERS: {headers}\n'
f'RESPONSE: {resp.content}\n'
f'{"*" * 20} END REQUEST {"*" * 20}'
)
self._check_response_for_errors(resp)
for func in self._request_finished_callbacks:
func(resp.headers)
return resp
def _check_response_for_errors(self, resp: requests.Response) -> None:
if resp.ok:
return
try:
parsed = resp.json()
except json.JSONDecodeError:
if resp.status_code == 404 and '/friendships/' in resp.url:
raise InvalidUserId(f"account id: {resp.url.split('/')[-2]} is not recognized by Instagram or you do not have a relation with this account.")
logger.exception(f"response received: \n{resp.text}\nurl: {resp.url}\nstatus code: {resp.status_code}")
raise BadResponse("Received a non-200 response from Instagram")
if parsed.get('error_type') == 'bad_password':
raise IncorrectLoginDetails("Instagram does not recognize the provided login details")
if parsed.get('message') in ("checkpoint_required", "challenge_required"):
if not hasattr(self, '_handle_challenge'):
raise BadResponse("Challenge required. ChallengeMixin is not mixed in.")
eh = self._handle_challenge(resp)
if eh:
return
if parsed.get('message') == 'feedback_required':
if os.environ.get("ENABLE_INSTAUTO_USAGE_METRICS", True):
# This logs which actions cause limitations on Instagram accounts.
# I use this data to focus my development on area's where it's most needed.
requests.post('https://instauto.rooy.dev/feedback_required', data={
'feedback_url': parsed.get('feedback_url'),
'category': parsed.get('category')
})
raise BadResponse("Something unexpected happened. Please check the IG app.")
if parsed.get('message') == 'rate_limit_error':
raise TimeoutError("Calm down. Please try again in a few minutes.")
if parsed.get('message') == 'Not authorized to view user':
raise AuthorizationError("This is a private user, which you do not follow.")
raise BadResponse("Received a non-200 response from Instagram")
|
[] |
[] |
[
"ENABLE_INSTAUTO_USAGE_METRICS"
] |
[]
|
["ENABLE_INSTAUTO_USAGE_METRICS"]
|
python
| 1 | 0 | |
topic_clustering.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import numpy as np
import pandas as pd
from sklearn.metrics import silhouette_score
import tensorflow.compat.v2 as tf
import tensorflow_hub as hub
import tensorflow_text # pylint: disable=unused-import
# This flag disables GPU usage. Comment to use GPU with tensorflow.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
CLUSTER_LABELS_FILE = "cluster_labels.txt"
class TopicClustering(object):
"""Handles the clustering of reviews into topics."""
def __init__(self):
# Reduce verbosity of tensorflow
tf.get_logger().setLevel("ERROR")
default_folder = os.path.dirname(os.path.realpath(__file__))
self.cluster_labels_file_location = os.path.join(
default_folder, CLUSTER_LABELS_FILE)
self.model = hub.load(
"https://tfhub.dev/google/universal-sentence-encoder-multilingual/3")
self.candidate_cluster_names = []
if os.path.isfile(self.cluster_labels_file_location):
with open(self.cluster_labels_file_location, "r") as labels_file:
self.candidate_cluster_names = labels_file.read().splitlines()
logging.info("Found cluster labels file. %d labels loaded.",
len(self.candidate_cluster_names))
labels_file.close()
def recommend_topics(self, nouns):
"""Recommends a list of topics for a given set of nouns based on repetition.
Args:
nouns: a list with nouns for each review.
Returns:
The recommended list of topics.
"""
nouns = [s.replace("translated by google", " ") for s in nouns]
candidate_cluster_names = pd.Series(
" ".join(nouns).split()).value_counts()[0:150].index.to_list()
with open(self.cluster_labels_file_location, "w") as labels_file:
for label in candidate_cluster_names:
labels_file.write(label + "\n")
labels_file.close()
return candidate_cluster_names
def determine_topics(self, reviews):
"""Determines the topic for a given set of reviews.
Args:
reviews: the full set of reviews to classify. This is modified to add
a topic field with the calculated topic for each review.
Returns:
Nothing.
"""
nouns = [
self.extract_tokens(review["annotation"]["tokens"], "NOUN")
for review in reviews
]
if not self.candidate_cluster_names:
self.candidate_cluster_names = self.recommend_topics(nouns)
topics = self.modelling_pipeline(pd.DataFrame(nouns), [5, 10])
topics = topics.to_list()
for review in reviews:
review["topic"] = topics.pop(0)
return
def extract_tokens(self, token_syntax, tag):
"""Extracts specified token type for API request.
Args:
token_syntax: API request return from Language API
tag: type of token to return e.g. "NOUN" or "ADJ"
Returns:
string containing only words of specified syntax in API request
"""
return " ".join([
s["lemma"].lower()
for s in token_syntax
if s[u"partOfSpeech"][u"tag"] == tag
])
def modelling_pipeline(self, reviews, num_clusters_list, max_iterations=10):
"""Runs the clustering modelling pipeline with k-means.
Args:
reviews: pandas series of strings to assign to clusters
num_clusters_list: a list of the number of clusters to attempt. The
        modelling pipeline will select the number with the best silhouette
        coefficient.
max_iterations: the maximum number of iterations for k-means to perform
Returns:
numpy array containing the cluster names corresponding to reviews.
"""
if not isinstance(num_clusters_list, list):
raise ValueError("num_clusters_list is not a list")
vectors = self.model(reviews)
scores = [
self.generate_silhouette_score(vectors, k, max_iterations)
for k in num_clusters_list
]
scores = dict(zip(num_clusters_list, scores))
    best_num_clusters = max(scores, key=scores.get)
    logging.info("Optimal clusters is {} with silhouette score {}".format(
        best_num_clusters, scores[best_num_clusters]))
    cluster_indices, cluster_centers = self.generate_clusters(
        vectors, best_num_clusters, max_iterations)
index = self.return_most_similar_index(
cluster_centers, self.model(self.candidate_cluster_names))
cluster_names = dict(
zip(
np.arange(len(cluster_centers)),
[self.candidate_cluster_names[i] for i in list(index)]))
return pd.Series(cluster_indices).map(cluster_names)
def generate_silhouette_score(self,
vectors,
num_clusters,
max_iterations=10,
seed=32):
"""Generates the silhouette score of the clustering model.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (a) and the mean nearest-cluster distance (b) for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Args:
vectors: Tensor containing the embeddings of the review
num_clusters: the number of clusters to use
max_iterations: the maximum number of iterations for k-means to perform
seed: seed
Returns:
silhouette score as a float
"""
cluster_indices, _ = self.generate_clusters(
vectors, num_clusters, max_iterations=max_iterations, seed=seed)
score = silhouette_score(vectors.numpy(), np.array(cluster_indices))
logging.info("{} clusters yields {} silhouette score".format(
num_clusters, score))
return score
def generate_clusters(self,
vectors,
num_clusters,
max_iterations=10,
seed=32):
"""Generates clusters using vectors using K-means on cosine distance.
Args:
vectors: Tensor containing the embeddings of the reviews
num_clusters: the number of clusters to use
max_iterations: the maximum number of iterations for k-means to perform
seed: seed
Returns:
df with named topics
"""
kmeans = tf.compat.v1.estimator.experimental.KMeans(
num_clusters=num_clusters,
use_mini_batch=False,
seed=seed,
distance_metric=tf.compat.v1.estimator.experimental.KMeans
.COSINE_DISTANCE)
def input_fn():
return tf.compat.v1.train.limit_epochs(
          # first convert to numpy due to v1 & eager incompatibility
tf.convert_to_tensor(vectors.numpy(), dtype=tf.float32),
num_epochs=1)
previous_centers = None
score = 0
for i in range(max_iterations):
kmeans.train(input_fn)
cluster_centers = kmeans.cluster_centers()
      # Track the centers from the previous iteration (assigned unconditionally so the value is actually updated).
      previous_centers = cluster_centers
new_score = kmeans.score(input_fn) # sum of squared distances
# break if score improves by less than (arbitrary) 10%
logging.debug("Iteration %d - Sum of squared distances: %.0f", i,
new_score)
if np.divide(score, new_score) > 1.1 or score == 0:
score = new_score
else:
break
return list(kmeans.predict_cluster_index(input_fn)), cluster_centers
def return_most_similar_index(self, a, b, limit_cosine_similarity=0):
"""Returns the elements in b with the highest cosine similarity in a.
limit_cosine_similarity sets a lower bound limit on the cosine similarity
for an element to be returned (and returns -1 for these values).
Args:
a: Tensor of vectors
b: Tensor of vectors
      limit_cosine_similarity: float between 0 and 1
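
    Returns:
      A numpy array giving, for each vector in a, the index of the most similar
      vector in b, or -1 where the best similarity is below limit_cosine_similarity.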
"""
similarity = tf.reduce_sum(a[:, tf.newaxis] * b, axis=-1)
similarity = tf.math.divide(
similarity,
tf.norm(a[:, tf.newaxis], axis=-1) * tf.norm(b, axis=-1))
indices = tf.math.argmax(similarity, axis=1).numpy()
if limit_cosine_similarity > 0:
max_cosine_similarity = tf.math.reduce_max(similarity, axis=1).numpy()
indices[max_cosine_similarity < limit_cosine_similarity] = -1
return indices
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
scripts/add_staffline_symbols.py
|
#!/usr/bin/env python
"""The script ``add_staffline_symbols.py`` takes as input a CVC-MUSCIMA
(page, writer) index and a corresponding CropObjectList file
and adds to the CropObjectList staffline and staff objects."""
from __future__ import print_function, unicode_literals
from __future__ import division
from builtins import zip
from builtins import range
import argparse
import logging
import os
import time
import numpy
from skimage.io import imread
from skimage.measure import label
from skimage.morphology import watershed
from skimage.filters import gaussian
import matplotlib.pyplot as plt
from muscima.dataset import CVC_MUSCIMA
from muscima.io import parse_cropobject_list, export_cropobject_list
from muscima.cropobject import CropObject
from muscima.utils import connected_components2bboxes, compute_connected_components
__version__ = "0.0.1"
__author__ = "Jan Hajic jr."
##############################################################################
STAFFLINE_CLSNAME = 'staff_line'
STAFFSPACE_CLSNAME = 'staff_space'
STAFF_CLSNAME = 'staff'
#
# def connected_components2bboxes(labels):
# """Returns a dictionary of bounding boxes (upper left c., lower right c.)
# for each label.
#
# >>> labels = [[0, 0, 1, 1], [2, 0, 0, 1], [2, 0, 0, 0], [0, 0, 3, 3]]
# >>> bboxes = connected_components2bboxes(labels)
# >>> bboxes[0]
# [0, 0, 4, 4]
# >>> bboxes[1]
# [0, 2, 2, 4]
# >>> bboxes[2]
# [1, 0, 3, 1]
# >>> bboxes[3]
# [3, 2, 4, 4]
#
#
# :param labels: The output of cv2.connectedComponents().
#
# :returns: A dict indexed by labels. The values are quadruplets
# (xmin, ymin, xmax, ymax) so that the component with the given label
# lies exactly within labels[xmin:xmax, ymin:ymax].
# """
# bboxes = {}
# for x, row in enumerate(labels):
# for y, l in enumerate(row):
# if l not in bboxes:
# bboxes[l] = [x, y, x+1, y+1]
# else:
# box = bboxes[l]
# if x < box[0]:
# box[0] = x
# elif x + 1 > box[2]:
# box[2] = x + 1
# if y < box[1]:
# box[1] = y
# elif y + 1 > box[3]:
# box[3] = y + 1
# return bboxes
#
#
# def compute_connected_components(image):
# labels = label(image, background=0)
# cc = int(labels.max())
# bboxes = connected_components2bboxes(labels)
# return cc, labels, bboxes
#
##############################################################################
def build_argument_parser():
parser = argparse.ArgumentParser(description=__doc__, add_help=True,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-i', '--staff_imfile', action='store',
help='The image file with the staff-only image.'
' If not given, will use -n, -w and -r'
' to load it from the CVC-MUSCIMA staff removal'
' ground truth.')
parser.add_argument('-n', '--number', action='store', type=int,
help='Number of the CVC-MUSCIMA page (1 - 20)')
parser.add_argument('-w', '--writer', action='store', type=int,
help='Writer of the CVC-MUSCIMA page (1 - 50)')
parser.add_argument('-r', '--root', action='store',
default=os.getenv('CVC_MUSCIMA_ROOT', None),
help='Path to CVC-MUSCIMA dataset root. By default, will attempt'
' to read the CVC_MUSCIMA_ROOT env var. If that does not'
' work, the script will fail.')
parser.add_argument('-a', '--annot', action='store', # required=True,
help='The annotation file for which the staffline and staff'
' CropObjects should be added. If not supplied, default'
' doc/collection names will be used and cropobjects will'
                            ' be numbered from 0 in the output.')
parser.add_argument('-e', '--export', action='store',
help='A filename to which the output CropObjectList'
' should be saved. If not given, will print to'
' stdout.')
parser.add_argument('--stafflines_only', action='store_true',
help='If set, will only output stafflines, not other symbols.')
parser.add_argument('-v', '--verbose', action='store_true',
help='Turn on INFO messages.')
parser.add_argument('--debug', action='store_true',
help='Turn on DEBUG messages.')
return parser
def main(args):
logging.info('Starting main...')
_start_time = time.clock()
########################################################
# Load gt image.
logging.info('Loading staffline image.')
# - Initialize Dataset. This checks for the root.
if args.staff_imfile is None:
cvc_dataset = CVC_MUSCIMA(root=args.root)
args.staff_imfile = cvc_dataset.imfile(page=args.number,
writer=args.writer,
distortion='ideal',
mode='staff_only')
# - Load the image.
gt = (imread(args.staff_imfile, as_grey=True) * 255).astype('uint8')
# - Cast as binary mask.
gt[gt > 0] = 1
########################################################
# Locate stafflines in gt image.
logging.info('Getting staffline connected components.')
# - Get connected components in gt image.
cc, labels, bboxes = compute_connected_components(gt)
# - Use vertical dimension of CCs to determine which ones belong together
# to form stafflines. (Criterion: row overlap.)
n_rows, n_cols = gt.shape
intervals = [[] for _ in range(n_rows)] # For each row: which CCs have pxs on that row?
for label, (t, l, b, r) in list(bboxes.items()):
if label == 0:
continue
# Ignore very short staffline segments that can easily be artifacts
# and should not affect the vertical range of the staffline anyway.
if (r - l) < 8:
continue
for row in range(t, b):
intervals[row].append(label)
logging.info('Grouping staffline connected components into stafflines.')
staffline_components = [] # For each staffline, we collect the CCs that it is made of
_in_staffline = False
_current_staffline_components = []
for r_labels in intervals:
if not _in_staffline:
# Last row did not contain staffline components.
if len(r_labels) == 0:
# No staffline component on current row
continue
else:
_in_staffline = True
_current_staffline_components += r_labels
else:
# Last row contained staffline components.
if len(r_labels) == 0:
# Current staffline has no more rows.
staffline_components.append(set(_current_staffline_components))
_current_staffline_components = []
_in_staffline = False
continue
else:
# Current row contains staffline components: the current
# staffline continues.
_current_staffline_components += r_labels
logging.info('No. of stafflines, with component groups: {0}'
''.format(len(staffline_components)))
# Now: merge the staffline components into one bbox/mask.
logging.info('Merging staffline components into staffline bboxes and masks.')
staffline_bboxes = []
staffline_masks = []
for sc in sorted(staffline_components,
key=lambda c: min([bboxes[cc][0]
for cc in c])): # Sorted top-down
st, sl, sb, sr = n_rows, n_cols, 0, 0
for component in sc:
t, l, b, r = bboxes[component]
st, sl, sb, sr = min(t, st), min(l, sl), max(b, sb), max(r, sr)
_sm = gt[st:sb, sl:sr]
staffline_bboxes.append((st, sl, sb, sr))
staffline_masks.append(_sm)
# Check if n. of stafflines is divisible by 5
n_stafflines = len(staffline_bboxes)
logging.info('\tTotal stafflines: {0}'.format(n_stafflines))
if n_stafflines % 5 != 0:
import matplotlib.pyplot as plt
stafllines_mask_image = numpy.zeros(gt.shape)
for i, (_sb, _sm) in enumerate(zip(staffline_bboxes, staffline_masks)):
t, l, b, r = _sb
stafllines_mask_image[t:b, l:r] = min(255, (i * 333) % 255 + 40)
plt.imshow(stafllines_mask_image, cmap='jet', interpolation='nearest')
plt.show()
raise ValueError('No. of stafflines is not divisible by 5!')
logging.info('Creating staff bboxes and masks.')
# - Go top-down and group the stafflines by five to get staves.
# (The staffline bboxes are already sorted top-down.)
staff_bboxes = []
staff_masks = []
for i in range(n_stafflines // 5):
_sbb = staffline_bboxes[5*i:5*(i+1)]
_st = min([bb[0] for bb in _sbb])
_sl = min([bb[1] for bb in _sbb])
_sb = max([bb[2] for bb in _sbb])
_sr = max([bb[3] for bb in _sbb])
staff_bboxes.append((_st, _sl, _sb, _sr))
staff_masks.append(gt[_st:_sb, _sl:_sr])
logging.info('Total staffs: {0}'.format(len(staff_bboxes)))
##################################################################
# (Optionally fill in missing pixels, based on full image.)
logging.info('SKIP: fill in missing pixels based on full image.')
# - Load full image
# - Find gap regions
# - Obtain gap region masks from full image
# - Add gap region mask to staffline mask.
# Create the CropObjects for stafflines and staffs:
# - Load corresponding annotation, to which the stafflines and
# staves should be added. (This is needed to correctly set docname
# and objids.)
if not args.annot:
cropobjects = []
next_objid = 0
dataset_namespace = 'FCNOMR'
docname = os.path.splitext(os.path.basename(args.staff_imfile))[0]
else:
if not os.path.isfile(args.annot):
raise ValueError('Annotation file {0} does not exist!'.format(args.annot))
logging.info('Creating cropobjects...')
cropobjects = parse_cropobject_list(args.annot)
logging.info('Non-staffline cropobjects: {0}'.format(len(cropobjects)))
next_objid = max([c.objid for c in cropobjects]) + 1
dataset_namespace = cropobjects[0].dataset
docname = cropobjects[0].doc
# - Create the staffline CropObjects
staffline_cropobjects = []
for sl_bb, sl_m in zip(staffline_bboxes, staffline_masks):
uid = CropObject.build_uid(dataset_namespace, docname, next_objid)
t, l, b, r = sl_bb
c = CropObject(objid=next_objid,
clsname=STAFFLINE_CLSNAME,
top=t, left=l, height=b - t, width=r - l,
mask=sl_m,
uid=uid)
staffline_cropobjects.append(c)
next_objid += 1
if not args.stafflines_only:
# - Create the staff CropObjects
staff_cropobjects = []
for s_bb, s_m in zip(staff_bboxes, staff_masks):
uid = CropObject.build_uid(dataset_namespace, docname, next_objid)
t, l, b, r = s_bb
c = CropObject(objid=next_objid,
clsname=STAFF_CLSNAME,
top=t, left=l, height=b - t, width=r - l,
mask=s_m,
uid=uid)
staff_cropobjects.append(c)
next_objid += 1
# - Add the inlinks/outlinks
for i, sc in enumerate(staff_cropobjects):
sl_from = 5 * i
sl_to = 5 * (i + 1)
for sl in staffline_cropobjects[sl_from:sl_to]:
sl.inlinks.append(sc.objid)
sc.outlinks.append(sl.objid)
# Add the staffspaces.
staffspace_cropobjects = []
for i, staff in enumerate(staff_cropobjects):
current_stafflines = [sc for sc in staffline_cropobjects if sc.objid in staff.outlinks]
sorted_stafflines = sorted(current_stafflines, key=lambda x: x.top)
current_staffspace_cropobjects = []
# Percussion single-line staves do not have staffspaces.
if len(sorted_stafflines) == 1:
continue
# Internal staffspace
for s1, s2 in zip(sorted_stafflines[:-1], sorted_stafflines[1:]):
# s1 is the UPPER staffline, s2 is the LOWER staffline
# Left and right limits: to simplify things, we take the column
# *intersection* of (s1, s2). This gives the invariant that
# the staffspace is limited from top and bottom in each of its columns.
l = max(s1.left, s2.left)
r = min(s1.right, s2.right)
# Shift s1, s2 to the right by this much to have the cols. align
# All of these are non-negative.
dl1, dl2 = l - s1.left, l - s2.left
dr1, dr2 = s1.right - r, s2.right - r
# The stafflines are not necessarily straight,
# so top is given for the *topmost bottom edge* of the top staffline + 1
# First create mask
canvas = numpy.zeros((s2.bottom - s1.top, r - l), dtype='uint8')
# Paste masks into canvas.
# This assumes that the top of the bottom staffline is below
# the top of the top staffline... and that the bottom
# of the top staffline is above the bottom of the bottom
# staffline. This may not hold in very weird situations,
# but it's good for now.
logging.debug(s1.bounding_box, s1.mask.shape)
logging.debug(s2.bounding_box, s2.mask.shape)
logging.debug(canvas.shape)
logging.debug('l={0}, dl1={1}, dl2={2}, r={3}, dr1={4}, dr2={5}'
''.format(l, dl1, dl2, r, dr1, dr2))
#canvas[:s1.height, :] += s1.mask[:, dl1:s1.width-dr1]
#canvas[-s2.height:, :] += s2.mask[:, dl2:s2.width-dr2]
# We have to deal with staffline interruptions.
# One way to do this
# is watershed fill: put markers along the bottom and top
# edge, use mask * 10000 as elevation
s1_above, s1_below = staffline_surroundings_mask(s1)
s2_above, s2_below = staffline_surroundings_mask(s2)
# Get bounding boxes of the individual stafflines' masks
# that intersect with the staffspace bounding box, in terms
# of the staffline bounding box.
s1_t, s1_l, s1_b, s1_r = 0, dl1, \
s1.height, s1.width - dr1
s1_h, s1_w = s1_b - s1_t, s1_r - s1_l
s2_t, s2_l, s2_b, s2_r = canvas.shape[0] - s2.height, dl2, \
canvas.shape[0], s2.width - dr2
s2_h, s2_w = s2_b - s2_t, s2_r - s2_l
logging.debug(s1_t, s1_l, s1_b, s1_r, (s1_h, s1_w))
# We now take the intersection of s1_below and s2_above.
# If there is empty space in the middle, we fill it in.
staffspace_mask = numpy.ones(canvas.shape)
staffspace_mask[s1_t:s1_b, :] -= (1 - s1_below[:, dl1:s1.width-dr1])
staffspace_mask[s2_t:s2_b, :] -= (1 - s2_above[:, dl2:s2.width-dr2])
ss_top = s1.top
ss_bottom = s2.bottom
ss_left = l
ss_right = r
uid = CropObject.build_uid(dataset_namespace, docname, next_objid)
staffspace = CropObject(next_objid, STAFFSPACE_CLSNAME,
top=ss_top, left=ss_left,
height=ss_bottom - ss_top,
width=ss_right - ss_left,
mask=staffspace_mask,
uid=uid)
staffspace.inlinks.append(staff.objid)
staff.outlinks.append(staffspace.objid)
current_staffspace_cropobjects.append(staffspace)
next_objid += 1
# Add top and bottom staffspace.
# These outer staffspaces will have the width
# of their bottom neighbor, and height derived
# from its mask columns.
# This is quite approximate, but it should do.
# Upper staffspace
tsl = sorted_stafflines[0]
tsl_heights = tsl.mask.sum(axis=0)
tss = current_staffspace_cropobjects[0]
tss_heights = tss.mask.sum(axis=0)
uss_top = max(0, tss.top - max(tss_heights))
uss_left = tss.left
uss_width = tss.width
            # We scale the height down (dividing by 1.2), so that large noteheads
            # do not "hang out" of the staffspace.
uss_height = int(tss.height / 1.2)
# Shift because of height downscaling:
uss_top += tss.height - uss_height
uss_mask = tss.mask[:uss_height, :] * 1
uid = CropObject.build_uid(dataset_namespace, docname, next_objid)
staffspace = CropObject(next_objid, STAFFSPACE_CLSNAME,
top=uss_top, left=uss_left,
height=uss_height,
width=uss_width,
mask=uss_mask,
uid=uid)
current_staffspace_cropobjects.append(staffspace)
staff.outlinks.append(staffspace.objid)
staffspace.inlinks.append(staff.objid)
next_objid += 1
# Lower staffspace
bss = current_staffspace_cropobjects[-1]
bss_heights = bss.mask.sum(axis=0)
bsl = sorted_stafflines[-1]
bsl_heights = bsl.mask.sum(axis=0)
lss_top = bss.bottom # + max(bsl_heights)
lss_left = bss.left
lss_width = bss.width
lss_height = int(bss.height / 1.2)
lss_mask = bss.mask[:lss_height, :] * 1
uid = CropObject.build_uid(dataset_namespace, docname, next_objid)
staffspace = CropObject(next_objid, STAFFSPACE_CLSNAME,
top=lss_top, left=lss_left,
height=lss_height,
width=lss_width,
mask=lss_mask,
uid=uid)
current_staffspace_cropobjects.append(staffspace)
staff.outlinks.append(staffspace.objid)
staffspace.inlinks.append(staff.objid)
next_objid += 1
# ################ End of dealing with upper/lower staffspace ######
# Add to current list
staffspace_cropobjects += current_staffspace_cropobjects
# - Join the lists together
cropobjects_with_staffs = cropobjects \
+ staffline_cropobjects \
+ staffspace_cropobjects \
+ staff_cropobjects
else:
cropobjects_with_staffs = cropobjects + staffline_cropobjects
logging.info('Exporting the new cropobject list: {0} objects'
''.format(len(cropobjects_with_staffs)))
# - Export the combined list.
cropobject_string = export_cropobject_list(cropobjects_with_staffs)
if args.export is not None:
with open(args.export, 'w') as hdl:
hdl.write(cropobject_string)
else:
print(cropobject_string)
_end_time = time.clock()
logging.info('add_staffline_symbols.py done in {0:.3f} s'
''.format(_end_time - _start_time))
def staffline_surroundings_mask(staffline_cropobject):
"""Find the parts of the staffline's bounding box which lie
above or below the actual staffline.
These areas will be very small for straight stafflines,
but might be considerable when staffline curvature grows.
"""
# We segment both masks into "above staffline" and "below staffline"
# areas.
elevation = staffline_cropobject.mask * 255
# Blur, to plug small holes somewhat:
elevation = gaussian(elevation, sigma=1.0)
# Prepare the segmentation markers: 1 is ABOVE, 2 is BELOW
markers = numpy.zeros(staffline_cropobject.mask.shape)
markers[0, :] = 1
markers[-1, :] = 2
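# Clear the markers on the staffline pixels themselves, so that the
# watershed is free to assign them to either region.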
markers[staffline_cropobject.mask != 0] = 0
seg = watershed(elevation, markers)
bmask = numpy.ones(seg.shape)
bmask[seg != 2] = 0
tmask = numpy.ones(seg.shape)
tmask[seg != 1] = 0
return bmask, tmask
if __name__ == '__main__':
parser = build_argument_parser()
args = parser.parse_args()
if args.verbose:
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
if args.debug:
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
main(args)
|
[] |
[] |
[
"CVC_MUSCIMA_ROOT"
] |
[]
|
["CVC_MUSCIMA_ROOT"]
|
python
| 1 | 0 | |
src/zhekudblog/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'zhekudblog.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
column/uint32_unsafe_test.go
|
//go:build (386 || amd64 || amd64p32 || arm || arm64 || mipsle || mips64le || mips64p32le || ppc64le || riscv || riscv64) && !purego
// +build 386 amd64 amd64p32 arm arm64 mipsle mips64le mips64p32le ppc64le riscv riscv64
// +build !purego
package column_test
import (
"context"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/vahid-sohrabloo/chconn"
"github.com/vahid-sohrabloo/chconn/column"
)
func TestUint32Unsafe(t *testing.T) {
t.Parallel()
connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
conn, err := chconn.Connect(context.Background(), connString)
require.NoError(t, err)
res, err := conn.Exec(context.Background(), `DROP TABLE IF EXISTS test_uint32_unsafe`)
require.NoError(t, err)
require.Nil(t, res)
res, err = conn.Exec(context.Background(), `CREATE TABLE test_uint32_unsafe (
uint32 UInt32
) Engine=Memory`)
require.NoError(t, err)
require.Nil(t, res)
col := column.NewUint32(false)
var colInsert []uint32
rows := 10
for i := 0; i < rows; i++ {
val := uint32(i * 4)
col.Append(val)
colInsert = append(colInsert, val)
}
err = conn.Insert(context.Background(), `INSERT INTO
test_uint32_unsafe (uint32)
VALUES`, col)
require.NoError(t, err)
// example get all
selectStmt, err := conn.Select(context.Background(), `SELECT
uint32
FROM test_uint32_unsafe`)
require.NoError(t, err)
require.True(t, conn.IsBusy())
colRead := column.NewUint32(false)
var colData []uint32
for selectStmt.Next() {
err = selectStmt.ReadColumns(colRead)
require.NoError(t, err)
colData = append(colData, colRead.GetAllUnsafe()...)
}
assert.Equal(t, colInsert, colData)
selectStmt.Close()
conn.Close()
}
|
[
"\"CHX_TEST_TCP_CONN_STRING\""
] |
[] |
[
"CHX_TEST_TCP_CONN_STRING"
] |
[]
|
["CHX_TEST_TCP_CONN_STRING"]
|
go
| 1 | 0 | |
api4/system_test.go
|
package api4
import (
"fmt"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"github.com/mattermost/mattermost-server/mlog"
"github.com/mattermost/mattermost-server/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestGetPing(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
goRoutineHealthThreshold := *th.App.Config().ServiceSettings.GoroutineHealthThreshold
defer func() {
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.GoroutineHealthThreshold = goRoutineHealthThreshold })
}()
status, resp := Client.GetPing()
CheckNoError(t, resp)
if status != "OK" {
t.Fatal("should return OK")
}
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.GoroutineHealthThreshold = 10 })
status, resp = th.SystemAdminClient.GetPing()
CheckInternalErrorStatus(t, resp)
if status != "unhealthy" {
t.Fatal("should return unhealthy")
}
}
func TestGetConfig(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
_, resp := Client.GetConfig()
CheckForbiddenStatus(t, resp)
cfg, resp := th.SystemAdminClient.GetConfig()
CheckNoError(t, resp)
require.NotEqual(t, "", cfg.TeamSettings.SiteName)
if *cfg.LdapSettings.BindPassword != model.FAKE_SETTING && len(*cfg.LdapSettings.BindPassword) != 0 {
t.Fatal("did not sanitize properly")
}
if *cfg.FileSettings.PublicLinkSalt != model.FAKE_SETTING {
t.Fatal("did not sanitize properly")
}
if cfg.FileSettings.AmazonS3SecretAccessKey != model.FAKE_SETTING && len(cfg.FileSettings.AmazonS3SecretAccessKey) != 0 {
t.Fatal("did not sanitize properly")
}
if cfg.EmailSettings.InviteSalt != model.FAKE_SETTING {
t.Fatal("did not sanitize properly")
}
if cfg.EmailSettings.SMTPPassword != model.FAKE_SETTING && len(cfg.EmailSettings.SMTPPassword) != 0 {
t.Fatal("did not sanitize properly")
}
if cfg.GitLabSettings.Secret != model.FAKE_SETTING && len(cfg.GitLabSettings.Secret) != 0 {
t.Fatal("did not sanitize properly")
}
if *cfg.SqlSettings.DataSource != model.FAKE_SETTING {
t.Fatal("did not sanitize properly")
}
if cfg.SqlSettings.AtRestEncryptKey != model.FAKE_SETTING {
t.Fatal("did not sanitize properly")
}
if !strings.Contains(strings.Join(cfg.SqlSettings.DataSourceReplicas, " "), model.FAKE_SETTING) && len(cfg.SqlSettings.DataSourceReplicas) != 0 {
t.Fatal("did not sanitize properly")
}
if !strings.Contains(strings.Join(cfg.SqlSettings.DataSourceSearchReplicas, " "), model.FAKE_SETTING) && len(cfg.SqlSettings.DataSourceSearchReplicas) != 0 {
t.Fatal("did not sanitize properly")
}
}
func TestReloadConfig(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
flag, resp := Client.ReloadConfig()
CheckForbiddenStatus(t, resp)
if flag {
t.Fatal("should not Reload the config due no permission.")
}
flag, resp = th.SystemAdminClient.ReloadConfig()
CheckNoError(t, resp)
if !flag {
t.Fatal("should Reload the config")
}
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.TeamSettings.MaxUsersPerTeam = 50 })
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.TeamSettings.EnableOpenServer = true })
}
func TestUpdateConfig(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
cfg, resp := th.SystemAdminClient.GetConfig()
CheckNoError(t, resp)
_, resp = Client.UpdateConfig(cfg)
CheckForbiddenStatus(t, resp)
SiteName := th.App.Config().TeamSettings.SiteName
cfg.TeamSettings.SiteName = "MyFancyName"
cfg, resp = th.SystemAdminClient.UpdateConfig(cfg)
CheckNoError(t, resp)
require.Equal(t, "MyFancyName", cfg.TeamSettings.SiteName, "It should update the SiteName")
//Revert the change
cfg.TeamSettings.SiteName = SiteName
cfg, resp = th.SystemAdminClient.UpdateConfig(cfg)
CheckNoError(t, resp)
require.Equal(t, SiteName, cfg.TeamSettings.SiteName, "It should update the SiteName")
t.Run("Should not be able to modify PluginSettings.EnableUploads", func(t *testing.T) {
oldEnableUploads := *th.App.GetConfig().PluginSettings.EnableUploads
*cfg.PluginSettings.EnableUploads = !oldEnableUploads
cfg, resp = th.SystemAdminClient.UpdateConfig(cfg)
CheckNoError(t, resp)
assert.Equal(t, oldEnableUploads, *cfg.PluginSettings.EnableUploads)
assert.Equal(t, oldEnableUploads, *th.App.GetConfig().PluginSettings.EnableUploads)
cfg.PluginSettings.EnableUploads = nil
cfg, resp = th.SystemAdminClient.UpdateConfig(cfg)
CheckNoError(t, resp)
assert.Equal(t, oldEnableUploads, *cfg.PluginSettings.EnableUploads)
assert.Equal(t, oldEnableUploads, *th.App.GetConfig().PluginSettings.EnableUploads)
})
}
func TestUpdateConfigMessageExportSpecialHandling(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
messageExportEnabled := *th.App.Config().MessageExportSettings.EnableExport
messageExportTimestamp := *th.App.Config().MessageExportSettings.ExportFromTimestamp
defer th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.MessageExportSettings.EnableExport = messageExportEnabled
*cfg.MessageExportSettings.ExportFromTimestamp = messageExportTimestamp
})
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.MessageExportSettings.EnableExport = false
*cfg.MessageExportSettings.ExportFromTimestamp = int64(0)
})
// Turn it on, timestamp should be updated.
cfg, resp := th.SystemAdminClient.GetConfig()
CheckNoError(t, resp)
*cfg.MessageExportSettings.EnableExport = true
cfg, resp = th.SystemAdminClient.UpdateConfig(cfg)
CheckNoError(t, resp)
assert.True(t, *th.App.Config().MessageExportSettings.EnableExport)
assert.NotEqual(t, int64(0), *th.App.Config().MessageExportSettings.ExportFromTimestamp)
// Turn it off, timestamp should be cleared.
cfg, resp = th.SystemAdminClient.GetConfig()
CheckNoError(t, resp)
*cfg.MessageExportSettings.EnableExport = false
cfg, resp = th.SystemAdminClient.UpdateConfig(cfg)
CheckNoError(t, resp)
assert.False(t, *th.App.Config().MessageExportSettings.EnableExport)
assert.Equal(t, int64(0), *th.App.Config().MessageExportSettings.ExportFromTimestamp)
// Set a value from the config file.
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.MessageExportSettings.EnableExport = false
*cfg.MessageExportSettings.ExportFromTimestamp = int64(12345)
})
// Turn it on, timestamp should *not* be updated.
cfg, resp = th.SystemAdminClient.GetConfig()
CheckNoError(t, resp)
*cfg.MessageExportSettings.EnableExport = true
cfg, resp = th.SystemAdminClient.UpdateConfig(cfg)
CheckNoError(t, resp)
assert.True(t, *th.App.Config().MessageExportSettings.EnableExport)
assert.Equal(t, int64(12345), *th.App.Config().MessageExportSettings.ExportFromTimestamp)
// Turn it off, timestamp should be cleared.
cfg, resp = th.SystemAdminClient.GetConfig()
CheckNoError(t, resp)
*cfg.MessageExportSettings.EnableExport = false
cfg, resp = th.SystemAdminClient.UpdateConfig(cfg)
CheckNoError(t, resp)
assert.False(t, *th.App.Config().MessageExportSettings.EnableExport)
assert.Equal(t, int64(0), *th.App.Config().MessageExportSettings.ExportFromTimestamp)
}
func TestGetEnvironmentConfig(t *testing.T) {
os.Setenv("MM_SERVICESETTINGS_SITEURL", "http://example.mattermost.com")
os.Setenv("MM_SERVICESETTINGS_ENABLECUSTOMEMOJI", "true")
defer os.Unsetenv("MM_SERVICESETTINGS_SITEURL")
th := Setup().InitBasic()
defer th.TearDown()
t.Run("as system admin", func(t *testing.T) {
SystemAdminClient := th.SystemAdminClient
envConfig, resp := SystemAdminClient.GetEnvironmentConfig()
CheckNoError(t, resp)
if serviceSettings, ok := envConfig["ServiceSettings"]; !ok {
t.Fatal("should've returned ServiceSettings")
} else if serviceSettingsAsMap, ok := serviceSettings.(map[string]interface{}); !ok {
t.Fatal("should've returned ServiceSettings as a map")
} else {
if siteURL, ok := serviceSettingsAsMap["SiteURL"]; !ok {
t.Fatal("should've returned ServiceSettings.SiteURL")
} else if siteURLAsBool, ok := siteURL.(bool); !ok {
t.Fatal("should've returned ServiceSettings.SiteURL as a boolean")
} else if !siteURLAsBool {
t.Fatal("should've returned ServiceSettings.SiteURL as true")
}
if enableCustomEmoji, ok := serviceSettingsAsMap["EnableCustomEmoji"]; !ok {
t.Fatal("should've returned ServiceSettings.EnableCustomEmoji")
} else if enableCustomEmojiAsBool, ok := enableCustomEmoji.(bool); !ok {
t.Fatal("should've returned ServiceSettings.EnableCustomEmoji as a boolean")
} else if !enableCustomEmojiAsBool {
t.Fatal("should've returned ServiceSettings.EnableCustomEmoji as true")
}
}
if _, ok := envConfig["TeamSettings"]; ok {
t.Fatal("should not have returned TeamSettings")
}
})
t.Run("as team admin", func(t *testing.T) {
TeamAdminClient := th.CreateClient()
th.LoginTeamAdminWithClient(TeamAdminClient)
_, resp := TeamAdminClient.GetEnvironmentConfig()
CheckForbiddenStatus(t, resp)
})
t.Run("as regular user", func(t *testing.T) {
Client := th.Client
_, resp := Client.GetEnvironmentConfig()
CheckForbiddenStatus(t, resp)
})
t.Run("as not-regular user", func(t *testing.T) {
Client := th.CreateClient()
_, resp := Client.GetEnvironmentConfig()
CheckUnauthorizedStatus(t, resp)
})
}
func TestGetOldClientConfig(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
testKey := "supersecretkey"
th.App.UpdateConfig(func(cfg *model.Config) { cfg.ServiceSettings.GoogleDeveloperKey = testKey })
t.Run("with session", func(t *testing.T) {
th.App.UpdateConfig(func(cfg *model.Config) {
cfg.ServiceSettings.GoogleDeveloperKey = testKey
})
Client := th.Client
config, resp := Client.GetOldClientConfig("")
CheckNoError(t, resp)
if len(config["Version"]) == 0 {
t.Fatal("config not returned correctly")
}
if config["GoogleDeveloperKey"] != testKey {
t.Fatal("config missing developer key")
}
})
t.Run("without session", func(t *testing.T) {
th.App.UpdateConfig(func(cfg *model.Config) {
cfg.ServiceSettings.GoogleDeveloperKey = testKey
})
Client := th.CreateClient()
config, resp := Client.GetOldClientConfig("")
CheckNoError(t, resp)
if len(config["Version"]) == 0 {
t.Fatal("config not returned correctly")
}
if _, ok := config["GoogleDeveloperKey"]; ok {
t.Fatal("config should be missing developer key")
}
})
t.Run("missing format", func(t *testing.T) {
Client := th.Client
if _, err := Client.DoApiGet("/config/client", ""); err == nil || err.StatusCode != http.StatusNotImplemented {
t.Fatal("should have errored with 501")
}
})
t.Run("invalid format", func(t *testing.T) {
Client := th.Client
if _, err := Client.DoApiGet("/config/client?format=junk", ""); err == nil || err.StatusCode != http.StatusBadRequest {
t.Fatal("should have errored with 400")
}
})
}
func TestGetOldClientLicense(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
license, resp := Client.GetOldClientLicense("")
CheckNoError(t, resp)
if len(license["IsLicensed"]) == 0 {
t.Fatal("license not returned correctly")
}
Client.Logout()
_, resp = Client.GetOldClientLicense("")
CheckNoError(t, resp)
if _, err := Client.DoApiGet("/license/client", ""); err == nil || err.StatusCode != http.StatusNotImplemented {
t.Fatal("should have errored with 501")
}
if _, err := Client.DoApiGet("/license/client?format=junk", ""); err == nil || err.StatusCode != http.StatusBadRequest {
t.Fatal("should have errored with 400")
}
license, resp = th.SystemAdminClient.GetOldClientLicense("")
CheckNoError(t, resp)
if len(license["IsLicensed"]) == 0 {
t.Fatal("license not returned correctly")
}
}
func TestGetAudits(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
audits, resp := th.SystemAdminClient.GetAudits(0, 100, "")
CheckNoError(t, resp)
if len(audits) == 0 {
t.Fatal("should not be empty")
}
audits, resp = th.SystemAdminClient.GetAudits(0, 1, "")
CheckNoError(t, resp)
if len(audits) != 1 {
t.Fatal("should only be 1")
}
audits, resp = th.SystemAdminClient.GetAudits(1, 1, "")
CheckNoError(t, resp)
if len(audits) != 1 {
t.Fatal("should only be 1")
}
_, resp = th.SystemAdminClient.GetAudits(-1, -1, "")
CheckNoError(t, resp)
_, resp = Client.GetAudits(0, 100, "")
CheckForbiddenStatus(t, resp)
Client.Logout()
_, resp = Client.GetAudits(0, 100, "")
CheckUnauthorizedStatus(t, resp)
}
func TestEmailTest(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
config := model.Config{
EmailSettings: model.EmailSettings{
SMTPServer: "",
SMTPPort: "",
},
}
_, resp := Client.TestEmail(&config)
CheckForbiddenStatus(t, resp)
_, resp = th.SystemAdminClient.TestEmail(&config)
CheckErrorMessage(t, resp, "api.admin.test_email.missing_server")
CheckBadRequestStatus(t, resp)
inbucket_host := os.Getenv("CI_INBUCKET_HOST")
if inbucket_host == "" {
inbucket_host = "dockerhost"
}
inbucket_port := os.Getenv("CI_INBUCKET_PORT")
if inbucket_port == "" {
inbucket_port = "9000"
}
config.EmailSettings.SMTPServer = inbucket_host
config.EmailSettings.SMTPPort = inbucket_port
_, resp = th.SystemAdminClient.TestEmail(&config)
CheckOKStatus(t, resp)
}
func TestDatabaseRecycle(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
_, resp := Client.DatabaseRecycle()
CheckForbiddenStatus(t, resp)
_, resp = th.SystemAdminClient.DatabaseRecycle()
CheckNoError(t, resp)
}
func TestInvalidateCaches(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
flag, resp := Client.InvalidateCaches()
CheckForbiddenStatus(t, resp)
if flag {
t.Fatal("should not clean the cache due no permission.")
}
flag, resp = th.SystemAdminClient.InvalidateCaches()
CheckNoError(t, resp)
if !flag {
t.Fatal("should clean the cache")
}
}
func TestGetLogs(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
for i := 0; i < 20; i++ {
mlog.Info(fmt.Sprint(i))
}
logs, resp := th.SystemAdminClient.GetLogs(0, 10)
CheckNoError(t, resp)
if len(logs) != 10 {
t.Log(len(logs))
t.Fatal("wrong length")
}
logs, resp = th.SystemAdminClient.GetLogs(1, 10)
CheckNoError(t, resp)
if len(logs) != 10 {
t.Log(len(logs))
t.Fatal("wrong length")
}
logs, resp = th.SystemAdminClient.GetLogs(-1, -1)
CheckNoError(t, resp)
if len(logs) == 0 {
t.Fatal("should not be empty")
}
_, resp = Client.GetLogs(0, 10)
CheckForbiddenStatus(t, resp)
Client.Logout()
_, resp = Client.GetLogs(0, 10)
CheckUnauthorizedStatus(t, resp)
}
func TestPostLog(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
enableDev := *th.App.Config().ServiceSettings.EnableDeveloper
defer func() {
*th.App.Config().ServiceSettings.EnableDeveloper = enableDev
}()
*th.App.Config().ServiceSettings.EnableDeveloper = true
message := make(map[string]string)
message["level"] = "ERROR"
message["message"] = "this is a test"
_, resp := Client.PostLog(message)
CheckNoError(t, resp)
Client.Logout()
_, resp = Client.PostLog(message)
CheckNoError(t, resp)
*th.App.Config().ServiceSettings.EnableDeveloper = false
_, resp = Client.PostLog(message)
CheckForbiddenStatus(t, resp)
logMessage, resp := th.SystemAdminClient.PostLog(message)
CheckNoError(t, resp)
if len(logMessage) == 0 {
t.Fatal("should return the log message")
}
}
func TestUploadLicenseFile(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
ok, resp := Client.UploadLicenseFile([]byte{})
CheckForbiddenStatus(t, resp)
if ok {
t.Fatal("should fail")
}
ok, resp = th.SystemAdminClient.UploadLicenseFile([]byte{})
CheckBadRequestStatus(t, resp)
if ok {
t.Fatal("should fail")
}
}
func TestRemoveLicenseFile(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
ok, resp := Client.RemoveLicenseFile()
CheckForbiddenStatus(t, resp)
if ok {
t.Fatal("should fail")
}
ok, resp = th.SystemAdminClient.RemoveLicenseFile()
CheckNoError(t, resp)
if !ok {
t.Fatal("should pass")
}
}
func TestGetAnalyticsOld(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
rows, resp := Client.GetAnalyticsOld("", "")
CheckForbiddenStatus(t, resp)
if rows != nil {
t.Fatal("should be nil")
}
rows, resp = th.SystemAdminClient.GetAnalyticsOld("", "")
CheckNoError(t, resp)
found := false
found2 := false
for _, row := range rows {
if row.Name == "unique_user_count" {
found = true
} else if row.Name == "inactive_user_count" {
found2 = true
assert.True(t, row.Value >= 0)
}
}
assert.True(t, found, "should return unique user count")
assert.True(t, found2, "should return inactive user count")
_, resp = th.SystemAdminClient.GetAnalyticsOld("post_counts_day", "")
CheckNoError(t, resp)
_, resp = th.SystemAdminClient.GetAnalyticsOld("user_counts_with_posts_day", "")
CheckNoError(t, resp)
_, resp = th.SystemAdminClient.GetAnalyticsOld("extra_counts", "")
CheckNoError(t, resp)
rows, resp = th.SystemAdminClient.GetAnalyticsOld("", th.BasicTeam.Id)
CheckNoError(t, resp)
for _, row := range rows {
if row.Name == "inactive_user_count" {
assert.Equal(t, float64(-1), row.Value, "inactive user count should be -1 when team specified")
}
}
rows2, resp2 := th.SystemAdminClient.GetAnalyticsOld("standard", "")
CheckNoError(t, resp2)
assert.Equal(t, "total_websocket_connections", rows2[5].Name)
assert.Equal(t, float64(0), rows2[5].Value)
WebSocketClient, err := th.CreateWebSocketClient()
if err != nil {
t.Fatal(err)
}
rows2, resp2 = th.SystemAdminClient.GetAnalyticsOld("standard", "")
CheckNoError(t, resp2)
assert.Equal(t, "total_websocket_connections", rows2[5].Name)
assert.Equal(t, float64(1), rows2[5].Value)
WebSocketClient.Close()
rows2, resp2 = th.SystemAdminClient.GetAnalyticsOld("standard", "")
CheckNoError(t, resp2)
assert.Equal(t, "total_websocket_connections", rows2[5].Name)
assert.Equal(t, float64(0), rows2[5].Value)
Client.Logout()
_, resp = Client.GetAnalyticsOld("", th.BasicTeam.Id)
CheckUnauthorizedStatus(t, resp)
}
func TestS3TestConnection(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
s3Host := os.Getenv("CI_MINIO_HOST")
if s3Host == "" {
s3Host = "dockerhost"
}
s3Port := os.Getenv("CI_MINIO_PORT")
if s3Port == "" {
s3Port = "9001"
}
s3Endpoint := fmt.Sprintf("%s:%s", s3Host, s3Port)
config := model.Config{
FileSettings: model.FileSettings{
DriverName: model.NewString(model.IMAGE_DRIVER_S3),
AmazonS3AccessKeyId: model.MINIO_ACCESS_KEY,
AmazonS3SecretAccessKey: model.MINIO_SECRET_KEY,
AmazonS3Bucket: "",
AmazonS3Endpoint: s3Endpoint,
AmazonS3SSL: model.NewBool(false),
},
}
_, resp := Client.TestS3Connection(&config)
CheckForbiddenStatus(t, resp)
_, resp = th.SystemAdminClient.TestS3Connection(&config)
CheckBadRequestStatus(t, resp)
if resp.Error.Message != "S3 Bucket is required" {
t.Fatal("should return error - missing s3 bucket")
}
// If this fails, check the test configuration to ensure minio is setup with the
// `mattermost-test` bucket defined by model.MINIO_BUCKET.
config.FileSettings.AmazonS3Bucket = model.MINIO_BUCKET
config.FileSettings.AmazonS3Region = "us-east-1"
_, resp = th.SystemAdminClient.TestS3Connection(&config)
CheckOKStatus(t, resp)
config.FileSettings.AmazonS3Region = ""
_, resp = th.SystemAdminClient.TestS3Connection(&config)
CheckOKStatus(t, resp)
config.FileSettings.AmazonS3Bucket = "Wrong_bucket"
_, resp = th.SystemAdminClient.TestS3Connection(&config)
CheckInternalErrorStatus(t, resp)
assert.Equal(t, "Unable to create bucket.", resp.Error.Message)
config.FileSettings.AmazonS3Bucket = "shouldcreatenewbucket"
_, resp = th.SystemAdminClient.TestS3Connection(&config)
CheckOKStatus(t, resp)
}
func TestSupportedTimezones(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
supportedTimezonesFromConfig := th.App.Timezones.GetSupported()
supportedTimezones, resp := Client.GetSupportedTimezone()
CheckNoError(t, resp)
assert.Equal(t, supportedTimezonesFromConfig, supportedTimezones)
}
func TestRedirectLocation(t *testing.T) {
expected := "https://mattermost.com/wp-content/themes/mattermostv2/img/logo-light.svg"
testServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
res.Header().Set("Location", expected)
res.WriteHeader(http.StatusFound)
res.Write([]byte("body"))
}))
defer func() { testServer.Close() }()
mockBitlyLink := testServer.URL
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
enableLinkPreviews := *th.App.Config().ServiceSettings.EnableLinkPreviews
defer func() {
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.EnableLinkPreviews = enableLinkPreviews })
}()
*th.App.Config().ServiceSettings.EnableLinkPreviews = true
_, resp := th.SystemAdminClient.GetRedirectLocation("https://mattermost.com/", "")
CheckNoError(t, resp)
_, resp = th.SystemAdminClient.GetRedirectLocation("", "")
CheckBadRequestStatus(t, resp)
actual, resp := th.SystemAdminClient.GetRedirectLocation(mockBitlyLink, "")
CheckNoError(t, resp)
if actual != expected {
t.Errorf("Expected %v but got %v.", expected, actual)
}
*th.App.Config().ServiceSettings.EnableLinkPreviews = false
actual, resp = th.SystemAdminClient.GetRedirectLocation("https://mattermost.com/", "")
CheckNoError(t, resp)
assert.Equal(t, actual, "")
actual, resp = th.SystemAdminClient.GetRedirectLocation("", "")
CheckNoError(t, resp)
assert.Equal(t, actual, "")
actual, resp = th.SystemAdminClient.GetRedirectLocation(mockBitlyLink, "")
CheckNoError(t, resp)
assert.Equal(t, actual, "")
Client.Logout()
_, resp = Client.GetRedirectLocation("", "")
CheckUnauthorizedStatus(t, resp)
}
|
[
"\"CI_INBUCKET_HOST\"",
"\"CI_INBUCKET_PORT\"",
"\"CI_MINIO_HOST\"",
"\"CI_MINIO_PORT\""
] |
[] |
[
"CI_MINIO_PORT",
"CI_INBUCKET_PORT",
"CI_MINIO_HOST",
"CI_INBUCKET_HOST"
] |
[]
|
["CI_MINIO_PORT", "CI_INBUCKET_PORT", "CI_MINIO_HOST", "CI_INBUCKET_HOST"]
|
go
| 4 | 0 | |
config/init.go
|
/*
Copyright 2017 WALLIX
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"os"
"path/filepath"
"strconv"
"github.com/wallix/awless/aws/services"
"github.com/wallix/awless/database"
)
var (
AwlessHome = filepath.Join(os.Getenv("HOME"), ".awless")
DBPath = filepath.Join(AwlessHome, database.Filename)
Dir = filepath.Join(AwlessHome, "aws")
KeysDir = filepath.Join(AwlessHome, "keys")
AwlessFirstInstall bool
)
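// init publishes awless' own directories as __AWLESS_* environment variables
// so they can be looked up later via os.Getenv by other parts of the program.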
func init() {
os.Setenv("__AWLESS_HOME", AwlessHome)
os.Setenv("__AWLESS_CACHE", filepath.Join(AwlessHome, "cache"))
os.Setenv("__AWLESS_KEYS_DIR", KeysDir)
}
func InitAwlessEnv() error {
_, err := os.Stat(DBPath)
AwlessFirstInstall = os.IsNotExist(err)
os.Setenv("__AWLESS_FIRST_INSTALL", strconv.FormatBool(AwlessFirstInstall))
os.MkdirAll(KeysDir, 0700)
if AwlessFirstInstall {
fmt.Fprintln(os.Stderr, AWLESS_ASCII_LOGO)
fmt.Fprintln(os.Stderr, "Welcome! Resolving environment data...")
fmt.Fprintln(os.Stderr)
if err = InitConfig(resolveRequiredConfigFromEnv()); err != nil {
return err
}
err = database.Execute(func(db *database.DB) error {
return db.SetStringValue("current.version", Version)
})
if err != nil {
fmt.Fprintf(os.Stderr, "cannot store current version in db: %s\n", err)
}
}
if err = LoadConfig(); err != nil {
return err
}
return nil
}
func resolveRequiredConfigFromEnv() map[string]string {
region := awsservices.ResolveRegionFromEnv()
resolved := make(map[string]string)
if region != "" {
resolved[RegionConfigKey] = region
}
return resolved
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
app/database/redis.go
|
package database
import (
"fmt"
"os"
"github.com/go-redis/redis/v7"
)
// Redis is a db.
type Redis struct{}
// NewRedis creates a Redis.
func NewRedis() *Redis {
return &Redis{}
}
// Conn connects to redis.
func (d *Redis) Conn() (*redis.Client, error) {
addr := fmt.Sprintf("%s:%s", os.Getenv("REDIS_HOST"), os.Getenv("REDIS_PORT"))
conn := redis.NewClient(&redis.Options{
Addr: addr,
Password: os.Getenv("REDIS_PASSWORD"),
DB: 0,
})
_, err := conn.Ping().Result()
if err != nil {
return nil, err
}
return conn, nil
}
|
[
"\"REDIS_HOST\"",
"\"REDIS_PORT\"",
"\"REDIS_PASSWORD\""
] |
[] |
[
"REDIS_PASSWORD",
"REDIS_PORT",
"REDIS_HOST"
] |
[]
|
["REDIS_PASSWORD", "REDIS_PORT", "REDIS_HOST"]
|
go
| 3 | 0 | |
codegen/import_test.go
|
package codegen
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestDeterministicDecollisioning(t *testing.T) {
a := Imports{
imports: []*Import{
{Name: "types", Path: "foobar/types"},
{Name: "types", Path: "bazfoo/types"},
},
}.finalize()
b := Imports{
imports: []*Import{
{Name: "types", Path: "bazfoo/types"},
{Name: "types", Path: "foobar/types"},
},
}.finalize()
require.EqualValues(t, a, b)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
cpt/test/integration/basic_test.py
|
import os
import unittest
import sys
from conans import tools
from conans.model.ref import ConanFileReference
from cpt.test.integration.base import BaseTest
from cpt.packager import ConanMultiPackager
from cpt.test.unit.utils import MockCIManager
class SimpleTest(BaseTest):
def test_missing_full_reference(self):
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
pass
"""
self.save_conanfile(conanfile)
mp = ConanMultiPackager(username="lasote")
with self.assertRaisesRegexp(Exception, "Specify a CONAN_REFERENCE or name and version"):
mp.add_common_builds()
def test_missing_username(self):
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
name = "lib"
version = "1.0"
options = {"shared": [True, False]}
default_options = "shared=False"
"""
self.save_conanfile(conanfile)
with self.assertRaisesRegexp(Exception, "Instance ConanMultiPackage with 'username' "
"parameter or use CONAN_USERNAME env variable"):
ConanMultiPackager()
@unittest.skipUnless(sys.platform.startswith("win"), "Requires Windows")
def test_msvc(self):
conanfile = """from conans import ConanFile
import os
class Pkg(ConanFile):
settings = "os", "compiler", "build_type", "arch"
def build(self):
assert("WindowsLibPath" in os.environ)
"""
self.save_conanfile(conanfile)
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
visual_versions=[15],
archs=["x86"],
build_types=["Release"],
visual_runtimes=["MD"],
reference="zlib/1.2.2")
self.packager.add_common_builds()
self.packager.run_builds(1, 1)
def test_msvc_exclude_precommand(self):
conanfile = """from conans import ConanFile
import os
class Pkg(ConanFile):
settings = "os", "compiler", "build_type", "arch"
def build(self):
assert("WindowsLibPath" not in os.environ)
"""
self.save_conanfile(conanfile)
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
visual_versions=[15],
archs=["x86"],
build_types=["Release"],
visual_runtimes=["MD"],
exclude_vcvars_precommand=True,
reference="zlib/1.2.2")
self.packager.add_common_builds()
self.packager.run_builds(1, 1)
def test_shared_option_auto_managed(self):
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
name = "lib"
version = "1.0"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
"""
self.save_conanfile(conanfile)
self.packager = ConanMultiPackager(username="lasote")
self.packager.add_common_builds()
self.assertIn("lib:shared", self.packager.items[0].options)
# Even without name and version but reference
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
"""
self.save_conanfile(conanfile)
self.packager = ConanMultiPackager(username="lasote", reference="lib2/1.0")
self.packager.add_common_builds()
self.assertIn("lib2:shared", self.packager.items[0].options)
self.packager = ConanMultiPackager(username="lasote", reference="lib2/1.0")
self.packager.add_common_builds(shared_option_name=False)
self.assertNotIn("lib2:shared", self.packager.items[0].options)
def test_auto_managed_subdirectory(self):
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
name = "lib"
version = "1.0"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
"""
cwd = os.path.join(self.tmp_folder, "subdirectory")
tools.save(os.path.join(cwd, "conanfile.py"), conanfile)
self.packager = ConanMultiPackager(username="lasote", cwd=cwd)
self.packager.add_common_builds()
self.assertGreater(len(self.packager.items), 0)
self.assertIn("lib:shared", self.packager.items[0].options)
def test_exported_files(self):
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
name = "lib"
version = "1.0"
settings = "os"
exports = "*"
exports_sources = "source*"
"""
ci_manager = MockCIManager()
self.save_conanfile(conanfile)
tools.save(os.path.join(self.tmp_folder, "other_file"), "Dummy contents")
tools.save(os.path.join(self.tmp_folder, "source.cpp"), "Dummy contents")
self.packager = ConanMultiPackager(username="lasote", reference="lib/1.0", ci_manager=ci_manager)
self.packager.add({}, {}, {}, {})
self.packager.run()
ref = ConanFileReference.loads("lib/1.0@lasote/testing")
pf = self.client_cache.package_layout(ref).export()
found_in_export = False
for exported in os.listdir(pf):
if "other_file" == exported:
found_in_export = True
break
self.assertTrue(found_in_export)
pf = self.client_cache.package_layout(ref).export_sources()
found_in_export_sources = False
for exported in os.listdir(pf):
if "source.cpp" == exported:
found_in_export_sources = True
break
self.assertTrue(found_in_export_sources)
def test_build_policy(self):
ci_manager = MockCIManager(build_policy="outdated")
conanfile = """from conans import ConanFile
import os
class Pkg(ConanFile):
name = "lib"
version = "1.2"
settings = "os", "compiler", "build_type", "arch"
"""
self.save_conanfile(conanfile)
with tools.environment_append({"CONAN_USERNAME": "lasote"}):
self.packager = ConanMultiPackager(channel="mychannel",
gcc_versions=["6"],
visual_versions=["12"],
archs=["x86", "x86_64"],
build_types=["Release"],
ci_manager=ci_manager)
self.packager.add_common_builds()
self.packager.run()
def test_custom_conanfile(self):
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
name = "lib"
version = "1.2"
settings = "os", "compiler", "build_type", "arch"
"""
tools.save(os.path.join(self.tmp_folder, "foobar.py"), conanfile)
with tools.environment_append({"CONAN_CONANFILE": "foobar.py"}):
self.packager = ConanMultiPackager(username="pepe",
channel="mychannel",
out=self.output.write)
self.packager.add({}, {}, {}, {})
self.packager.run()
self.assertIn("conanfile | foobar.py", self.output)
tools.save(os.path.join(self.tmp_folder, "custom_recipe.py"), conanfile)
self.packager = ConanMultiPackager(username="pepe",
channel="mychannel",
conanfile="custom_recipe.py",
out=self.output.write)
self.packager.add({}, {}, {}, {})
self.packager.run()
self.assertIn("conanfile | custom_recipe.py", self.output)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
trueSite/trueSite/wsgi.py
|
"""
WSGI config for trueSite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'trueSite.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
ui/compose.go
|
package ui
import (
"bufio"
"io/ioutil"
"os"
"os/exec"
"strings"
"github.com/emersion/go-message"
_ "github.com/emersion/go-message/charset"
"github.com/gdamore/tcell/v2"
"github.com/gdamore/tcell/v2/views"
"github.com/stregouet/nuntius/config"
"github.com/stregouet/nuntius/lib"
sm "github.com/stregouet/nuntius/statesmachines"
"github.com/stregouet/nuntius/widgets"
"github.com/stregouet/nuntius/workers"
)
type ComposeView struct {
machine *lib.Machine
bindings config.Mapping
term *widgets.Terminal
screen tcell.Screen
*widgets.BaseWidget
}
func NewComposeView(acc *config.Account, bindings config.Mapping) *ComposeView {
email, err := ioutil.TempFile("", "nuntius-*.eml")
machine := sm.NewComposeMachine(email)
if err != nil {
App.logger.Errorf("cannot create tmp file %v", err)
machine.Send(&lib.Event{sm.TR_COMPOSE_SET_ERR, nil})
}
b := &widgets.BaseWidget{}
c := &ComposeView{
machine: machine,
bindings: bindings,
BaseWidget: b,
}
c.setTerminal()
machine.OnTransition(func(s lib.StateType, ctx interface{}, ev *lib.Event) {
switch ev.Transition {
case sm.TR_COMPOSE_SEND:
state := ctx.(*sm.ComposeMachineCtx)
App.PostImapMessage(
&workers.SendMail{
Body: strings.NewReader(state.Body),
},
acc.Name,
func(response workers.Message) error {
if r, ok := response.(*workers.Error); ok {
App.logger.Errorf("cannot send mail %v", r.Error)
c.Messagef("error sending mail (%v)", r.Error)
}
return nil
},
)
c.AskRedraw()
case sm.TR_COMPOSE_SET_ERR:
c.AskRedraw()
case sm.TR_COMPOSE_REVIEW:
if c.term != nil {
c.term.Destroy()
c.term = nil
}
c.AskRedraw()
case sm.TR_COMPOSE_WRITE:
c.setTerminal()
c.setTermView(c.GetViewPort(), c.screen)
c.AskRedraw()
}
})
c.OnSetViewPort(func(view *views.ViewPort, screen tcell.Screen) {
c.screen = screen
c.setTermView(view, screen)
})
return c
}
// Tab interface
func (c *ComposeView) TabTitle() string {
return "compose"
}
func (c *ComposeView) setTermView(view *views.ViewPort, screen tcell.Screen) {
if c.term != nil {
c.term.SetViewPort(view, screen)
}
}
func (c *ComposeView) state() *sm.ComposeMachineCtx {
return c.machine.Context.(*sm.ComposeMachineCtx)
}
func (c *ComposeView) termClosed(err error) {
App.transitions <- &lib.Event{sm.TR_COMPOSE_REVIEW, nil}
}
func (c *ComposeView) setTerminal() {
state := c.state()
editorName := os.Getenv("EDITOR")
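// Running the editor through `sh -c` means $EDITOR may itself contain
// arguments (e.g. "vim -u NONE") and still be resolved correctly.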
editor := exec.Command("/bin/sh", "-c", editorName+" "+state.MailFile.Name())
c.term = widgets.NewTerminal(editor)
c.term.OnClose = c.termClosed
c.term.AskingRedraw(func() {
c.AskRedraw()
})
}
func (c *ComposeView) err(msg string, format ...interface{}) {
App.logger.Errorf(msg, format...)
c.machine.Send(&lib.Event{sm.TR_COMPOSE_SET_ERR, nil})
}
func (c *ComposeView) drawMail(content string) {
style := tcell.StyleDefault
bold := style.Bold(true)
r := strings.NewReader(content)
msg, err := message.Read(r)
if err != nil {
c.Messagef("malformed mail")
c.err("cannot parse mail %v", err)
return
}
hf := msg.Header.Fields()
line := 0
for hf.Next() {
offset := c.Print(0, line, bold, hf.Key()+": ")
val, err := hf.Text()
if err != nil {
c.err("cannot parse header field %v", err)
return
}
c.Print(offset, line, style, val)
line++
}
s := bufio.NewScanner(msg.Body)
line++
for s.Scan() {
c.Print(0, line, style, s.Text())
line++
}
}
func (c *ComposeView) Draw() {
style := tcell.StyleDefault
if c.machine.Current == sm.STATE_COMPOSE_ERR {
c.Clear()
c.Print(0, 0, style, "error occurred...")
} else if c.machine.Current == sm.STATE_COMPOSE_WRITE_MAIL {
c.term.Draw()
} else {
c.Clear()
state := c.state()
c.drawMail(state.Body)
}
}
func (c *ComposeView) SetTermView(view *views.ViewPort, screen tcell.Screen) {
c.term.SetViewPort(view, screen)
}
func (c *ComposeView) IsActiveTerm() bool {
return c.machine.Current == sm.STATE_COMPOSE_WRITE_MAIL
}
func (c *ComposeView) HandleEvent(ks []*lib.KeyStroke) bool {
if c.machine.Current == sm.STATE_COMPOSE_WRITE_MAIL {
return c.term.HandleEvent(ks)
}
if cmd := c.bindings.FindCommand(ks); cmd != "" {
mev, err := c.machine.BuildEvent(cmd)
if err != nil {
App.logger.Errorf("error building machine event from `%s` (%v)", cmd, err)
return false
}
if c.machine.Send(mev) {
return true
}
}
return false
}
func (c *ComposeView) HandleTransitions(ev *lib.Event) bool {
return c.machine.Send(ev)
}
|
[
"\"EDITOR\""
] |
[] |
[
"EDITOR"
] |
[]
|
["EDITOR"]
|
go
| 1 | 0 | |
DQMOffline/CalibCalo/test/dohistos.py
|
#! /usr/bin/env python3
from __future__ import print_function
import os
import sys
import errno
#print error/help message and exit
def help_message():
print("Usage:\n\
dohistos [folder_name] [options] -v versions_to_compare -f files_to_compare\n\
Versions and files must be whitespace separated.\n\
If no folder is specified the pwd will be used.\n\
folder_name, if specified, must be the first argument.\n\
Options:\n\
--outfile out.root specify the output root file name\n\
-h prints this message\n\
--cvs uses cvs access to retrieve edm2me_cfg.py\n\
-a uses anonymous cvs access (only for debug purposes)\n\
--dbs use dbs access to retrieve file paths, edit directly the source code to modify the query, while in --dbs mode don't use -f option\n\
--condition [IDEAL,STARTUP] permits to specify the conditions when in dbs mode; if not specified IDEAL is default; must be coherent with --query option if specified\n\
-n [1000] specify the number of events\n\
--like additional like statement in the dbs query to narrow the number of dataset returned\n\
Example:\n\
./dohistos.py Folder01 -v CMSSW_X_Y_Z CMSSW_J_K_W -f file:/file1.root file:/file2.root")
sys.exit()
#custom query is at the moment not useful:
#--query with --dbs option specify the query passed to dbs cli (with quotes)\n\
#run command in the command line with specified environment
def runcmd(envir,program,*args):
pid=os.fork()
if not pid:
os.execvpe(program,(program,)+args,envir)
return os.wait()[0]
#print the help message
if "-h" in sys.argv or "-help" in sys.argv or "--help" in sys.argv:
help_message()
#make the working directory (if specified) and move there
if len(sys.argv)>1:
if not sys.argv[1][0]=="-":
name=sys.argv[1]
try:
os.mkdir(name)
except OSError as inst:
if inst.errno==errno.EEXIST:
print("Warning: the specified working folder already exist")
os.chdir(name)
else: help_message()
#read and parse the input
state="n"
like_query=""
num_evts="1000"
cvs=False
dbs=False
use_manual_num=False
anon=False#only for debug purposes
conditions='IDEAL_31X'
conditions_file='IDEAL'
#query_list=[] #not useful
ver=[]
fil=[]
out_root="histo.root"
#used state letters (please keep updated): cflnqrv
for arg in sys.argv:
if arg=="-v" or arg=="-V":
state="v"
elif arg=="-f" or arg=="-F":
state="f"
elif arg=="--outfile":
state="r"
elif arg=="--conditions":
state="c"
elif arg=="-n":
state="n"
#elif arg=="--query": #not useful
# state="q"
elif arg=="--cvs":
cvs=True
elif arg=="--dbs":
dbs=True
elif arg=="-a":#only for debug purposes
anon=True
elif arg=="--like":
state="l"
############################################## state handling
elif state=="v":
ver.append(arg)
elif state=="f":
fil.append(arg)
elif state=="r":
out_root=arg
elif state=="l":
like_query=arg
elif state=="c":
conditions=arg
usn=0
for ncondt,condt in enumerate(arg):
if condt=='_':
usn=ncondt
break
conditions_file=conditions[:usn]
elif state=="n":
num_evts=arg
use_manual_num=True
#elif state=="q": #not useful
# query_list.append(arg)
#check consistency of -f and --dbs
if len(fil)>0 and dbs:
print("when using --dbs option, -f option is not needed")
help_message()
###dbs query to retrieve the data with option --dbs
###|||||||||||||||||||||||||||||||||||||||||||||||||
dbsstr='python $DBSCMD_HOME/dbsCommandLine.py -c search --query="find dataset where phygrp=RelVal and primds=RelValMinBias and dataset.tier=GEN-SIM-DIGI-RAW-HLTDEBUG and file.name like *'+conditions+'* and file.name like *'+like_query+'* and file.release='
#dbsstr='dbs -c search --query="find dataset where phygrp=RelVal and primds=RelValMinBias and dataset.tier=GEN-SIM-DIGI-RAW-HLTDEBUG and file.name like *'+conditions+'* and file.release='
###|||||||||||||||||||||||||||||||||||||||||||||||||
dbsdataset=[]
dbsfile=[]
nevt=[]
nevent=0
#create folders and generate files
for nv,v in enumerate(ver):
os.system("scramv1 project CMSSW "+v)
os.chdir(v)
env=os.popen("scramv1 runtime -sh","r")
environment=os.environ
for l in env.readlines():
try:
variable,value=l[7:len(l)-3].strip().split("=",1)
environment[variable]=value[1:]
except ValueError:
print("Warning: environment variable problem")
env.close()
if cvs:
if anon:#only for debug purposes, works only in cmsfarm
os.system("eval `scramv1 runtime -sh`; source /cms-sw/slc4_ia32_gcc345/cms/cms-cvs-utils/1.0/bin/cmscvsroot.sh CMSSW; cvs login; addpkg DQMOffline/CalibCalo")
else:
runcmd(environment,"addpkg","DQMOffline/CalibCalo")
#dbs code
if dbs:
dbsdataset=[]
dbsfile=[]
nevt=[]
#searching the required dataset
inifil=False
ris=os.popen(dbsstr+v+'"')
for lnris in ris.readlines():
print(lnris)
if inifil:
dbsdataset.append(lnris)
else:
#if lnris[:3]=="___":
if lnris[:3]=="---":
inifil=True
ris.close()
dbsdataset=dbsdataset[2:]
dbsdataset[0]=dbsdataset[0][0:-1]
for lnris2 in dbsdataset:
print(lnris2)
if len(dbsdataset)>1 or len(dbsdataset)==0:
#print dbsdataset
print("dbs search returned ",len(dbsdataset)," records, please modify the query so only one dataset is returned")
sys.exit()
else:
#extracting the file names relative to the selected dataset
inifil=False
ris=os.popen('python $DBSCMD_HOME/dbsCommandLine.py -c search --query="find file where dataset like *'+dbsdataset[0]+'*"')
for lnris in ris.readlines():
if inifil:
dbsfile.append(lnris)
else:
if lnris[:3]=="---":
inifil=True
ris.close()
dbsfile=dbsfile[2:]
for dbsfn,dbsf in enumerate(dbsfile):
dbsfile[dbsfn]=dbsfile[dbsfn][:-1]
#extracting the total number of events #not very useful at the moment, it is better to use manual extraction
#if not use_manual_num:
# for dbsf in dbsfile:
# inifil=False
# ris=os.popen('python $DBSCMD_HOME/dbsCommandLine.py -c search --query="find file.numevents where file like *'+dbsf+'*"')
# for lnris in ris:
# if inifil:
# nevt.append(lnris)
# else:
# if lnris[:3]=="___":
# inifil=True
# nevt.pop()
# ris.close()
# for nevtn,nevte in nevt:
# nevt[nevtn]=int(nevt[nevtn][:-2])
# nevent
# for nevte in nevt:
#
#for f in fil: remember indentation if uncommenting this
if not dbs:
runcmd(environment,"cmsDriver.py","testALCA","-s","ALCA:Configuration/StandardSequences/AlCaRecoStream_EcalCalPhiSym_cff:EcalCalPhiSym+DQM","-n",num_evts,"--filein",fil[nv],"--fileout","file:dqm.root","--eventcontent","FEVT","--conditions","FrontierConditions_GlobalTag,"+conditions+"::All")#,"--no_exec")
else:
sfl=""
for fl in dbsfile:
sfl=sfl+','+fl
sfl=sfl[1:]
runcmd(environment,"cmsDriver.py","testALCA","-s","ALCA:Configuration/StandardSequences/AlCaRecoStream_EcalCalPhiSym_cff:EcalCalPhiSym+DQM","-n",num_evts,"--filein",sfl,"--fileout","file:dqm.root","--eventcontent","FEVT","--conditions","FrontierConditions_GlobalTag,"+conditions+"::All","--no_exec")
alcareco=open("testALCA_ALCA_"+conditions_file+".py",'r')
alcarecoln=alcareco.readlines()
alcareco.close()
arnum=0
for arln,arl in enumerate(alcarecoln):
if sfl in arl:
arnum=arln
alcarecoln[arnum]=alcarecoln[arnum].replace(",","','")
alcareco=open("testALCA_ALCA_"+conditions_file+".py",'w')
for arln in alcarecoln:
alcareco.write(arln)
alcareco.close()
runcmd(environment,"cmsRun","testALCA_ALCA_"+conditions_file+".py")
os.system("mv ALCARECOEcalCalPhiSym.root dqm.root")
if cvs:
runcmd(environment,"cmsRun","src/DQMOffline/CalibCalo/test/edm2me_cfg.py")
else:
runcmd(environment,"cmsRun",environment["CMSSW_RELEASE_BASE"]+"/src/DQMOffline/CalibCalo/test/edm2me_cfg.py")
os.system("mv DQM_V0001_R000000001__A__B__C.root "+out_root)
os.system("rm dqm.root")
os.chdir("../")
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
plugin/contractpb/contractpb.go
|
package contractpb
import (
"errors"
"fmt"
"os"
"sync"
"time"
"github.com/gogo/protobuf/proto"
"github.com/loomnetwork/go-loom"
cctypes "github.com/loomnetwork/go-loom/builtin/types/chainconfig"
"github.com/loomnetwork/go-loom/plugin"
ptypes "github.com/loomnetwork/go-loom/plugin/types"
"github.com/loomnetwork/go-loom/types"
)
var (
ErrNotFound = errors.New("not found")
)
// StaticContext is the high-level context provided to Go contract methods that don't mutate state.
type StaticContext interface {
plugin.StaticAPI
Get(key []byte, pb proto.Message) error
Range(prefix []byte) plugin.RangeData
Has(key []byte) bool
Block() loom.BlockHeader
Now() time.Time
Message() plugin.Message
ContractAddress() loom.Address
Logger() *loom.Logger
GetEvmTxReceipt([]byte) (ptypes.EvmTxReceipt, error)
HasPermissionFor(addr loom.Address, token []byte, roles []string) (bool, []string)
FeatureEnabled(name string, defaultVal bool) bool
Config() *cctypes.Config
Validators() []*types.Validator
EnabledFeatures() []string
// ContractRecord retrieves the contract meta data stored in the Registry.
// NOTE: This method requires Registry v2.
ContractRecord(contractAddr loom.Address) (*plugin.ContractRecord, error)
}
// Context is the high-level context provided to Go contract methods that mutate state.
type Context interface {
plugin.VolatileAPI
StaticContext
Set(key []byte, pb proto.Message) error
Delete(key []byte)
HasPermission(token []byte, roles []string) (bool, []string)
GrantPermissionTo(addr loom.Address, token []byte, role string)
RevokePermissionFrom(addr loom.Address, token []byte, role string)
GrantPermission(token []byte, roles []string)
}
type Contract interface {
Meta() (plugin.Meta, error)
}
// Implements the StaticContext interface for Go contract methods.
type wrappedPluginStaticContext struct {
plugin.StaticContext
logger *loom.Logger
}
var _ StaticContext = &wrappedPluginStaticContext{}
func (c *wrappedPluginStaticContext) Logger() *loom.Logger {
return c.logger
}
func (c *wrappedPluginStaticContext) Get(key []byte, pb proto.Message) error {
data := c.StaticContext.Get(key)
if len(data) == 0 {
return ErrNotFound
}
return proto.Unmarshal(data, pb)
}
// HasPermissionFor checks whether the given `addr` has any of the permission given in `roles` on `token`
func (c *wrappedPluginStaticContext) HasPermissionFor(addr loom.Address, token []byte, roles []string) (bool, []string) {
found := false
foundRoles := []string{}
for _, role := range roles {
v := c.StaticContext.Get(rolePermKey(addr, token, role))
if v != nil && string(v) == "true" {
found = true
foundRoles = append(foundRoles, role)
}
}
return found, foundRoles
}
// FeatureEnabled checks whether the feature is enabled on chain
func (c *wrappedPluginStaticContext) FeatureEnabled(name string, defaultVal bool) bool {
return c.StaticContext.FeatureEnabled(name, defaultVal)
}
// Config returns the current on-chain config
func (c *wrappedPluginStaticContext) Config() *cctypes.Config {
return c.StaticContext.Config()
}
// EnabledFeatures returns a list of the currently activated feature flags.
func (c *wrappedPluginStaticContext) EnabledFeatures() []string {
return c.StaticContext.EnabledFeatures()
}
// Validators gives a list of validators
func (c *wrappedPluginStaticContext) Validators() []*types.Validator {
return c.StaticContext.Validators()
}
// Implements the Context interface for Go contract methods.
type wrappedPluginContext struct {
plugin.Context
wrappedPluginStaticContext
}
var _ Context = &wrappedPluginContext{}
func (c *wrappedPluginContext) Get(key []byte, pb proto.Message) error {
return c.wrappedPluginStaticContext.Get(key, pb)
}
func (c *wrappedPluginContext) Set(key []byte, pb proto.Message) error {
enc, err := proto.Marshal(pb)
if err != nil {
return err
}
c.Context.Set(key, enc)
return nil
}
// HasPermission checks whether the sender of the tx has any of the permission given in `roles` on `token`
func (c *wrappedPluginContext) HasPermission(token []byte, roles []string) (bool, []string) {
addr := c.Message().Sender
return c.HasPermissionFor(addr, token, roles)
}
// GrantPermissionTo sets a given `role` permission on `token` for the given `addr`
func (c *wrappedPluginContext) GrantPermissionTo(addr loom.Address, token []byte, role string) {
c.Context.Set(rolePermKey(addr, token, role), []byte("true"))
}
// RevokePermissionFrom removes a permission previously granted by GrantPermissionTo
func (c *wrappedPluginContext) RevokePermissionFrom(addr loom.Address, token []byte, role string) {
c.Context.Delete(rolePermKey(addr, token, role))
}
// Check if feature is enabled on chain
func (c *wrappedPluginContext) FeatureEnabled(name string, defaultVal bool) bool {
return c.Context.FeatureEnabled(name, defaultVal)
}
// Config returns the current on-chain config
func (c *wrappedPluginContext) Config() *cctypes.Config {
return c.Context.Config()
}
// EnabledFeatures returns a list of the currently activated feature flags.
func (c *wrappedPluginContext) EnabledFeatures() []string {
return c.Context.EnabledFeatures()
}
// Validators gives a list of validators
func (c *wrappedPluginContext) Validators() []*types.Validator {
return c.Context.Validators()
}
func rolePermKey(addr loom.Address, token []byte, role string) []byte {
// TODO: This generates an overly long key, the key generated here is prefixed by the contract
// address, but the wrappedPluginContext only has access to the state prefixed by the
// contract address, so all the permission keys are effectively prefixed by the contract
// address twice!
return []byte(fmt.Sprintf("%stoken:%s:role:%s", loom.PermPrefix(addr), token, []byte(role)))
}
// GrantPermission sets a given `role` permission on `token` for the sender of the tx
func (c *wrappedPluginContext) GrantPermission(token []byte, roles []string) {
for _, r := range roles {
c.GrantPermissionTo(c.Message().Sender, token, r)
}
}
func MakePluginContract(c Contract) plugin.Contract {
r, err := NewRequestDispatcher(c)
if err != nil {
panic(err)
}
setupLogger()
return r
}
func Call(ctx Context, addr loom.Address, inpb proto.Message, outpb proto.Message) error {
input, err := makeEnvelope(inpb)
if err != nil {
return err
}
output, err := ctx.Call(addr, input)
if err != nil {
return err
}
var resp plugin.Response
err = proto.Unmarshal(output, &resp)
if err != nil {
return err
}
if outpb != nil {
err = proto.Unmarshal(resp.Body, outpb)
if err != nil {
return err
}
}
return nil
}
func CallMethod(ctx Context, addr loom.Address, method string, inpb proto.Message, outpb proto.Message) error {
args, err := proto.Marshal(inpb)
if err != nil {
return err
}
query := &ptypes.ContractMethodCall{
Method: method,
Args: args,
}
return Call(ctx, addr, query, outpb)
}
var logger *loom.Logger
var onceSetup sync.Once
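// setupLogger initialises the package-level logger exactly once, reading the
// level and destination from CONTRACT_LOG_LEVEL / CONTRACT_LOG_DESTINATION
// and falling back to "info" and "file://-" respectively.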
func setupLogger() {
onceSetup.Do(func() {
level := "info"
envLevel := os.Getenv("CONTRACT_LOG_LEVEL")
if envLevel != "" {
level = envLevel
}
dest := "file://-"
envDest := os.Getenv("CONTRACT_LOG_DESTINATION")
if envDest != "" {
dest = envDest
}
logger = loom.NewLoomLogger(level, dest)
})
}
func StaticCall(ctx StaticContext, addr loom.Address, inpb proto.Message, outpb proto.Message) error {
input, err := makeEnvelope(inpb)
if err != nil {
return err
}
output, err := ctx.StaticCall(addr, input)
if err != nil {
return err
}
var resp plugin.Response
err = proto.Unmarshal(output, &resp)
if err != nil {
return err
}
if outpb != nil {
err = proto.Unmarshal(resp.Body, outpb)
if err != nil {
return err
}
}
return nil
}
func StaticCallMethod(ctx StaticContext, addr loom.Address, method string, inpb proto.Message, outpb proto.Message) error {
args, err := proto.Marshal(inpb)
if err != nil {
return err
}
query := &ptypes.ContractMethodCall{
Method: method,
Args: args,
}
return StaticCall(ctx, addr, query, outpb)
}
func CallEVM(ctx Context, addr loom.Address, input []byte, output *[]byte) error {
resp, err := ctx.CallEVM(addr, input, loom.NewBigUIntFromInt(0))
*output = resp
return err
}
func StaticCallEVM(ctx StaticContext, addr loom.Address, input []byte, output *[]byte) error {
resp, err := ctx.StaticCallEVM(addr, input)
*output = resp
return err
}
func WrapPluginContext(ctx plugin.Context) Context {
return &wrappedPluginContext{ctx, wrappedPluginStaticContext{ctx, logger}}
}
func WrapPluginStaticContext(ctx plugin.StaticContext) StaticContext {
return &wrappedPluginStaticContext{ctx, logger}
}
func makeEnvelope(inpb proto.Message) ([]byte, error) {
body, err := proto.Marshal(inpb)
if err != nil {
return nil, err
}
req := &plugin.Request{
ContentType: plugin.EncodingType_PROTOBUF3,
Accept: plugin.EncodingType_PROTOBUF3,
Body: body,
}
return proto.Marshal(req)
}
| ["\"CONTRACT_LOG_LEVEL\"", "\"CONTRACT_LOG_DESTINATION\""] | [] | ["CONTRACT_LOG_DESTINATION", "CONTRACT_LOG_LEVEL"] | [] | ["CONTRACT_LOG_DESTINATION", "CONTRACT_LOG_LEVEL"] | go | 2 | 0 | |
app/middleware/jwt.go
|
package middleware
import (
"encoding/json"
"errors"
"io"
"net/http"
"os"
"github.com/auth0/go-jwt-middleware"
"github.com/form3tech-oss/jwt-go"
"github.com/gin-gonic/gin"
)
// EnsureValidToken is a gin.HandlerFunc middleware that will check the validity of our JWT.
func EnsureValidToken(c *gin.Context) {
var jwtMiddleware = jwtmiddleware.New(jwtmiddleware.Options{
ValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {
// Verify 'aud' claim
aud := os.Getenv("AUTH0_AUDIENCE")
checkAud := token.Claims.(jwt.MapClaims).VerifyAudience(aud, false)
if !checkAud {
return token, errors.New("invalid audience")
}
// Verify 'iss' claim
iss := "https://" + os.Getenv("AUTH0_DOMAIN") + "/"
checkIss := token.Claims.(jwt.MapClaims).VerifyIssuer(iss, false)
if !checkIss {
return token, errors.New("invalid issuer")
}
cert, err := getPemCert(token)
if err != nil {
return token, err
}
return jwt.ParseRSAPublicKeyFromPEM([]byte(cert))
},
SigningMethod: jwt.SigningMethodRS256,
})
if err := jwtMiddleware.CheckJWT(c.Writer, c.Request); err != nil {
c.AbortWithStatus(http.StatusUnauthorized)
return
}
c.Next()
}
type Jwks struct {
Keys []JSONWebKeys `json:"keys"`
}
type JSONWebKeys struct {
Kty string `json:"kty"`
Kid string `json:"kid"`
Use string `json:"use"`
N string `json:"n"`
E string `json:"e"`
X5c []string `json:"x5c"`
}
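// getPemCert downloads the JWKS from the Auth0 tenant and returns the PEM-encoded
// certificate whose key ID matches the token's "kid" header.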
func getPemCert(token *jwt.Token) (string, error) {
resp, err := http.Get("https://" + os.Getenv("AUTH0_DOMAIN") + "/.well-known/jwks.json")
if err != nil {
return "", err
}
defer func(Body io.ReadCloser) {
_ = Body.Close()
}(resp.Body)
var jwks Jwks
if err = json.NewDecoder(resp.Body).Decode(&jwks); err != nil {
return "", err
}
var cert string
for _, key := range jwks.Keys {
if token.Header["kid"] == key.Kid {
cert = "-----BEGIN CERTIFICATE-----\n" + key.X5c[0] + "\n-----END CERTIFICATE-----"
break
}
}
if cert == "" {
return cert, errors.New("unable to find appropriate key")
}
return cert, nil
}
| ["\"AUTH0_AUDIENCE\"", "\"AUTH0_DOMAIN\"", "\"AUTH0_DOMAIN\""] | [] | ["AUTH0_DOMAIN", "AUTH0_AUDIENCE"] | [] | ["AUTH0_DOMAIN", "AUTH0_AUDIENCE"] | go | 2 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "second_project.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awardpro.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/frontend/frontend.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Web service for frontend
"""
import datetime
import json
import logging
import os
import socket
import sys
from decimal import Decimal
import requests
from requests.exceptions import HTTPError, RequestException
import jwt
from flask import Flask, abort, jsonify, make_response, redirect, \
render_template, request, url_for
from opentelemetry import trace
from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.propagators import set_global_textmap
from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
from opentelemetry.tools.cloud_trace_propagator import CloudTraceFormatPropagator
from opentelemetry.instrumentation.flask import FlaskInstrumentor
from opentelemetry.instrumentation.requests import RequestsInstrumentor
from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor
# pylint: disable-msg=too-many-locals
def create_app():
"""Flask application factory to create instances
of the Frontend Flask App
"""
app = Flask(__name__)
# Disabling unused-variable for lines with route decorated functions
# as pylint thinks they are unused
# pylint: disable=unused-variable
@app.route('/version', methods=['GET'])
def version():
"""
Service version endpoint
"""
return os.environ.get('VERSION'), 200
@app.route('/ready', methods=['GET'])
def readiness():
"""
Readiness probe
"""
return 'ok', 200
@app.route('/whereami', methods=['GET'])
def whereami():
"""
Returns the cluster name + zone name where this Pod is running.
"""
return "Cluster: " + cluster_name + ", Pod: " + pod_name + ", Zone: " + pod_zone, 200
@app.route("/")
def root():
"""
Renders home page or login page, depending on authentication status.
"""
token = request.cookies.get(app.config['TOKEN_NAME'])
if not verify_token(token):
return login_page()
return home()
@app.route("/home")
def home():
"""
Renders home page. Redirects to /login if token is not valid
"""
token = request.cookies.get(app.config['TOKEN_NAME'])
if not verify_token(token):
# user isn't authenticated
app.logger.debug('User isn\'t authenticated. Redirecting to login page.')
return redirect(url_for('login_page',
_external=True,
_scheme=app.config['SCHEME']))
token_data = jwt.decode(token, verify=False)
display_name = token_data['name']
username = token_data['user']
account_id = token_data['acct']
hed = {'Authorization': 'Bearer ' + token}
# get balance
balance = None
try:
url = '{}/{}'.format(app.config["BALANCES_URI"], account_id)
app.logger.debug('Getting account balance.')
response = requests.get(url=url, headers=hed, timeout=app.config['BACKEND_TIMEOUT'])
if response:
balance = response.json()
except (requests.exceptions.RequestException, ValueError) as err:
app.logger.error('Error getting account balance: %s', str(err))
# get history
transaction_list = None
try:
url = '{}/{}'.format(app.config["HISTORY_URI"], account_id)
app.logger.debug('Getting transaction history.')
response = requests.get(url=url, headers=hed, timeout=app.config['BACKEND_TIMEOUT'])
if response:
transaction_list = response.json()
except (requests.exceptions.RequestException, ValueError) as err:
app.logger.error('Error getting transaction history: %s', str(err))
# get contacts
contacts = []
try:
url = '{}/{}'.format(app.config["CONTACTS_URI"], username)
app.logger.debug('Getting contacts.')
response = requests.get(url=url, headers=hed, timeout=app.config['BACKEND_TIMEOUT'])
if response:
contacts = response.json()
except (requests.exceptions.RequestException, ValueError) as err:
app.logger.error('Error getting contacts: %s', str(err))
_populate_contact_labels(account_id, transaction_list, contacts)
return render_template('index.html',
cluster_name=cluster_name,
pod_name=pod_name,
pod_zone=pod_zone,
cymbal_logo=os.getenv('CYMBAL_LOGO', 'false'),
history=transaction_list,
balance=balance,
name=display_name,
account_id=account_id,
contacts=contacts,
message=request.args.get('msg', None),
bank_name=os.getenv('BANK_NAME', 'Toptal Bank v3'))
def _populate_contact_labels(account_id, transactions, contacts):
"""
Populate contact labels for the passed transactions.
Side effect:
Take each transaction and set the 'accountLabel' field with the label of
the contact each transaction was associated with. If there was no
associated contact, set 'accountLabel' to None.
If any parameter is None, nothing happens.
Params: account_id - the account id for the user owning the transaction list
transactions - a list of transactions as key/value dicts
[{transaction1}, {transaction2}, ...]
contacts - a list of contacts as key/value dicts
[{contact1}, {contact2}, ...]
"""
app.logger.debug('Populating contact labels.')
if account_id is None or transactions is None or contacts is None:
return
# Map contact accounts to their labels. If no label found, default to None.
contact_map = {c['account_num']: c.get('label') for c in contacts}
# Populate the 'accountLabel' field. If no match found, default to None.
for trans in transactions:
if trans['toAccountNum'] == account_id:
trans['accountLabel'] = contact_map.get(trans['fromAccountNum'])
elif trans['fromAccountNum'] == account_id:
trans['accountLabel'] = contact_map.get(trans['toAccountNum'])
@app.route('/payment', methods=['POST'])
def payment():
"""
Submits payment request to ledgerwriter service
Fails if:
- token is not valid
- basic validation checks fail
- response code from ledgerwriter is not 201
"""
token = request.cookies.get(app.config['TOKEN_NAME'])
if not verify_token(token):
# user isn't authenticated
app.logger.error('Error submitting payment: user is not authenticated.')
return abort(401)
try:
account_id = jwt.decode(token, verify=False)['acct']
recipient = request.form['account_num']
if recipient == 'add':
recipient = request.form['contact_account_num']
label = request.form.get('contact_label', None)
if label:
# new contact. Add to contacts list
_add_contact(label,
recipient,
app.config['LOCAL_ROUTING'],
False)
transaction_data = {"fromAccountNum": account_id,
"fromRoutingNum": app.config['LOCAL_ROUTING'],
"toAccountNum": recipient,
"toRoutingNum": app.config['LOCAL_ROUTING'],
"amount": int(Decimal(request.form['amount']) * 100),
"uuid": request.form['uuid']}
_submit_transaction(transaction_data)
app.logger.info('Payment initiated successfully.')
return redirect(url_for('home',
msg='Payment successful',
_external=True,
_scheme=app.config['SCHEME']))
except requests.exceptions.RequestException as err:
app.logger.error('Error submitting payment: %s', str(err))
except UserWarning as warn:
app.logger.error('Error submitting payment: %s', str(warn))
msg = 'Payment failed: {}'.format(str(warn))
return redirect(url_for('home',
msg=msg,
_external=True,
_scheme=app.config['SCHEME']))
return redirect(url_for('home',
msg='Payment failed',
_external=True,
_scheme=app.config['SCHEME']))
@app.route('/deposit', methods=['POST'])
def deposit():
"""
Submits deposit request to ledgerwriter service
Fails if:
- token is not valid
- routing number == local routing number
- response code from ledgerwriter is not 201
"""
token = request.cookies.get(app.config['TOKEN_NAME'])
if not verify_token(token):
# user isn't authenticated
app.logger.error('Error submitting deposit: user is not authenticated.')
return abort(401)
try:
# get account id from token
account_id = jwt.decode(token, verify=False)['acct']
if request.form['account'] == 'add':
external_account_num = request.form['external_account_num']
external_routing_num = request.form['external_routing_num']
if external_routing_num == app.config['LOCAL_ROUTING']:
raise UserWarning("invalid routing number")
external_label = request.form.get('external_label', None)
if external_label:
# new contact. Add to contacts list
_add_contact(external_label,
external_account_num,
external_routing_num,
True)
else:
account_details = json.loads(request.form['account'])
external_account_num = account_details['account_num']
external_routing_num = account_details['routing_num']
transaction_data = {"fromAccountNum": external_account_num,
"fromRoutingNum": external_routing_num,
"toAccountNum": account_id,
"toRoutingNum": app.config['LOCAL_ROUTING'],
"amount": int(Decimal(request.form['amount']) * 100),
"uuid": request.form['uuid']}
_submit_transaction(transaction_data)
app.logger.info('Deposit submitted successfully.')
return redirect(url_for('home',
msg='Deposit successful',
_external=True,
_scheme=app.config['SCHEME']))
except requests.exceptions.RequestException as err:
app.logger.error('Error submitting deposit: %s', str(err))
except UserWarning as warn:
app.logger.error('Error submitting deposit: %s', str(warn))
msg = 'Deposit failed: {}'.format(str(warn))
return redirect(url_for('home',
msg=msg,
_external=True,
_scheme=app.config['SCHEME']))
return redirect(url_for('home',
msg='Deposit failed',
_external=True,
_scheme=app.config['SCHEME']))
def _submit_transaction(transaction_data):
app.logger.debug('Submitting transaction.')
token = request.cookies.get(app.config['TOKEN_NAME'])
hed = {'Authorization': 'Bearer ' + token,
'content-type': 'application/json'}
resp = requests.post(url=app.config["TRANSACTIONS_URI"],
data=jsonify(transaction_data).data,
headers=hed,
timeout=app.config['BACKEND_TIMEOUT'])
try:
resp.raise_for_status() # Raise on HTTP Status code 4XX or 5XX
except requests.exceptions.HTTPError as http_request_err:
raise UserWarning(resp.text) from http_request_err
def _add_contact(label, acct_num, routing_num, is_external_acct=False):
"""
Submits a new contact to the contact service.
Raise: UserWarning if the response status is 4xx or 5xx.
"""
app.logger.debug('Adding new contact.')
token = request.cookies.get(app.config['TOKEN_NAME'])
hed = {'Authorization': 'Bearer ' + token,
'content-type': 'application/json'}
contact_data = {
'label': label,
'account_num': acct_num,
'routing_num': routing_num,
'is_external': is_external_acct
}
token_data = jwt.decode(token, verify=False)
url = '{}/{}'.format(app.config["CONTACTS_URI"], token_data['user'])
resp = requests.post(url=url,
data=jsonify(contact_data).data,
headers=hed,
timeout=app.config['BACKEND_TIMEOUT'])
try:
resp.raise_for_status() # Raise on HTTP Status code 4XX or 5XX
except requests.exceptions.HTTPError as http_request_err:
raise UserWarning(resp.text) from http_request_err
@app.route("/login", methods=['GET'])
def login_page():
"""
Renders login page. Redirects to /home if user already has a valid token
"""
token = request.cookies.get(app.config['TOKEN_NAME'])
if verify_token(token):
# already authenticated
app.logger.debug('User already authenticated. Redirecting to /home')
return redirect(url_for('home',
_external=True,
_scheme=app.config['SCHEME']))
return render_template('login.html',
cymbal_logo=os.getenv('CYMBAL_LOGO', 'false'),
cluster_name=cluster_name,
pod_name=pod_name,
pod_zone=pod_zone,
message=request.args.get('msg', None),
default_user=os.getenv('DEFAULT_USERNAME', ''),
default_password=os.getenv('DEFAULT_PASSWORD', ''),
bank_name=os.getenv('BANK_NAME', 'Toptal Bank v3'))
@app.route('/login', methods=['POST'])
def login():
"""
Submits login request to userservice and saves resulting token
Fails if userservice does not accept input username and password
"""
return _login_helper(request.form['username'],
request.form['password'])
def _login_helper(username, password):
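"""Log in against userservice and, on success, store the returned JWT in a cookie."""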
try:
app.logger.debug('Logging in.')
req = requests.get(url=app.config["LOGIN_URI"],
params={'username': username, 'password': password})
req.raise_for_status() # Raise on HTTP Status code 4XX or 5XX
# login success
token = req.json()['token'].encode('utf-8')
claims = jwt.decode(token, verify=False)
max_age = claims['exp'] - claims['iat']
resp = make_response(redirect(url_for('home',
_external=True,
_scheme=app.config['SCHEME'])))
resp.set_cookie(app.config['TOKEN_NAME'], token, max_age=max_age)
app.logger.info('Successfully logged in.')
return resp
except (RequestException, HTTPError) as err:
app.logger.error('Error logging in: %s', str(err))
return redirect(url_for('login',
msg='Login Failed',
_external=True,
_scheme=app.config['SCHEME']))
@app.route("/signup", methods=['GET'])
def signup_page():
"""
Renders signup page. Redirects to /login if token is not valid
"""
token = request.cookies.get(app.config['TOKEN_NAME'])
if verify_token(token):
# already authenticated
app.logger.debug('User already authenticated. Redirecting to /home')
return redirect(url_for('home',
_external=True,
_scheme=app.config['SCHEME']))
return render_template('signup.html',
cymbal_logo=os.getenv('CYMBAL_LOGO', 'false'),
cluster_name=cluster_name,
pod_name=pod_name,
pod_zone=pod_zone,
bank_name=os.getenv('BANK_NAME', 'Toptal Bank v3'))
@app.route("/signup", methods=['POST'])
def signup():
"""
Submits signup request to userservice
Fails if userservice does not accept input form data
"""
try:
# create user
app.logger.debug('Creating new user.')
resp = requests.post(url=app.config["USERSERVICE_URI"],
data=request.form,
timeout=app.config['BACKEND_TIMEOUT'])
if resp.status_code == 201:
# user created. Attempt login
app.logger.info('New user created.')
return _login_helper(request.form['username'],
request.form['password'])
except requests.exceptions.RequestException as err:
app.logger.error('Error creating new user: %s', str(err))
return redirect(url_for('login',
msg='Error: Account creation failed',
_external=True,
_scheme=app.config['SCHEME']))
@app.route('/logout', methods=['POST'])
def logout():
"""
Logs out user by deleting token cookie and redirecting to login page
"""
app.logger.info('Logging out.')
resp = make_response(redirect(url_for('login_page',
_external=True,
_scheme=app.config['SCHEME'])))
resp.delete_cookie(app.config['TOKEN_NAME'])
return resp
def verify_token(token):
"""
Validates token using userservice public key
"""
app.logger.debug('Verifying token.')
if token is None:
return False
try:
jwt.decode(token, key=app.config['PUBLIC_KEY'], algorithms='RS256', verify=True)
app.logger.debug('Token verified.')
return True
except jwt.exceptions.InvalidTokenError as err:
app.logger.error('Error validating token: %s', str(err))
return False
# register html template formatters
def format_timestamp_day(timestamp):
""" Format the input timestamp day in a human readable way """
# TODO: time zones?
date = datetime.datetime.strptime(timestamp, app.config['TIMESTAMP_FORMAT'])
return date.strftime('%d')
def format_timestamp_month(timestamp):
""" Format the input timestamp month in a human readable way """
# TODO: time zones?
date = datetime.datetime.strptime(timestamp, app.config['TIMESTAMP_FORMAT'])
return date.strftime('%b')
def format_currency(int_amount):
""" Format the input currency in a human readable way """
if int_amount is None:
return '$---'
amount_str = '${:0,.2f}'.format(abs(Decimal(int_amount)/100))
if int_amount < 0:
amount_str = '-' + amount_str
return amount_str
# set up global variables
app.config["TRANSACTIONS_URI"] = 'http://{}/transactions'.format(
os.environ.get('TRANSACTIONS_API_ADDR'))
app.config["USERSERVICE_URI"] = 'http://{}/users'.format(
os.environ.get('USERSERVICE_API_ADDR'))
app.config["BALANCES_URI"] = 'http://{}/balances'.format(
os.environ.get('BALANCES_API_ADDR'))
app.config["HISTORY_URI"] = 'http://{}/transactions'.format(
os.environ.get('HISTORY_API_ADDR'))
app.config["LOGIN_URI"] = 'http://{}/login'.format(
os.environ.get('USERSERVICE_API_ADDR'))
app.config["CONTACTS_URI"] = 'http://{}/contacts'.format(
os.environ.get('CONTACTS_API_ADDR'))
app.config['PUBLIC_KEY'] = open(os.environ.get('PUB_KEY_PATH'), 'r').read()
app.config['LOCAL_ROUTING'] = os.getenv('LOCAL_ROUTING_NUM')
app.config['BACKEND_TIMEOUT'] = 4 # timeout in seconds for calls to the backend
app.config['TOKEN_NAME'] = 'token'
app.config['TIMESTAMP_FORMAT'] = '%Y-%m-%dT%H:%M:%S.%f%z'
app.config['SCHEME'] = os.environ.get('SCHEME', 'http')
# where am I?
metadata_url = 'http://metadata.google.internal/computeMetadata/v1/'
metadata_headers = {'Metadata-Flavor': 'Google'}
# get GKE cluster name
cluster_name = "unknown"
try:
req = requests.get(metadata_url + 'instance/attributes/cluster-name',
headers=metadata_headers)
if req.ok:
cluster_name = str(req.text)
except (RequestException, HTTPError) as err:
app.logger.warning("Unable to capture GKE cluster name.")
# get GKE pod name
pod_name = "unknown"
pod_name = socket.gethostname()
# get GKE node zone
pod_zone = "unknown"
try:
req = requests.get(metadata_url + 'instance/zone',
headers=metadata_headers)
if req.ok:
pod_zone = str(req.text.split("/")[3])
except (RequestException, HTTPError) as err:
app.logger.warning("Unable to capture GKE node zone.")
# register formatter functions
app.jinja_env.globals.update(format_currency=format_currency)
app.jinja_env.globals.update(format_timestamp_month=format_timestamp_month)
app.jinja_env.globals.update(format_timestamp_day=format_timestamp_day)
# set log formatting
date_format = "%Y-%m-%d %H:%M:%S"
message_format = '%(asctime)s | [%(levelname)s] | %(funcName)s | %(message)s'
logging.basicConfig(format=message_format, datefmt=date_format, stream=sys.stdout)
# set log level
log_levels = {
"DEBUG": logging.DEBUG,
"WARNING": logging.WARNING,
"INFO": logging.INFO,
"ERROR": logging.ERROR,
"CRITICAL": logging.CRITICAL
}
level = logging.INFO #default
user_log_level = os.environ.get("LOG_LEVEL")
if user_log_level is not None and user_log_level.upper() in log_levels:
level = log_levels.get(user_log_level.upper())
app.logger.setLevel(level)
app.logger.info("Starting frontend.")
# Set up tracing and export spans to Cloud Trace.
if os.environ['ENABLE_TRACING'] == "true":
app.logger.info("✅ Tracing enabled.")
trace.set_tracer_provider(TracerProvider())
cloud_trace_exporter = CloudTraceSpanExporter()
trace.get_tracer_provider().add_span_processor(
BatchExportSpanProcessor(cloud_trace_exporter)
)
set_global_textmap(CloudTraceFormatPropagator())
# Add tracing auto-instrumentation for Flask, jinja and requests
FlaskInstrumentor().instrument_app(app)
RequestsInstrumentor().instrument()
Jinja2Instrumentor().instrument()
else:
app.logger.info("🚫 Tracing disabled.")
return app
if __name__ == "__main__":
# Create an instance of flask server when called directly
FRONTEND = create_app()
FRONTEND.run()
| [] | [] | ["DEFAULT_USERNAME", "CONTACTS_API_ADDR", "CYMBAL_LOGO", "HISTORY_API_ADDR", "LOG_LEVEL", "VERSION", "BALANCES_API_ADDR", "TRANSACTIONS_API_ADDR", "PUB_KEY_PATH", "ENABLE_TRACING", "BANK_NAME", "USERSERVICE_API_ADDR", "LOCAL_ROUTING_NUM", "SCHEME", "DEFAULT_PASSWORD"] | [] | ["DEFAULT_USERNAME", "CONTACTS_API_ADDR", "CYMBAL_LOGO", "HISTORY_API_ADDR", "LOG_LEVEL", "VERSION", "BALANCES_API_ADDR", "TRANSACTIONS_API_ADDR", "PUB_KEY_PATH", "ENABLE_TRACING", "BANK_NAME", "USERSERVICE_API_ADDR", "LOCAL_ROUTING_NUM", "SCHEME", "DEFAULT_PASSWORD"] | python | 15 | 0 | |
contrib/spendfrom/spendfrom.py
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend WGCs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a worldGreenCoind or worldGreenCoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the worldGreenCoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/WorldGreenCoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "WorldGreenCoin")
return os.path.expanduser("~/.worldGreenCoin")
def read_bitcoin_config(dbdir):
"""Read the worldGreenCoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "worldGreenCoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a worldGreenCoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 17122 if testnet else 17122
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the worldGreenCoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(worldGreenCoind):
info = worldGreenCoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
worldGreenCoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = worldGreenCoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(worldGreenCoind):
address_summary = dict()
address_to_account = dict()
for info in worldGreenCoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = worldGreenCoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = worldGreenCoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-worldGreenCoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(worldGreenCoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(worldGreenCoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to worldGreenCoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = worldGreenCoind.createrawtransaction(inputs, outputs)
signed_rawtx = worldGreenCoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(worldGreenCoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = worldGreenCoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(worldGreenCoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = worldGreenCoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(worldGreenCoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
actual_fee = total_in - total_out  # fee actually paid by this transaction
if kb > 1 and actual_fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and actual_fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get WGCs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send WGCs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of worldGreenCoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
worldGreenCoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(worldGreenCoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(worldGreenCoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(worldGreenCoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(worldGreenCoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = worldGreenCoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| [] | [] | ["APPDATA"] | [] | ["APPDATA"] | python | 1 | 0 | |
examples/create_file/main.go
|
package main
import (
"fmt"
"os"
"github.com/hashgraph/hedera-sdk-go"
)
func main() {
client := hedera.ClientForTestnet()
operatorPrivateKey, err := hedera.Ed25519PrivateKeyFromString(os.Getenv("OPERATOR_KEY"))
if err != nil {
panic(err)
}
operatorAccountID, err := hedera.AccountIDFromString(os.Getenv("OPERATOR_ID"))
if err != nil {
panic(err)
}
client.SetOperator(operatorAccountID, operatorPrivateKey)
transactionID, err := hedera.NewFileCreateTransaction().
// A file is not implicitly owned by anyone, even the operator
AddKey(operatorPrivateKey.PublicKey()).
SetContents([]byte("Hello, World")).
SetTransactionMemo("go sdk example create_file/main.go").
SetMaxTransactionFee(hedera.HbarFrom(8, hedera.HbarUnits.Hbar)).
Execute(client)
if err != nil {
panic(err)
}
transactionReceipt, err := transactionID.GetReceipt(client)
if err != nil {
panic(err)
}
fmt.Printf("file = %v\n", transactionReceipt.GetFileID())
}
| ["\"OPERATOR_KEY\"", "\"OPERATOR_ID\""] | [] | ["OPERATOR_ID", "OPERATOR_KEY"] | [] | ["OPERATOR_ID", "OPERATOR_KEY"] | go | 2 | 0 | |
src/runtime/pprof/pprof_test.go
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !js
package pprof
import (
"bytes"
"context"
"fmt"
"internal/abi"
"internal/profile"
"internal/testenv"
"io"
"math"
"math/big"
"os"
"os/exec"
"regexp"
"runtime"
"runtime/debug"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
_ "unsafe"
)
func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
// We only need to get one 100 Hz clock tick, so we've got
// a large safety buffer.
// But do at least 500 iterations (which should take about 100ms),
// otherwise TestCPUProfileMultithreaded can fail if only one
// thread is scheduled during the testing period.
t0 := time.Now()
accum := *y
for i := 0; i < 500 || time.Since(t0) < dur; i++ {
accum = f(accum)
}
*y = accum
}
var (
salt1 = 0
salt2 = 0
)
// The actual CPU hogging function.
// Must not call other functions nor access heap/globals in the loop,
// otherwise under race detector the samples will be in the race runtime.
func cpuHog1(x int) int {
return cpuHog0(x, 1e5)
}
func cpuHog0(x, n int) int {
foo := x
for i := 0; i < n; i++ {
if foo > 0 {
foo *= foo
} else {
foo *= foo + 1
}
}
return foo
}
func cpuHog2(x int) int {
foo := x
for i := 0; i < 1e5; i++ {
if foo > 0 {
foo *= foo
} else {
foo *= foo + 2
}
}
return foo
}
// Return a list of functions that we don't want to ever appear in CPU
// profiles. For gccgo, that list includes the sigprof handler itself.
func avoidFunctions() []string {
if runtime.Compiler == "gccgo" {
return []string{"runtime.sigprof"}
}
return nil
}
func TestCPUProfile(t *testing.T) {
matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
testCPUProfile(t, matches, func(dur time.Duration) {
cpuHogger(cpuHog1, &salt1, dur)
})
}
func TestCPUProfileMultithreaded(t *testing.T) {
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, avoidFunctions())
testCPUProfile(t, matches, func(dur time.Duration) {
c := make(chan int)
go func() {
cpuHogger(cpuHog1, &salt1, dur)
c <- 1
}()
cpuHogger(cpuHog2, &salt2, dur)
<-c
})
}
func TestCPUProfileMultithreadMagnitude(t *testing.T) {
if runtime.GOOS != "linux" {
t.Skip("issue 35057 is only confirmed on Linux")
}
// Linux [5.9,5.16) has a kernel bug that can break CPU timers on newly
// created threads, breaking our CPU accounting.
major, minor, patch, err := linuxKernelVersion()
if err != nil {
t.Errorf("Error determining kernel version: %v", err)
}
t.Logf("Running on Linux %d.%d.%d", major, minor, patch)
defer func() {
if t.Failed() {
t.Logf("Failure of this test may indicate that your system suffers from a known Linux kernel bug fixed on newer kernels. See https://golang.org/issue/49065.")
}
}()
// Disable on affected builders to avoid flakiness, but otherwise keep
// it enabled to potentially warn users that they are on a broken
// kernel.
if testenv.Builder() != "" && (runtime.GOARCH == "386" || runtime.GOARCH == "amd64") {
have59 := major > 5 || (major == 5 && minor >= 9)
have516 := major > 5 || (major == 5 && minor >= 16)
if have59 && !have516 {
testenv.SkipFlaky(t, 49065)
}
}
// Run a workload in a single goroutine, then run copies of the same
// workload in several goroutines. For both the serial and parallel cases,
// the CPU time the process measures with its own profiler should match the
// total CPU usage that the OS reports.
//
// We could also check that increases in parallelism (GOMAXPROCS) lead to a
// linear increase in the CPU usage reported by both the OS and the
// profiler, but without a guarantee of exclusive access to CPU resources
// that is likely to be a flaky test.
// Require the smaller value to be within 10%, or 40% in short mode.
maxDiff := 0.10
if testing.Short() {
maxDiff = 0.40
}
compare := func(a, b time.Duration, maxDiff float64) error {
if a <= 0 || b <= 0 {
return fmt.Errorf("Expected both time reports to be positive")
}
if a < b {
a, b = b, a
}
diff := float64(a-b) / float64(a)
if diff > maxDiff {
return fmt.Errorf("CPU usage reports are too different (limit -%.1f%%, got -%.1f%%)", maxDiff*100, diff*100)
}
return nil
}
for _, tc := range []struct {
name string
workers int
}{
{
name: "serial",
workers: 1,
},
{
name: "parallel",
workers: runtime.GOMAXPROCS(0),
},
} {
// check that the OS's perspective matches what the Go runtime measures.
t.Run(tc.name, func(t *testing.T) {
t.Logf("Running with %d workers", tc.workers)
var userTime, systemTime time.Duration
matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
acceptProfile := func(t *testing.T, p *profile.Profile) bool {
if !matches(t, p) {
return false
}
ok := true
for i, unit := range []string{"count", "nanoseconds"} {
if have, want := p.SampleType[i].Unit, unit; have != want {
t.Logf("pN SampleType[%d]; %q != %q", i, have, want)
ok = false
}
}
// cpuHog1 called below is the primary source of CPU
// load, but there may be some background work by the
// runtime. Since the OS rusage measurement will
// include all work done by the process, also compare
// against all samples in our profile.
var value time.Duration
for _, sample := range p.Sample {
value += time.Duration(sample.Value[1]) * time.Nanosecond
}
totalTime := userTime + systemTime
t.Logf("compare %s user + %s system = %s vs %s", userTime, systemTime, totalTime, value)
if err := compare(totalTime, value, maxDiff); err != nil {
t.Logf("compare got %v want nil", err)
ok = false
}
return ok
}
testCPUProfile(t, acceptProfile, func(dur time.Duration) {
userTime, systemTime = diffCPUTime(t, func() {
var wg sync.WaitGroup
var once sync.Once
for i := 0; i < tc.workers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
var salt = 0
cpuHogger(cpuHog1, &salt, dur)
once.Do(func() { salt1 = salt })
}()
}
wg.Wait()
})
})
})
}
}
// containsInlinedCall reports whether the function body for the function f is
// known to contain an inlined function call within the first maxBytes bytes.
func containsInlinedCall(f any, maxBytes int) bool {
_, found := findInlinedCall(f, maxBytes)
return found
}
// findInlinedCall returns the PC of an inlined function call within
// the function body for the function f if any.
func findInlinedCall(f any, maxBytes int) (pc uint64, found bool) {
fFunc := runtime.FuncForPC(uintptr(abi.FuncPCABIInternal(f)))
if fFunc == nil || fFunc.Entry() == 0 {
panic("failed to locate function entry")
}
for offset := 0; offset < maxBytes; offset++ {
innerPC := fFunc.Entry() + uintptr(offset)
inner := runtime.FuncForPC(innerPC)
if inner == nil {
// No function known for this PC value.
// It might simply be misaligned, so keep searching.
continue
}
if inner.Entry() != fFunc.Entry() {
// Scanned past f and didn't find any inlined functions.
break
}
if inner.Name() != fFunc.Name() {
// This PC has f as its entry-point, but is not f. Therefore, it must be a
// function inlined into f.
return uint64(innerPC), true
}
}
return 0, false
}
func TestCPUProfileInlining(t *testing.T) {
if !containsInlinedCall(inlinedCaller, 4<<10) {
t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
}
matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, avoidFunctions())
p := testCPUProfile(t, matches, func(dur time.Duration) {
cpuHogger(inlinedCaller, &salt1, dur)
})
// Check if inlined function locations are encoded correctly. The inlinedCallee and inlinedCaller should be in one location.
for _, loc := range p.Location {
hasInlinedCallerAfterInlinedCallee, hasInlinedCallee := false, false
for _, line := range loc.Line {
if line.Function.Name == "runtime/pprof.inlinedCallee" {
hasInlinedCallee = true
}
if hasInlinedCallee && line.Function.Name == "runtime/pprof.inlinedCaller" {
hasInlinedCallerAfterInlinedCallee = true
}
}
if hasInlinedCallee != hasInlinedCallerAfterInlinedCallee {
t.Fatalf("want inlinedCallee followed by inlinedCaller, got separate Location entries:\n%v", p)
}
}
}
func inlinedCaller(x int) int {
x = inlinedCallee(x, 1e5)
return x
}
func inlinedCallee(x, n int) int {
return cpuHog0(x, n)
}
//go:noinline
func dumpCallers(pcs []uintptr) {
if pcs == nil {
return
}
skip := 2 // Callers and dumpCallers
runtime.Callers(skip, pcs)
}
//go:noinline
func inlinedCallerDump(pcs []uintptr) {
inlinedCalleeDump(pcs)
}
func inlinedCalleeDump(pcs []uintptr) {
dumpCallers(pcs)
}
func TestCPUProfileRecursion(t *testing.T) {
matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions())
p := testCPUProfile(t, matches, func(dur time.Duration) {
cpuHogger(recursionCaller, &salt1, dur)
})
// check the Location encoding was not confused by recursive calls.
for i, loc := range p.Location {
recursionFunc := 0
for _, line := range loc.Line {
if name := line.Function.Name; name == "runtime/pprof.recursionCaller" || name == "runtime/pprof.recursionCallee" {
recursionFunc++
}
}
if recursionFunc > 1 {
t.Fatalf("want at most one recursionCaller or recursionCallee in one Location, got a violating Location (index: %d):\n%v", i, p)
}
}
}
func recursionCaller(x int) int {
y := recursionCallee(3, x)
return y
}
func recursionCallee(n, x int) int {
if n == 0 {
return 1
}
y := inlinedCallee(x, 1e4)
return y * recursionCallee(n-1, x)
}
func recursionChainTop(x int, pcs []uintptr) {
if x < 0 {
return
}
recursionChainMiddle(x, pcs)
}
func recursionChainMiddle(x int, pcs []uintptr) {
recursionChainBottom(x, pcs)
}
func recursionChainBottom(x int, pcs []uintptr) {
// This will be called each time, we only care about the last. We
// can't make this conditional or this function won't be inlined.
dumpCallers(pcs)
recursionChainTop(x-1, pcs)
}
func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {
p, err := profile.Parse(bytes.NewReader(valBytes))
if err != nil {
t.Fatal(err)
}
for _, sample := range p.Sample {
count := uintptr(sample.Value[0])
f(count, sample.Location, sample.Label)
}
return p
}
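// cpuProfilingBroken reports whether CPU profiling is known to be unreliable on the current GOOS/GOARCH.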
func cpuProfilingBroken() bool {
switch runtime.GOOS {
case "plan9":
// Profiling unimplemented.
return true
case "aix":
// See https://golang.org/issue/45170.
return true
case "ios", "dragonfly", "netbsd", "illumos", "solaris":
// See https://golang.org/issue/13841.
return true
case "openbsd":
if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
// See https://golang.org/issue/13841.
return true
}
}
return false
}
// testCPUProfile runs f under the CPU profiler, checking for some conditions specified by need,
// as interpreted by matches, and returns the parsed profile.
func testCPUProfile(t *testing.T, matches profileMatchFunc, f func(dur time.Duration)) *profile.Profile {
switch runtime.GOOS {
case "darwin":
out, err := exec.Command("uname", "-a").CombinedOutput()
if err != nil {
t.Fatal(err)
}
vers := string(out)
t.Logf("uname -a: %v", vers)
case "plan9":
t.Skip("skipping on plan9")
}
broken := cpuProfilingBroken()
maxDuration := 5 * time.Second
if testing.Short() && broken {
// If it's expected to be broken, no point waiting around.
maxDuration /= 10
}
// If we're running a long test, start with a long duration
// for tests that try to make sure something *doesn't* happen.
duration := 5 * time.Second
if testing.Short() {
duration = 100 * time.Millisecond
}
// Profiling tests are inherently flaky, especially on a
// loaded system, such as when this test is running with
// several others under go test std. If a test fails in a way
// that could mean it just didn't run long enough, try with a
// longer duration.
for duration <= maxDuration {
var prof bytes.Buffer
if err := StartCPUProfile(&prof); err != nil {
t.Fatal(err)
}
f(duration)
StopCPUProfile()
if p, ok := profileOk(t, matches, prof, duration); ok {
return p
}
duration *= 2
if duration <= maxDuration {
t.Logf("retrying with %s duration", duration)
}
}
if broken {
t.Skipf("ignoring failure on %s/%s; see golang.org/issue/13841", runtime.GOOS, runtime.GOARCH)
}
// Ignore the failure if the tests are running in a QEMU-based emulator,
// QEMU is not perfect at emulating everything.
// IN_QEMU environmental variable is set by some of the Go builders.
// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
if os.Getenv("IN_QEMU") == "1" {
t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
}
t.FailNow()
return nil
}
var diffCPUTimeImpl func(f func()) (user, system time.Duration)
func diffCPUTime(t *testing.T, f func()) (user, system time.Duration) {
if fn := diffCPUTimeImpl; fn != nil {
return fn(f)
}
t.Fatalf("cannot measure CPU time on GOOS=%s GOARCH=%s", runtime.GOOS, runtime.GOARCH)
return 0, 0
}
func contains(slice []string, s string) bool {
for i := range slice {
if slice[i] == s {
return true
}
}
return false
}
// stackContains matches if a function named spec appears anywhere in the stack trace.
func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
for _, loc := range stk {
for _, line := range loc.Line {
if strings.Contains(line.Function.Name, spec) {
return true
}
}
}
return false
}
type sampleMatchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool
func profileOk(t *testing.T, matches profileMatchFunc, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
ok = true
var samples uintptr
var buf bytes.Buffer
p := parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
fmt.Fprintf(&buf, "%d:", count)
fprintStack(&buf, stk)
fmt.Fprintf(&buf, " labels: %v\n", labels)
samples += count
fmt.Fprintf(&buf, "\n")
})
t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())
if samples < 10 && runtime.GOOS == "windows" {
// On some windows machines we end up with
// not enough samples due to coarse timer
// resolution. Let it go.
t.Log("too few samples on Windows (golang.org/issue/10842)")
return p, false
}
// Check that we got a reasonable number of samples.
// We used to always require at least ideal/4 samples,
// but that is too hard to guarantee on a loaded system.
// Now we accept 10 or more samples, which we take to be
// enough to show that at least some profiling is occurring.
if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
ok = false
}
if matches != nil && !matches(t, p) {
ok = false
}
return p, ok
}
type profileMatchFunc func(*testing.T, *profile.Profile) bool
func matchAndAvoidStacks(matches sampleMatchFunc, need []string, avoid []string) profileMatchFunc {
return func(t *testing.T, p *profile.Profile) (ok bool) {
ok = true
// Check that profile is well formed, contains 'need', and does not contain
// anything from 'avoid'.
have := make([]uintptr, len(need))
avoidSamples := make([]uintptr, len(avoid))
for _, sample := range p.Sample {
count := uintptr(sample.Value[0])
for i, spec := range need {
if matches(spec, count, sample.Location, sample.Label) {
have[i] += count
}
}
for i, name := range avoid {
for _, loc := range sample.Location {
for _, line := range loc.Line {
if strings.Contains(line.Function.Name, name) {
avoidSamples[i] += count
}
}
}
}
}
for i, name := range avoid {
bad := avoidSamples[i]
if bad != 0 {
t.Logf("found %d samples in avoid-function %s\n", bad, name)
ok = false
}
}
if len(need) == 0 {
return
}
var total uintptr
for i, name := range need {
total += have[i]
t.Logf("%s: %d\n", name, have[i])
}
if total == 0 {
t.Logf("no samples in expected functions")
ok = false
}
// We'd like to check a reasonable minimum, like
// total / len(have) / smallconstant, but this test is
// pretty flaky (see bug 7095). So we'll just test to
// make sure we got at least one sample.
min := uintptr(1)
for i, name := range need {
if have[i] < min {
t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
ok = false
}
}
return
}
}
// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.
func TestCPUProfileWithFork(t *testing.T) {
testenv.MustHaveExec(t)
heap := 1 << 30
if runtime.GOOS == "android" {
// Use smaller size for Android to avoid crash.
heap = 100 << 20
}
if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
// Use smaller heap for Windows/ARM to avoid crash.
heap = 100 << 20
}
if testing.Short() {
heap = 100 << 20
}
// This makes fork slower.
garbage := make([]byte, heap)
// Need to touch the slice, otherwise it won't be paged in.
done := make(chan bool)
go func() {
for i := range garbage {
garbage[i] = 42
}
done <- true
}()
<-done
var prof bytes.Buffer
if err := StartCPUProfile(&prof); err != nil {
t.Fatal(err)
}
defer StopCPUProfile()
for i := 0; i < 10; i++ {
exec.Command(os.Args[0], "-h").CombinedOutput()
}
}
// Test that profiler does not observe runtime.gogo as "user" goroutine execution.
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
if runtime.Compiler == "gccgo" {
t.Skip("not applicable for gccgo")
}
// How much to try. These defaults take about 1 second
// on a 2012 MacBook Pro. The ones in short mode take
// about 0.1 seconds.
tries := 10
count := 1000000
if testing.Short() {
tries = 1
}
for try := 0; try < tries; try++ {
var prof bytes.Buffer
if err := StartCPUProfile(&prof); err != nil {
t.Fatal(err)
}
for i := 0; i < count; i++ {
runtime.Gosched()
}
StopCPUProfile()
// Read profile to look for entries for gogo with an attempt at a traceback.
// "runtime.gogo" is OK, because that's the part of the context switch
// before the actual switch begins. But we should not see "gogo",
// aka "gogo<>(SB)", which does the actual switch and is marked SPWRITE.
parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
// An entry with two frames with 'System' in its top frame
// exists to record a PC without a traceback. Those are okay.
if len(stk) == 2 {
name := stk[1].Line[0].Function.Name
if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
return
}
}
// An entry with just one frame is OK too:
// it knew to stop at gogo.
if len(stk) == 1 {
return
}
// Otherwise, should not see gogo.
// The place we'd see it would be the inner most frame.
name := stk[0].Line[0].Function.Name
if name == "gogo" {
var buf bytes.Buffer
fprintStack(&buf, stk)
t.Fatalf("found profile entry for gogo:\n%s", buf.String())
}
})
}
}
func fprintStack(w io.Writer, stk []*profile.Location) {
for _, loc := range stk {
fmt.Fprintf(w, " %#x", loc.Address)
fmt.Fprintf(w, " (")
for i, line := range loc.Line {
if i > 0 {
fmt.Fprintf(w, " ")
}
fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
}
fmt.Fprintf(w, ")")
}
}
// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
func TestMathBigDivide(t *testing.T) {
testCPUProfile(t, nil, func(duration time.Duration) {
t := time.After(duration)
pi := new(big.Int)
for {
for i := 0; i < 100; i++ {
n := big.NewInt(2646693125139304345)
d := big.NewInt(842468587426513207)
pi.Div(n, d)
}
select {
case <-t:
return
default:
}
}
})
}
// stackContainsAll matches if all functions in spec (comma-separated) appear somewhere in the stack trace.
func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
for _, f := range strings.Split(spec, ",") {
if !stackContains(f, count, stk, labels) {
return false
}
}
return true
}
func TestMorestack(t *testing.T) {
matches := matchAndAvoidStacks(stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions())
testCPUProfile(t, matches, func(duration time.Duration) {
t := time.After(duration)
c := make(chan bool)
for {
go func() {
growstack1()
c <- true
}()
select {
case <-t:
return
case <-c:
}
}
})
}
//go:noinline
func growstack1() {
growstack(10)
}
//go:noinline
func growstack(n int) {
var buf [8 << 18]byte
use(buf)
if n > 0 {
growstack(n - 1)
}
}
//go:noinline
func use(x [8 << 18]byte) {}
func TestBlockProfile(t *testing.T) {
type TestCase struct {
name string
f func(*testing.T)
stk []string
re string
}
tests := [...]TestCase{
{
name: "chan recv",
f: blockChanRecv,
stk: []string{
"runtime.chanrecv1",
"runtime/pprof.blockChanRecv",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.chanrecv1\+0x[0-9a-f]+ .*runtime/chan.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockChanRecv\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "chan send",
f: blockChanSend,
stk: []string{
"runtime.chansend1",
"runtime/pprof.blockChanSend",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.chansend1\+0x[0-9a-f]+ .*runtime/chan.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockChanSend\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "chan close",
f: blockChanClose,
stk: []string{
"runtime.chanrecv1",
"runtime/pprof.blockChanClose",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.chanrecv1\+0x[0-9a-f]+ .*runtime/chan.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockChanClose\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "select recv async",
f: blockSelectRecvAsync,
stk: []string{
"runtime.selectgo",
"runtime/pprof.blockSelectRecvAsync",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.selectgo\+0x[0-9a-f]+ .*runtime/select.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "select send sync",
f: blockSelectSendSync,
stk: []string{
"runtime.selectgo",
"runtime/pprof.blockSelectSendSync",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.selectgo\+0x[0-9a-f]+ .*runtime/select.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "mutex",
f: blockMutex,
stk: []string{
"sync.(*Mutex).Lock",
"runtime/pprof.blockMutex",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+ .*sync/mutex\.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockMutex\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "cond",
f: blockCond,
stk: []string{
"sync.(*Cond).Wait",
"runtime/pprof.blockCond",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+ .*sync/cond\.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockCond\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
`},
}
// Generate block profile
runtime.SetBlockProfileRate(1)
defer runtime.SetBlockProfileRate(0)
for _, test := range tests {
test.f(t)
}
t.Run("debug=1", func(t *testing.T) {
var w bytes.Buffer
Lookup("block").WriteTo(&w, 1)
prof := w.String()
if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
t.Fatalf("Bad profile header:\n%v", prof)
}
if strings.HasSuffix(prof, "#\t0x0\n\n") {
t.Errorf("Useless 0 suffix:\n%v", prof)
}
for _, test := range tests {
if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) {
t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
}
}
})
t.Run("proto", func(t *testing.T) {
// proto format
var w bytes.Buffer
Lookup("block").WriteTo(&w, 0)
p, err := profile.Parse(&w)
if err != nil {
t.Fatalf("failed to parse profile: %v", err)
}
t.Logf("parsed proto: %s", p)
if err := p.CheckValid(); err != nil {
t.Fatalf("invalid profile: %v", err)
}
stks := stacks(p)
for _, test := range tests {
if !containsStack(stks, test.stk) {
t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
}
}
})
}
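// stacks collects, for every sample in p, the function names recorded on its
// call stack (one string slice per sample, built from the sample's locations
// and their line entries).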
func stacks(p *profile.Profile) (res [][]string) {
for _, s := range p.Sample {
var stk []string
for _, l := range s.Location {
for _, line := range l.Line {
stk = append(stk, line.Function.Name)
}
}
res = append(res, stk)
}
return res
}
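// containsStack reports whether any stack in got begins with the exact
// sequence of function names given in want.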
func containsStack(got [][]string, want []string) bool {
for _, stk := range got {
if len(stk) < len(want) {
continue
}
for i, f := range want {
if f != stk[i] {
break
}
if i == len(want)-1 {
return true
}
}
}
return false
}
// awaitBlockedGoroutine spins on runtime.Gosched until a runtime stack dump
// shows a goroutine in the given state with a stack frame in
// runtime/pprof.<fName>.
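//
// The regexp it builds is a sketch of the expected traceback shape: a
// "goroutine N [<state>]:" header followed by pairs of frame/source lines,
// eventually reaching a frame in runtime/pprof.<fName>.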
func awaitBlockedGoroutine(t *testing.T, state, fName string) {
re := fmt.Sprintf(`(?m)^goroutine \d+ \[%s\]:\n(?:.+\n\t.+\n)*runtime/pprof\.%s`, regexp.QuoteMeta(state), fName)
r := regexp.MustCompile(re)
if deadline, ok := t.Deadline(); ok {
if d := time.Until(deadline); d > 1*time.Second {
timer := time.AfterFunc(d-1*time.Second, func() {
debug.SetTraceback("all")
panic(fmt.Sprintf("timed out waiting for %#q", re))
})
defer timer.Stop()
}
}
buf := make([]byte, 64<<10)
for {
runtime.Gosched()
n := runtime.Stack(buf, true)
if n == len(buf) {
// Buffer wasn't large enough for a full goroutine dump.
// Resize it and try again.
buf = make([]byte, 2*len(buf))
continue
}
if r.Match(buf[:n]) {
return
}
}
}
func blockChanRecv(t *testing.T) {
c := make(chan bool)
go func() {
awaitBlockedGoroutine(t, "chan receive", "blockChanRecv")
c <- true
}()
<-c
}
func blockChanSend(t *testing.T) {
c := make(chan bool)
go func() {
awaitBlockedGoroutine(t, "chan send", "blockChanSend")
<-c
}()
c <- true
}
func blockChanClose(t *testing.T) {
c := make(chan bool)
go func() {
awaitBlockedGoroutine(t, "chan receive", "blockChanClose")
close(c)
}()
<-c
}
func blockSelectRecvAsync(t *testing.T) {
const numTries = 3
c := make(chan bool, 1)
c2 := make(chan bool, 1)
go func() {
for i := 0; i < numTries; i++ {
awaitBlockedGoroutine(t, "select", "blockSelectRecvAsync")
c <- true
}
}()
for i := 0; i < numTries; i++ {
select {
case <-c:
case <-c2:
}
}
}
func blockSelectSendSync(t *testing.T) {
c := make(chan bool)
c2 := make(chan bool)
go func() {
awaitBlockedGoroutine(t, "select", "blockSelectSendSync")
<-c
}()
select {
case c <- true:
case c2 <- true:
}
}
func blockMutex(t *testing.T) {
var mu sync.Mutex
mu.Lock()
go func() {
awaitBlockedGoroutine(t, "semacquire", "blockMutex")
mu.Unlock()
}()
// Note: Unlock releases mu before recording the mutex event,
// so it's theoretically possible for this to proceed and
// capture the profile before the event is recorded. As long
// as this is blocked before the unlock happens, it's okay.
mu.Lock()
}
func blockCond(t *testing.T) {
var mu sync.Mutex
c := sync.NewCond(&mu)
mu.Lock()
go func() {
awaitBlockedGoroutine(t, "sync.Cond.Wait", "blockCond")
mu.Lock()
c.Signal()
mu.Unlock()
}()
c.Wait()
mu.Unlock()
}
// See http://golang.org/cl/299991.
func TestBlockProfileBias(t *testing.T) {
rate := int(1000) // arbitrary value
runtime.SetBlockProfileRate(rate)
defer runtime.SetBlockProfileRate(0)
// simulate blocking events
blockFrequentShort(rate)
blockInfrequentLong(rate)
var w bytes.Buffer
Lookup("block").WriteTo(&w, 0)
p, err := profile.Parse(&w)
if err != nil {
t.Fatalf("failed to parse profile: %v", err)
}
t.Logf("parsed proto: %s", p)
il := float64(-1) // blockInfrequentLong duration
fs := float64(-1) // blockFrequentShort duration
for _, s := range p.Sample {
for _, l := range s.Location {
for _, line := range l.Line {
if len(s.Value) < 2 {
t.Fatal("block profile has less than 2 sample types")
}
if line.Function.Name == "runtime/pprof.blockInfrequentLong" {
il = float64(s.Value[1])
} else if line.Function.Name == "runtime/pprof.blockFrequentShort" {
fs = float64(s.Value[1])
}
}
}
}
if il == -1 || fs == -1 {
t.Fatal("block profile is missing expected functions")
}
// stddev of bias from 100 runs on local machine multiplied by 10x
const threshold = 0.2
if bias := (il - fs) / il; math.Abs(bias) > threshold {
t.Fatalf("bias: abs(%f) > %f", bias, threshold)
} else {
t.Logf("bias: abs(%f) < %f", bias, threshold)
}
}
// blockFrequentShort produces 100000 block events with an average duration of
// rate / 10.
func blockFrequentShort(rate int) {
for i := 0; i < 100000; i++ {
blockevent(int64(rate/10), 1)
}
}
// blockInfrequentLong produces 10000 block events with an average duration of
// rate.
func blockInfrequentLong(rate int) {
for i := 0; i < 10000; i++ {
blockevent(int64(rate), 1)
}
}
// Used by TestBlockProfileBias.
//go:linkname blockevent runtime.blockevent
func blockevent(cycles int64, skip int)
func TestMutexProfile(t *testing.T) {
// Generate mutex profile
old := runtime.SetMutexProfileFraction(1)
defer runtime.SetMutexProfileFraction(old)
if old != 0 {
t.Fatalf("need MutexProfileRate 0, got %d", old)
}
blockMutex(t)
t.Run("debug=1", func(t *testing.T) {
var w bytes.Buffer
Lookup("mutex").WriteTo(&w, 1)
prof := w.String()
t.Logf("received profile: %v", prof)
if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
t.Errorf("Bad profile header:\n%v", prof)
}
prof = strings.Trim(prof, "\n")
lines := strings.Split(prof, "\n")
if len(lines) != 6 {
t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
}
if len(lines) < 6 {
return
}
// checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
r2 := `^\d+ \d+ @(?: 0x[[:xdigit:]]+)+`
//r2 := "^[0-9]+ 1 @ 0x[0-9a-f x]+$"
if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
t.Errorf("%q didn't match %q", lines[3], r2)
}
r3 := "^#.*runtime/pprof.blockMutex.*$"
if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
t.Errorf("%q didn't match %q", lines[5], r3)
}
t.Logf(prof)
})
t.Run("proto", func(t *testing.T) {
// proto format
var w bytes.Buffer
Lookup("mutex").WriteTo(&w, 0)
p, err := profile.Parse(&w)
if err != nil {
t.Fatalf("failed to parse profile: %v", err)
}
t.Logf("parsed proto: %s", p)
if err := p.CheckValid(); err != nil {
t.Fatalf("invalid profile: %v", err)
}
stks := stacks(p)
for _, want := range [][]string{
{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutex.func1"},
} {
if !containsStack(stks, want) {
t.Errorf("No matching stack entry for %+v", want)
}
}
})
}
func func1(c chan int) { <-c }
func func2(c chan int) { <-c }
func func3(c chan int) { <-c }
func func4(c chan int) { <-c }
func TestGoroutineCounts(t *testing.T) {
// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
// desired blocking point.
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
c := make(chan int)
for i := 0; i < 100; i++ {
switch {
case i%10 == 0:
go func1(c)
case i%2 == 0:
go func2(c)
default:
go func3(c)
}
// Let goroutines block on channel
for j := 0; j < 5; j++ {
runtime.Gosched()
}
}
ctx := context.Background()
// ... and again, with labels this time (just with fewer iterations to keep
// sorting deterministic).
Do(ctx, Labels("label", "value"), func(context.Context) {
for i := 0; i < 89; i++ {
switch {
case i%10 == 0:
go func1(c)
case i%2 == 0:
go func2(c)
default:
go func3(c)
}
// Let goroutines block on channel
for j := 0; j < 5; j++ {
runtime.Gosched()
}
}
})
var w bytes.Buffer
goroutineProf := Lookup("goroutine")
// Check debug profile
goroutineProf.WriteTo(&w, 1)
prof := w.String()
labels := labelMap{"label": "value"}
labelStr := "\n# labels: " + labels.String()
if !containsInOrder(prof, "\n50 @ ", "\n44 @", labelStr,
"\n40 @", "\n36 @", labelStr, "\n10 @", "\n9 @", labelStr, "\n1 @") {
t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
}
// Check proto profile
w.Reset()
goroutineProf.WriteTo(&w, 0)
p, err := profile.Parse(&w)
if err != nil {
t.Errorf("error parsing protobuf profile: %v", err)
}
if err := p.CheckValid(); err != nil {
t.Errorf("protobuf profile is invalid: %v", err)
}
expectedLabels := map[int64]map[string]string{
50: map[string]string{},
44: map[string]string{"label": "value"},
40: map[string]string{},
36: map[string]string{"label": "value"},
10: map[string]string{},
9: map[string]string{"label": "value"},
1: map[string]string{},
}
if !containsCountsLabels(p, expectedLabels) {
t.Errorf("expected count profile to contain goroutines with counts and labels %v, got %v",
expectedLabels, p)
}
close(c)
time.Sleep(10 * time.Millisecond) // let goroutines exit
}
func containsInOrder(s string, all ...string) bool {
for _, t := range all {
var ok bool
if _, s, ok = strings.Cut(s, t); !ok {
return false
}
}
return true
}
func containsCountsLabels(prof *profile.Profile, countLabels map[int64]map[string]string) bool {
m := make(map[int64]int)
type nkey struct {
count int64
key, val string
}
n := make(map[nkey]int)
for c, kv := range countLabels {
m[c]++
for k, v := range kv {
n[nkey{
count: c,
key: k,
val: v,
}]++
}
}
for _, s := range prof.Sample {
// The count is the single value in the sample
if len(s.Value) != 1 {
return false
}
m[s.Value[0]]--
for k, vs := range s.Label {
for _, v := range vs {
n[nkey{
count: s.Value[0],
key: k,
val: v,
}]--
}
}
}
for _, n := range m {
if n > 0 {
return false
}
}
for _, ncnt := range n {
if ncnt != 0 {
return false
}
}
return true
}
var emptyCallStackTestRun int64
// Issue 18836.
func TestEmptyCallStack(t *testing.T) {
name := fmt.Sprintf("test18836_%d", emptyCallStackTestRun)
emptyCallStackTestRun++
t.Parallel()
var buf bytes.Buffer
p := NewProfile(name)
p.Add("foo", 47674)
p.WriteTo(&buf, 1)
p.Remove("foo")
got := buf.String()
prefix := name + " profile: total 1\n"
if !strings.HasPrefix(got, prefix) {
t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
}
lostevent := "lostProfileEvent"
if !strings.Contains(got, lostevent) {
t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
}
}
// stackContainsLabeled takes a spec like funcname;key=value and matches if the stack has that key
// and value and has funcname somewhere in the stack.
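// For example, the spec "runtime/pprof.cpuHogger;key=value" matches a sample
// whose labels include key=value and whose stack contains cpuHogger.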
func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
base, kv, ok := strings.Cut(spec, ";")
if !ok {
panic("no semicolon in key/value spec")
}
k, v, ok := strings.Cut(kv, "=")
if !ok {
panic("missing = in key/value spec")
}
if !contains(labels[k], v) {
return false
}
return stackContains(base, count, stk, labels)
}
func TestCPUProfileLabel(t *testing.T) {
matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, avoidFunctions())
testCPUProfile(t, matches, func(dur time.Duration) {
Do(context.Background(), Labels("key", "value"), func(context.Context) {
cpuHogger(cpuHog1, &salt1, dur)
})
})
}
func TestLabelRace(t *testing.T) {
// Test the race detector annotations for synchronization
// between settings labels and consuming them from the
// profile.
matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, nil)
testCPUProfile(t, matches, func(dur time.Duration) {
start := time.Now()
var wg sync.WaitGroup
for time.Since(start) < dur {
var salts [10]int
for i := 0; i < 10; i++ {
wg.Add(1)
go func(j int) {
Do(context.Background(), Labels("key", "value"), func(context.Context) {
cpuHogger(cpuHog1, &salts[j], time.Millisecond)
})
wg.Done()
}(i)
}
wg.Wait()
}
})
}
// TestLabelSystemstack makes sure CPU profiler samples of goroutines running
// on systemstack include the correct pprof labels. See issue #48577
func TestLabelSystemstack(t *testing.T) {
// Grab and re-set the initial value before continuing to ensure
// GOGC doesn't actually change following the test.
gogc := debug.SetGCPercent(100)
debug.SetGCPercent(gogc)
matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime.systemstack;key=value"}, avoidFunctions())
p := testCPUProfile(t, matches, func(dur time.Duration) {
Do(context.Background(), Labels("key", "value"), func(ctx context.Context) {
parallelLabelHog(ctx, dur, gogc)
})
})
// Two conditions to check:
// * labelHog should always be labeled.
// * The label should _only_ appear on labelHog and the Do call above.
for _, s := range p.Sample {
isLabeled := s.Label != nil && contains(s.Label["key"], "value")
var (
mayBeLabeled bool
mustBeLabeled bool
mustNotBeLabeled bool
)
for _, loc := range s.Location {
for _, l := range loc.Line {
switch l.Function.Name {
case "runtime/pprof.labelHog", "runtime/pprof.parallelLabelHog", "runtime/pprof.parallelLabelHog.func1":
mustBeLabeled = true
case "runtime/pprof.Do":
// Do sets the labels, so samples may
// or may not be labeled depending on
// which part of the function they are
// at.
mayBeLabeled = true
case "runtime.bgsweep", "runtime.bgscavenge", "runtime.forcegchelper", "runtime.gcBgMarkWorker", "runtime.runfinq", "runtime.sysmon":
// Runtime system goroutines or threads
// (such as those identified by
// runtime.isSystemGoroutine). These
// should never be labeled.
mustNotBeLabeled = true
case "gogo", "gosave_systemstack_switch", "racecall":
					// These are context switch/race
					// critical functions that we can't do a full
// traceback from. Typically this would
// be covered by the runtime check
// below, but these symbols don't have
// the package name.
mayBeLabeled = true
}
if strings.HasPrefix(l.Function.Name, "runtime.") {
// There are many places in the runtime
// where we can't do a full traceback.
// Ideally we'd list them all, but
// barring that allow anything in the
// runtime, unless explicitly excluded
// above.
mayBeLabeled = true
}
}
}
if mustNotBeLabeled {
// If this must not be labeled, then mayBeLabeled hints
// are not relevant.
mayBeLabeled = false
}
if mustBeLabeled && !isLabeled {
var buf bytes.Buffer
fprintStack(&buf, s.Location)
t.Errorf("Sample labeled got false want true: %s", buf.String())
}
if mustNotBeLabeled && isLabeled {
var buf bytes.Buffer
fprintStack(&buf, s.Location)
t.Errorf("Sample labeled got true want false: %s", buf.String())
}
if isLabeled && !(mayBeLabeled || mustBeLabeled) {
var buf bytes.Buffer
fprintStack(&buf, s.Location)
t.Errorf("Sample labeled got true want false: %s", buf.String())
}
}
}
// labelHog is designed to burn CPU time in a way that causes a high number of
// CPU samples to end up running on systemstack.
func labelHog(stop chan struct{}, gogc int) {
// Regression test for issue 50032. We must give GC an opportunity to
// be initially triggered by a labelled goroutine.
runtime.GC()
for i := 0; ; i++ {
select {
case <-stop:
return
default:
debug.SetGCPercent(gogc)
}
}
}
// parallelLabelHog runs GOMAXPROCS goroutines running labelHog.
func parallelLabelHog(ctx context.Context, dur time.Duration, gogc int) {
var wg sync.WaitGroup
stop := make(chan struct{})
for i := 0; i < runtime.GOMAXPROCS(0); i++ {
wg.Add(1)
go func() {
defer wg.Done()
labelHog(stop, gogc)
}()
}
time.Sleep(dur)
close(stop)
wg.Wait()
}
// Check that there is no deadlock when the program receives SIGPROF while in
// 64bit atomics' critical section. Used to happen on mips{,le}. See #20146.
func TestAtomicLoadStore64(t *testing.T) {
f, err := os.CreateTemp("", "profatomic")
if err != nil {
t.Fatalf("TempFile: %v", err)
}
defer os.Remove(f.Name())
defer f.Close()
if err := StartCPUProfile(f); err != nil {
t.Fatal(err)
}
defer StopCPUProfile()
var flag uint64
done := make(chan bool, 1)
go func() {
for atomic.LoadUint64(&flag) == 0 {
runtime.Gosched()
}
done <- true
}()
time.Sleep(50 * time.Millisecond)
atomic.StoreUint64(&flag, 1)
<-done
}
func TestTracebackAll(t *testing.T) {
// With gccgo, if a profiling signal arrives at the wrong time
// during traceback, it may crash or hang. See issue #29448.
f, err := os.CreateTemp("", "proftraceback")
if err != nil {
t.Fatalf("TempFile: %v", err)
}
defer os.Remove(f.Name())
defer f.Close()
if err := StartCPUProfile(f); err != nil {
t.Fatal(err)
}
defer StopCPUProfile()
ch := make(chan int)
defer close(ch)
count := 10
for i := 0; i < count; i++ {
go func() {
<-ch // block
}()
}
N := 10000
if testing.Short() {
N = 500
}
buf := make([]byte, 10*1024)
for i := 0; i < N; i++ {
runtime.Stack(buf, true)
}
}
// TestTryAdd tests the cases that are hard to test with real program execution.
//
// For example, the current go compilers may not always inline functions
// involved in recursion but that may not be true in future compilers. This
// tests such cases by using fake call sequences and forcing the profile build
// using translateCPUProfile defined in proto_test.go.
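//
// Informal note on the fake input format (inferred from the cases below): each
// record is a flat []uint64 of the form [len, 0, count, pc, pc, ...], and the
// leading [3, 0, 500] record sets the profiling rate (hz), which must match
// the period used for the expected sample values.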
func TestTryAdd(t *testing.T) {
if _, found := findInlinedCall(inlinedCallerDump, 4<<10); !found {
t.Skip("Can't determine whether anything was inlined into inlinedCallerDump.")
}
// inlinedCallerDump
// inlinedCalleeDump
pcs := make([]uintptr, 2)
inlinedCallerDump(pcs)
inlinedCallerStack := make([]uint64, 2)
for i := range pcs {
inlinedCallerStack[i] = uint64(pcs[i])
}
if _, found := findInlinedCall(recursionChainBottom, 4<<10); !found {
t.Skip("Can't determine whether anything was inlined into recursionChainBottom.")
}
// recursionChainTop
// recursionChainMiddle
// recursionChainBottom
// recursionChainTop
// recursionChainMiddle
// recursionChainBottom
pcs = make([]uintptr, 6)
recursionChainTop(1, pcs)
recursionStack := make([]uint64, len(pcs))
for i := range pcs {
recursionStack[i] = uint64(pcs[i])
}
period := int64(2000 * 1000) // 1/500*1e9 nanosec.
testCases := []struct {
name string
input []uint64 // following the input format assumed by profileBuilder.addCPUData.
count int // number of records in input.
wantLocs [][]string // ordered location entries with function names.
wantSamples []*profile.Sample // ordered samples, we care only about Value and the profile location IDs.
}{{
// Sanity test for a normal, complete stack trace.
name: "full_stack_trace",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
},
count: 2,
wantLocs: [][]string{
{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
},
wantSamples: []*profile.Sample{
{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
name: "bug35538",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
// Fake frame: tryAdd will have inlinedCallerDump
// (stack[1]) on the deck when it encounters the next
// inline function. It should accept this.
7, 0, 10, inlinedCallerStack[0], inlinedCallerStack[1], inlinedCallerStack[0], inlinedCallerStack[1],
5, 0, 20, inlinedCallerStack[0], inlinedCallerStack[1],
},
count: 3,
wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
wantSamples: []*profile.Sample{
{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
name: "bug38096",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
// count (data[2]) == 0 && len(stk) == 1 is an overflow
// entry. The "stk" entry is actually the count.
4, 0, 0, 4242,
},
count: 2,
wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
wantSamples: []*profile.Sample{
{Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
// If a function is directly called recursively then it must
// not be inlined in the caller.
//
// N.B. We're generating an impossible profile here, with a
		// recursive inlinedCalleeDump call. This is simulating a non-Go
// function that looks like an inlined Go function other than
// its recursive property. See pcDeck.tryAdd.
name: "directly_recursive_func_is_not_inlined",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
5, 0, 30, inlinedCallerStack[0], inlinedCallerStack[0],
4, 0, 40, inlinedCallerStack[0],
},
count: 3,
// inlinedCallerDump shows up here because
// runtime_expandFinalInlineFrame adds it to the stack frame.
wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump"}, {"runtime/pprof.inlinedCallerDump"}},
wantSamples: []*profile.Sample{
{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
},
}, {
name: "recursion_chain_inline",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
9, 0, 10, recursionStack[0], recursionStack[1], recursionStack[2], recursionStack[3], recursionStack[4], recursionStack[5],
},
count: 2,
wantLocs: [][]string{
{"runtime/pprof.recursionChainBottom"},
{
"runtime/pprof.recursionChainMiddle",
"runtime/pprof.recursionChainTop",
"runtime/pprof.recursionChainBottom",
},
{
"runtime/pprof.recursionChainMiddle",
"runtime/pprof.recursionChainTop",
"runtime/pprof.TestTryAdd", // inlined into the test.
},
},
wantSamples: []*profile.Sample{
{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}, {ID: 3}}},
},
}, {
name: "truncated_stack_trace_later",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
4, 0, 60, inlinedCallerStack[0],
},
count: 3,
wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
wantSamples: []*profile.Sample{
{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
{Value: []int64{60, 60 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
name: "truncated_stack_trace_first",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
4, 0, 70, inlinedCallerStack[0],
5, 0, 80, inlinedCallerStack[0], inlinedCallerStack[1],
},
count: 3,
wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
wantSamples: []*profile.Sample{
{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
// We can recover the inlined caller from a truncated stack.
name: "truncated_stack_trace_only",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
4, 0, 70, inlinedCallerStack[0],
},
count: 2,
wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
wantSamples: []*profile.Sample{
{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
// The same location is used for duplicated stacks.
name: "truncated_stack_trace_twice",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
4, 0, 70, inlinedCallerStack[0],
// Fake frame: add a fake call to
// inlinedCallerDump to prevent this sample
// from getting merged into above.
5, 0, 80, inlinedCallerStack[1], inlinedCallerStack[0],
},
count: 3,
wantLocs: [][]string{
{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
{"runtime/pprof.inlinedCallerDump"},
},
wantSamples: []*profile.Sample{
{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
},
}}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
p, err := translateCPUProfile(tc.input, tc.count)
if err != nil {
t.Fatalf("translating profile: %v", err)
}
t.Logf("Profile: %v\n", p)
// One location entry with all inlined functions.
var gotLoc [][]string
for _, loc := range p.Location {
var names []string
for _, line := range loc.Line {
names = append(names, line.Function.Name)
}
gotLoc = append(gotLoc, names)
}
if got, want := fmtJSON(gotLoc), fmtJSON(tc.wantLocs); got != want {
t.Errorf("Got Location = %+v\n\twant %+v", got, want)
}
// All samples should point to one location.
var gotSamples []*profile.Sample
for _, sample := range p.Sample {
var locs []*profile.Location
for _, loc := range sample.Location {
locs = append(locs, &profile.Location{ID: loc.ID})
}
gotSamples = append(gotSamples, &profile.Sample{Value: sample.Value, Location: locs})
}
if got, want := fmtJSON(gotSamples), fmtJSON(tc.wantSamples); got != want {
t.Errorf("Got Samples = %+v\n\twant %+v", got, want)
}
})
}
}
func TestTimeVDSO(t *testing.T) {
// Test that time functions have the right stack trace. In particular,
// it shouldn't be recursive.
if runtime.GOOS == "android" {
// Flaky on Android, issue 48655. VDSO may not be enabled.
testenv.SkipFlaky(t, 48655)
}
matches := matchAndAvoidStacks(stackContains, []string{"time.now"}, avoidFunctions())
p := testCPUProfile(t, matches, func(dur time.Duration) {
t0 := time.Now()
for {
t := time.Now()
if t.Sub(t0) >= dur {
return
}
}
})
// Check for recursive time.now sample.
for _, sample := range p.Sample {
var seenNow bool
for _, loc := range sample.Location {
for _, line := range loc.Line {
if line.Function.Name == "time.now" {
if seenNow {
t.Fatalf("unexpected recursive time.now")
}
seenNow = true
}
}
}
}
}
|
[
"\"IN_QEMU\""
] |
[] |
[
"IN_QEMU"
] |
[]
|
["IN_QEMU"]
|
go
| 1 | 0 | |
tools/power/pm-graph/sleepgraph.py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# Authors:
# Todd Brandt <[email protected]>
#
# Links:
# Home Page
# https://01.org/pm-graph
# Source repo
# [email protected]:intel/pm-graph
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's suspend/resume time. Using a kernel image built
# with a few extra options enabled, the tool will execute a suspend and
# will capture dmesg and ftrace data until resume is complete. This data
# is transformed into a device timeline and a callgraph to give a quick
# and detailed view of which devices and callbacks are taking the most
# time in suspend/resume. The output is a single html file which can be
# viewed in firefox or chrome.
#
# The following kernel build options are required:
# CONFIG_DEVMEM=y
# CONFIG_PM_DEBUG=y
# CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER=y
# CONFIG_FUNCTION_GRAPH_TRACER=y
# CONFIG_KPROBES=y
# CONFIG_KPROBES_ON_FTRACE=y
#
# For kernel versions older than 3.15:
# The following additional kernel parameters are required:
# (e.g. in file /etc/default/grub)
# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
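#
# Example usage (illustrative only; run the tool with -h for the full and
# authoritative option list):
#   sudo ./sleepgraph.py -m mem -rtcwake 15 -verbose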
#
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
import signal
import codecs
from datetime import datetime, timedelta
import struct
import configparser
import gzip
from threading import Thread
from subprocess import call, Popen, PIPE
import base64
def pprint(msg):
print(msg)
sys.stdout.flush()
def ascii(text):
return text.decode('ascii', 'ignore')
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues:
title = 'SleepGraph'
version = '5.8'
ansi = False
rs = 0
display = ''
gzip = False
sync = False
wifi = False
verbose = False
testlog = True
dmesglog = True
ftracelog = False
acpidebug = True
tstat = True
mindevlen = 0.0001
mincglen = 0.0
cgphase = ''
cgtest = -1
cgskip = ''
maxfail = 0
multitest = {'run': False, 'count': 1000000, 'delay': 0}
max_graph_depth = 0
callloopmaxgap = 0.0001
callloopmaxlen = 0.005
bufsize = 0
cpucount = 0
memtotal = 204800
memfree = 204800
srgap = 0
cgexp = False
testdir = ''
outdir = ''
tpath = '/sys/kernel/debug/tracing/'
fpdtpath = '/sys/firmware/acpi/tables/FPDT'
epath = '/sys/kernel/debug/tracing/events/power/'
pmdpath = '/sys/power/pm_debug_messages'
acpipath='/sys/module/acpi/parameters/debug_level'
traceevents = [
'suspend_resume',
'wakeup_source_activate',
'wakeup_source_deactivate',
'device_pm_callback_end',
'device_pm_callback_start'
]
logmsg = ''
testcommand = ''
mempath = '/dev/mem'
powerfile = '/sys/power/state'
mempowerfile = '/sys/power/mem_sleep'
diskpowerfile = '/sys/power/disk'
suspendmode = 'mem'
memmode = ''
diskmode = ''
hostname = 'localhost'
prefix = 'test'
teststamp = ''
sysstamp = ''
dmesgstart = 0.0
dmesgfile = ''
ftracefile = ''
htmlfile = 'output.html'
result = ''
rtcwake = True
rtcwaketime = 15
rtcpath = ''
devicefilter = []
cgfilter = []
stamp = 0
execcount = 1
x2delay = 0
skiphtml = False
usecallgraph = False
ftopfunc = 'pm_suspend'
ftop = False
usetraceevents = False
usetracemarkers = True
usekprobes = True
usedevsrc = False
useprocmon = False
notestrun = False
cgdump = False
devdump = False
mixedphaseheight = True
devprops = dict()
cfgdef = dict()
platinfo = []
predelay = 0
postdelay = 0
tmstart = 'SUSPEND START %Y%m%d-%H:%M:%S.%f'
tmend = 'RESUME COMPLETE %Y%m%d-%H:%M:%S.%f'
tracefuncs = {
'sys_sync': {},
'ksys_sync': {},
'__pm_notifier_call_chain': {},
'pm_prepare_console': {},
'pm_notifier_call_chain': {},
'freeze_processes': {},
'freeze_kernel_threads': {},
'pm_restrict_gfp_mask': {},
'acpi_suspend_begin': {},
'acpi_hibernation_begin': {},
'acpi_hibernation_enter': {},
'acpi_hibernation_leave': {},
'acpi_pm_freeze': {},
'acpi_pm_thaw': {},
'acpi_s2idle_end': {},
'acpi_s2idle_sync': {},
'acpi_s2idle_begin': {},
'acpi_s2idle_prepare': {},
'acpi_s2idle_prepare_late': {},
'acpi_s2idle_wake': {},
'acpi_s2idle_wakeup': {},
'acpi_s2idle_restore': {},
'acpi_s2idle_restore_early': {},
'hibernate_preallocate_memory': {},
'create_basic_memory_bitmaps': {},
'swsusp_write': {},
'suspend_console': {},
'acpi_pm_prepare': {},
'syscore_suspend': {},
'arch_enable_nonboot_cpus_end': {},
'syscore_resume': {},
'acpi_pm_finish': {},
'resume_console': {},
'acpi_pm_end': {},
'pm_restore_gfp_mask': {},
'thaw_processes': {},
'pm_restore_console': {},
'CPU_OFF': {
'func':'_cpu_down',
'args_x86_64': {'cpu':'%di:s32'},
'format': 'CPU_OFF[{cpu}]'
},
'CPU_ON': {
'func':'_cpu_up',
'args_x86_64': {'cpu':'%di:s32'},
'format': 'CPU_ON[{cpu}]'
},
}
dev_tracefuncs = {
# general wait/delay/sleep
'msleep': { 'args_x86_64': {'time':'%di:s32'}, 'ub': 1 },
'schedule_timeout': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 },
'udelay': { 'func':'__const_udelay', 'args_x86_64': {'loops':'%di:s32'}, 'ub': 1 },
'usleep_range': { 'args_x86_64': {'min':'%di:s32', 'max':'%si:s32'}, 'ub': 1 },
'mutex_lock_slowpath': { 'func':'__mutex_lock_slowpath', 'ub': 1 },
'acpi_os_stall': {'ub': 1},
'rt_mutex_slowlock': {'ub': 1},
# ACPI
'acpi_resume_power_resources': {},
'acpi_ps_execute_method': { 'args_x86_64': {
'fullpath':'+0(+40(%di)):string',
}},
# mei_me
'mei_reset': {},
# filesystem
'ext4_sync_fs': {},
# 80211
'ath10k_bmi_read_memory': { 'args_x86_64': {'length':'%cx:s32'} },
'ath10k_bmi_write_memory': { 'args_x86_64': {'length':'%cx:s32'} },
'ath10k_bmi_fast_download': { 'args_x86_64': {'length':'%cx:s32'} },
'iwlagn_mac_start': {},
'iwlagn_alloc_bcast_station': {},
'iwl_trans_pcie_start_hw': {},
'iwl_trans_pcie_start_fw': {},
'iwl_run_init_ucode': {},
'iwl_load_ucode_wait_alive': {},
'iwl_alive_start': {},
'iwlagn_mac_stop': {},
'iwlagn_mac_suspend': {},
'iwlagn_mac_resume': {},
'iwlagn_mac_add_interface': {},
'iwlagn_mac_remove_interface': {},
'iwlagn_mac_change_interface': {},
'iwlagn_mac_config': {},
'iwlagn_configure_filter': {},
'iwlagn_mac_hw_scan': {},
'iwlagn_bss_info_changed': {},
'iwlagn_mac_channel_switch': {},
'iwlagn_mac_flush': {},
# ATA
'ata_eh_recover': { 'args_x86_64': {'port':'+36(%di):s32'} },
# i915
'i915_gem_resume': {},
'i915_restore_state': {},
'intel_opregion_setup': {},
'g4x_pre_enable_dp': {},
'vlv_pre_enable_dp': {},
'chv_pre_enable_dp': {},
'g4x_enable_dp': {},
'vlv_enable_dp': {},
'intel_hpd_init': {},
'intel_opregion_register': {},
'intel_dp_detect': {},
'intel_hdmi_detect': {},
'intel_opregion_init': {},
'intel_fbdev_set_suspend': {},
}
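	# infocmds entries are [delta, name, cmd, args...]; per cmdinfo() further
	# below, delta 0 commands run only once after the test, while delta 1/2
	# commands run before and after and have their dictify()'d output diffed.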
infocmds = [
[0, 'kparams', 'cat', '/proc/cmdline'],
[0, 'mcelog', 'mcelog'],
[0, 'pcidevices', 'lspci', '-tv'],
[0, 'usbdevices', 'lsusb', '-t'],
[1, 'interrupts', 'cat', '/proc/interrupts'],
[1, 'wakeups', 'cat', '/sys/kernel/debug/wakeup_sources'],
[2, 'gpecounts', 'sh', '-c', 'grep -v invalid /sys/firmware/acpi/interrupts/*'],
[2, 'suspendstats', 'sh', '-c', 'grep -v invalid /sys/power/suspend_stats/*'],
[2, 'cpuidle', 'sh', '-c', 'grep -v invalid /sys/devices/system/cpu/cpu*/cpuidle/state*/s2idle/*'],
[2, 'battery', 'sh', '-c', 'grep -v invalid /sys/class/power_supply/*/*'],
]
cgblacklist = []
kprobes = dict()
timeformat = '%.3f'
cmdline = '%s %s' % \
(os.path.basename(sys.argv[0]), ' '.join(sys.argv[1:]))
sudouser = ''
def __init__(self):
self.archargs = 'args_'+platform.machine()
self.hostname = platform.node()
if(self.hostname == ''):
self.hostname = 'localhost'
rtc = "rtc0"
if os.path.exists('/dev/rtc'):
rtc = os.readlink('/dev/rtc')
rtc = '/sys/class/rtc/'+rtc
if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \
os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'):
self.rtcpath = rtc
if (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()):
self.ansi = True
self.testdir = datetime.now().strftime('suspend-%y%m%d-%H%M%S')
if os.getuid() == 0 and 'SUDO_USER' in os.environ and \
os.environ['SUDO_USER']:
self.sudouser = os.environ['SUDO_USER']
def resetlog(self):
self.logmsg = ''
self.platinfo = []
def vprint(self, msg):
self.logmsg += msg+'\n'
if self.verbose or msg.startswith('WARNING:'):
pprint(msg)
def signalHandler(self, signum, frame):
if not self.result:
return
signame = self.signames[signum] if signum in self.signames else 'UNKNOWN'
msg = 'Signal %s caused a tool exit, line %d' % (signame, frame.f_lineno)
self.outputResult({'error':msg})
sys.exit(3)
def signalHandlerInit(self):
capture = ['BUS', 'SYS', 'XCPU', 'XFSZ', 'PWR', 'HUP', 'INT', 'QUIT',
'ILL', 'ABRT', 'FPE', 'SEGV', 'TERM']
self.signames = dict()
for i in capture:
s = 'SIG'+i
try:
signum = getattr(signal, s)
signal.signal(signum, self.signalHandler)
except:
continue
self.signames[signum] = s
def rootCheck(self, fatal=True):
if(os.access(self.powerfile, os.W_OK)):
return True
if fatal:
msg = 'This command requires sysfs mount and root access'
pprint('ERROR: %s\n' % msg)
self.outputResult({'error':msg})
sys.exit(1)
return False
def rootUser(self, fatal=False):
if 'USER' in os.environ and os.environ['USER'] == 'root':
return True
if fatal:
msg = 'This command must be run as root'
pprint('ERROR: %s\n' % msg)
self.outputResult({'error':msg})
sys.exit(1)
return False
def usable(self, file):
return (os.path.exists(file) and os.path.getsize(file) > 0)
def getExec(self, cmd):
try:
fp = Popen(['which', cmd], stdout=PIPE, stderr=PIPE).stdout
out = ascii(fp.read()).strip()
fp.close()
except:
out = ''
if out:
return out
for path in ['/sbin', '/bin', '/usr/sbin', '/usr/bin',
'/usr/local/sbin', '/usr/local/bin']:
cmdfull = os.path.join(path, cmd)
if os.path.exists(cmdfull):
return cmdfull
return out
def setPrecision(self, num):
if num < 0 or num > 6:
return
self.timeformat = '%.{0}f'.format(num)
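	# setOutputFolder expands a folder-name template; supported placeholders per
	# the code below are {date}, {time}, {hostname}/{host} and {mode}. For example
	# (illustrative values) 'suspend-{hostname}-{date}-{time}' could become
	# 'suspend-myhost-240101-120000'.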
def setOutputFolder(self, value):
args = dict()
n = datetime.now()
args['date'] = n.strftime('%y%m%d')
args['time'] = n.strftime('%H%M%S')
args['hostname'] = args['host'] = self.hostname
args['mode'] = self.suspendmode
return value.format(**args)
def setOutputFile(self):
if self.dmesgfile != '':
m = re.match('(?P<name>.*)_dmesg\.txt.*', self.dmesgfile)
if(m):
self.htmlfile = m.group('name')+'.html'
if self.ftracefile != '':
m = re.match('(?P<name>.*)_ftrace\.txt.*', self.ftracefile)
if(m):
self.htmlfile = m.group('name')+'.html'
def systemInfo(self, info):
p = m = ''
if 'baseboard-manufacturer' in info:
m = info['baseboard-manufacturer']
elif 'system-manufacturer' in info:
m = info['system-manufacturer']
if 'system-product-name' in info:
p = info['system-product-name']
elif 'baseboard-product-name' in info:
p = info['baseboard-product-name']
if m[:5].lower() == 'intel' and 'baseboard-product-name' in info:
p = info['baseboard-product-name']
c = info['processor-version'] if 'processor-version' in info else ''
b = info['bios-version'] if 'bios-version' in info else ''
r = info['bios-release-date'] if 'bios-release-date' in info else ''
self.sysstamp = '# sysinfo | man:%s | plat:%s | cpu:%s | bios:%s | biosdate:%s | numcpu:%d | memsz:%d | memfr:%d' % \
(m, p, c, b, r, self.cpucount, self.memtotal, self.memfree)
def printSystemInfo(self, fatal=False):
self.rootCheck(True)
out = dmidecode(self.mempath, fatal)
if len(out) < 1:
return
fmt = '%-24s: %s'
for name in sorted(out):
print(fmt % (name, out[name]))
print(fmt % ('cpucount', ('%d' % self.cpucount)))
print(fmt % ('memtotal', ('%d kB' % self.memtotal)))
print(fmt % ('memfree', ('%d kB' % self.memfree)))
def cpuInfo(self):
self.cpucount = 0
fp = open('/proc/cpuinfo', 'r')
for line in fp:
if re.match('^processor[ \t]*:[ \t]*[0-9]*', line):
self.cpucount += 1
fp.close()
fp = open('/proc/meminfo', 'r')
for line in fp:
m = re.match('^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memtotal = int(m.group('sz'))
m = re.match('^MemFree:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memfree = int(m.group('sz'))
fp.close()
def initTestOutput(self, name):
self.prefix = self.hostname
v = open('/proc/version', 'r').read().strip()
kver = v.split()[2]
fmt = name+'-%m%d%y-%H%M%S'
testtime = datetime.now().strftime(fmt)
self.teststamp = \
'# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver
ext = ''
if self.gzip:
ext = '.gz'
self.dmesgfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt'+ext
self.ftracefile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt'+ext
self.htmlfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html'
if not os.path.isdir(self.testdir):
os.makedirs(self.testdir)
self.sudoUserchown(self.testdir)
def getValueList(self, value):
out = []
for i in value.split(','):
if i.strip():
out.append(i.strip())
return out
def setDeviceFilter(self, value):
self.devicefilter = self.getValueList(value)
def setCallgraphFilter(self, value):
self.cgfilter = self.getValueList(value)
def skipKprobes(self, value):
for k in self.getValueList(value):
if k in self.tracefuncs:
del self.tracefuncs[k]
if k in self.dev_tracefuncs:
del self.dev_tracefuncs[k]
def setCallgraphBlacklist(self, file):
self.cgblacklist = self.listFromFile(file)
def rtcWakeAlarmOn(self):
call('echo 0 > '+self.rtcpath+'/wakealarm', shell=True)
nowtime = open(self.rtcpath+'/since_epoch', 'r').read().strip()
if nowtime:
nowtime = int(nowtime)
else:
# if hardware time fails, use the software time
nowtime = int(datetime.now().strftime('%s'))
alarm = nowtime + self.rtcwaketime
call('echo %d > %s/wakealarm' % (alarm, self.rtcpath), shell=True)
def rtcWakeAlarmOff(self):
call('echo 0 > %s/wakealarm' % self.rtcpath, shell=True)
def initdmesg(self):
# get the latest time stamp from the dmesg log
lines = Popen('dmesg', stdout=PIPE).stdout.readlines()
ktime = '0'
for line in reversed(lines):
line = ascii(line).replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
ktime = m.group('ktime')
break
self.dmesgstart = float(ktime)
def getdmesg(self, testdata):
op = self.writeDatafileHeader(self.dmesgfile, testdata)
# store all new dmesg lines since initdmesg was called
fp = Popen('dmesg', stdout=PIPE).stdout
for line in fp:
line = ascii(line).replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
ktime = float(m.group('ktime'))
if ktime > self.dmesgstart:
op.write(line)
fp.close()
op.close()
def listFromFile(self, file):
list = []
fp = open(file)
for i in fp.read().split('\n'):
i = i.strip()
if i and i[0] != '#':
list.append(i)
fp.close()
return list
def addFtraceFilterFunctions(self, file):
for i in self.listFromFile(file):
if len(i) < 2:
continue
self.tracefuncs[i] = dict()
def getFtraceFilterFunctions(self, current):
self.rootCheck(True)
if not current:
call('cat '+self.tpath+'available_filter_functions', shell=True)
return
master = self.listFromFile(self.tpath+'available_filter_functions')
for i in sorted(self.tracefuncs):
if 'func' in self.tracefuncs[i]:
i = self.tracefuncs[i]['func']
if i in master:
print(i)
else:
print(self.colorText(i))
def setFtraceFilterFunctions(self, list):
master = self.listFromFile(self.tpath+'available_filter_functions')
flist = ''
for i in list:
if i not in master:
continue
if ' [' in i:
flist += i.split(' ')[0]+'\n'
else:
flist += i+'\n'
fp = open(self.tpath+'set_graph_function', 'w')
fp.write(flist)
fp.close()
def basicKprobe(self, name):
self.kprobes[name] = {'name': name,'func': name,'args': dict(),'format': name}
def defaultKprobe(self, name, kdata):
k = kdata
for field in ['name', 'format', 'func']:
if field not in k:
k[field] = name
if self.archargs in k:
k['args'] = k[self.archargs]
else:
k['args'] = dict()
k['format'] = name
self.kprobes[name] = k
def kprobeColor(self, name):
if name not in self.kprobes or 'color' not in self.kprobes[name]:
return ''
return self.kprobes[name]['color']
def kprobeDisplayName(self, name, dataraw):
if name not in self.kprobes:
self.basicKprobe(name)
data = ''
quote=0
		# first remove any spaces inside quotes, and the quotes
for c in dataraw:
if c == '"':
quote = (quote + 1) % 2
if quote and c == ' ':
data += '_'
elif c != '"':
data += c
fmt, args = self.kprobes[name]['format'], self.kprobes[name]['args']
arglist = dict()
# now process the args
for arg in sorted(args):
arglist[arg] = ''
m = re.match('.* '+arg+'=(?P<arg>.*) ', data);
if m:
arglist[arg] = m.group('arg')
else:
m = re.match('.* '+arg+'=(?P<arg>.*)', data);
if m:
arglist[arg] = m.group('arg')
out = fmt.format(**arglist)
out = out.replace(' ', '_').replace('"', '')
return out
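	# kprobeText emits kprobe_events definitions; e.g. for the msleep entry in
	# dev_tracefuncs above it would produce (roughly):
	#   p:msleep_cal msleep time=%di:s32
	#   r:msleep_ret msleep $retval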
def kprobeText(self, kname, kprobe):
name = fmt = func = kname
args = dict()
if 'name' in kprobe:
name = kprobe['name']
if 'format' in kprobe:
fmt = kprobe['format']
if 'func' in kprobe:
func = kprobe['func']
if self.archargs in kprobe:
args = kprobe[self.archargs]
if 'args' in kprobe:
args = kprobe['args']
if re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', func):
doError('Kprobe "%s" has format info in the function name "%s"' % (name, func))
for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', fmt):
if arg not in args:
doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
val = 'p:%s_cal %s' % (name, func)
for i in sorted(args):
val += ' %s=%s' % (i, args[i])
val += '\nr:%s_ret %s $retval\n' % (name, func)
return val
def addKprobes(self, output=False):
if len(self.kprobes) < 1:
return
if output:
pprint(' kprobe functions in this kernel:')
# first test each kprobe
rejects = []
# sort kprobes: trace, ub-dev, custom, dev
kpl = [[], [], [], []]
linesout = len(self.kprobes)
for name in sorted(self.kprobes):
res = self.colorText('YES', 32)
if not self.testKprobe(name, self.kprobes[name]):
res = self.colorText('NO')
rejects.append(name)
else:
if name in self.tracefuncs:
kpl[0].append(name)
elif name in self.dev_tracefuncs:
if 'ub' in self.dev_tracefuncs[name]:
kpl[1].append(name)
else:
kpl[3].append(name)
else:
kpl[2].append(name)
if output:
pprint(' %s: %s' % (name, res))
kplist = kpl[0] + kpl[1] + kpl[2] + kpl[3]
# remove all failed ones from the list
for name in rejects:
self.kprobes.pop(name)
# set the kprobes all at once
self.fsetVal('', 'kprobe_events')
kprobeevents = ''
for kp in kplist:
kprobeevents += self.kprobeText(kp, self.kprobes[kp])
self.fsetVal(kprobeevents, 'kprobe_events')
if output:
check = self.fgetVal('kprobe_events')
linesack = (len(check.split('\n')) - 1) // 2
pprint(' kprobe functions enabled: %d/%d' % (linesack, linesout))
self.fsetVal('1', 'events/kprobes/enable')
def testKprobe(self, kname, kprobe):
self.fsetVal('0', 'events/kprobes/enable')
kprobeevents = self.kprobeText(kname, kprobe)
if not kprobeevents:
return False
try:
self.fsetVal(kprobeevents, 'kprobe_events')
check = self.fgetVal('kprobe_events')
except:
return False
linesout = len(kprobeevents.split('\n'))
linesack = len(check.split('\n'))
if linesack < linesout:
return False
return True
def setVal(self, val, file):
if not os.path.exists(file):
return False
try:
fp = open(file, 'wb', 0)
fp.write(val.encode())
fp.flush()
fp.close()
except:
return False
return True
def fsetVal(self, val, path):
return self.setVal(val, self.tpath+path)
def getVal(self, file):
res = ''
if not os.path.exists(file):
return res
try:
fp = open(file, 'r')
res = fp.read()
fp.close()
except:
pass
return res
def fgetVal(self, path):
return self.getVal(self.tpath+path)
def cleanupFtrace(self):
if(self.usecallgraph or self.usetraceevents or self.usedevsrc):
self.fsetVal('0', 'events/kprobes/enable')
self.fsetVal('', 'kprobe_events')
self.fsetVal('1024', 'buffer_size_kb')
def setupAllKprobes(self):
for name in self.tracefuncs:
self.defaultKprobe(name, self.tracefuncs[name])
for name in self.dev_tracefuncs:
self.defaultKprobe(name, self.dev_tracefuncs[name])
def isCallgraphFunc(self, name):
if len(self.tracefuncs) < 1 and self.suspendmode == 'command':
return True
for i in self.tracefuncs:
if 'func' in self.tracefuncs[i]:
f = self.tracefuncs[i]['func']
else:
f = i
if name == f:
return True
return False
def initFtrace(self, quiet=False):
if not quiet:
sysvals.printSystemInfo(False)
pprint('INITIALIZING FTRACE...')
# turn trace off
self.fsetVal('0', 'tracing_on')
self.cleanupFtrace()
self.testVal(self.pmdpath, 'basic', '1')
# set the trace clock to global
self.fsetVal('global', 'trace_clock')
self.fsetVal('nop', 'current_tracer')
# set trace buffer to an appropriate value
cpus = max(1, self.cpucount)
if self.bufsize > 0:
tgtsize = self.bufsize
elif self.usecallgraph or self.usedevsrc:
bmax = (1*1024*1024) if self.suspendmode in ['disk', 'command'] \
else (3*1024*1024)
tgtsize = min(self.memfree, bmax)
else:
tgtsize = 65536
while not self.fsetVal('%d' % (tgtsize // cpus), 'buffer_size_kb'):
# if the size failed to set, lower it and keep trying
tgtsize -= 65536
if tgtsize < 65536:
tgtsize = int(self.fgetVal('buffer_size_kb')) * cpus
break
self.vprint('Setting trace buffers to %d kB (%d kB per cpu)' % (tgtsize, tgtsize/cpus))
# initialize the callgraph trace
if(self.usecallgraph):
# set trace type
self.fsetVal('function_graph', 'current_tracer')
self.fsetVal('', 'set_ftrace_filter')
# set trace format options
self.fsetVal('print-parent', 'trace_options')
self.fsetVal('funcgraph-abstime', 'trace_options')
self.fsetVal('funcgraph-cpu', 'trace_options')
self.fsetVal('funcgraph-duration', 'trace_options')
self.fsetVal('funcgraph-proc', 'trace_options')
self.fsetVal('funcgraph-tail', 'trace_options')
self.fsetVal('nofuncgraph-overhead', 'trace_options')
self.fsetVal('context-info', 'trace_options')
self.fsetVal('graph-time', 'trace_options')
self.fsetVal('%d' % self.max_graph_depth, 'max_graph_depth')
cf = ['dpm_run_callback']
if(self.usetraceevents):
cf += ['dpm_prepare', 'dpm_complete']
for fn in self.tracefuncs:
if 'func' in self.tracefuncs[fn]:
cf.append(self.tracefuncs[fn]['func'])
else:
cf.append(fn)
if self.ftop:
self.setFtraceFilterFunctions([self.ftopfunc])
else:
self.setFtraceFilterFunctions(cf)
# initialize the kprobe trace
elif self.usekprobes:
for name in self.tracefuncs:
self.defaultKprobe(name, self.tracefuncs[name])
if self.usedevsrc:
for name in self.dev_tracefuncs:
self.defaultKprobe(name, self.dev_tracefuncs[name])
if not quiet:
pprint('INITIALIZING KPROBES...')
self.addKprobes(self.verbose)
if(self.usetraceevents):
# turn trace events on
events = iter(self.traceevents)
for e in events:
self.fsetVal('1', 'events/power/'+e+'/enable')
# clear the trace buffer
self.fsetVal('', 'trace')
def verifyFtrace(self):
# files needed for any trace data
files = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock',
'trace_marker', 'trace_options', 'tracing_on']
# files needed for callgraph trace data
tp = self.tpath
if(self.usecallgraph):
files += [
'available_filter_functions',
'set_ftrace_filter',
'set_graph_function'
]
for f in files:
if(os.path.exists(tp+f) == False):
return False
return True
def verifyKprobes(self):
# files needed for kprobes to work
files = ['kprobe_events', 'events']
tp = self.tpath
for f in files:
if(os.path.exists(tp+f) == False):
return False
return True
def colorText(self, str, color=31):
if not self.ansi:
return str
return '\x1B[%d;40m%s\x1B[m' % (color, str)
def writeDatafileHeader(self, filename, testdata):
fp = self.openlog(filename, 'w')
fp.write('%s\n%s\n# command | %s\n' % (self.teststamp, self.sysstamp, self.cmdline))
for test in testdata:
if 'fw' in test:
fw = test['fw']
if(fw):
fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
if 'turbo' in test:
fp.write('# turbostat %s\n' % test['turbo'])
if 'wifi' in test:
fp.write('# wifi %s\n' % test['wifi'])
if test['error'] or len(testdata) > 1:
fp.write('# enter_sleep_error %s\n' % test['error'])
return fp
def sudoUserchown(self, dir):
if os.path.exists(dir) and self.sudouser:
cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1'
call(cmd.format(self.sudouser, dir), shell=True)
def outputResult(self, testdata, num=0):
if not self.result:
return
n = ''
if num > 0:
n = '%d' % num
fp = open(self.result, 'a')
if 'error' in testdata:
fp.write('result%s: fail\n' % n)
fp.write('error%s: %s\n' % (n, testdata['error']))
else:
fp.write('result%s: pass\n' % n)
for v in ['suspend', 'resume', 'boot', 'lastinit']:
if v in testdata:
fp.write('%s%s: %.3f\n' % (v, n, testdata[v]))
for v in ['fwsuspend', 'fwresume']:
if v in testdata:
fp.write('%s%s: %.3f\n' % (v, n, testdata[v] / 1000000.0))
if 'bugurl' in testdata:
fp.write('url%s: %s\n' % (n, testdata['bugurl']))
fp.close()
self.sudoUserchown(self.result)
def configFile(self, file):
dir = os.path.dirname(os.path.realpath(__file__))
if os.path.exists(file):
return file
elif os.path.exists(dir+'/'+file):
return dir+'/'+file
elif os.path.exists(dir+'/config/'+file):
return dir+'/config/'+file
return ''
def openlog(self, filename, mode):
isgz = self.gzip
if mode == 'r':
try:
with gzip.open(filename, mode+'t') as fp:
test = fp.read(64)
isgz = True
except:
isgz = False
if isgz:
return gzip.open(filename, mode+'t')
return open(filename, mode)
def putlog(self, filename, text):
with self.openlog(filename, 'a') as fp:
fp.write(text)
fp.close()
def dlog(self, text):
self.putlog(self.dmesgfile, '# %s\n' % text)
def flog(self, text):
self.putlog(self.ftracefile, text)
def b64unzip(self, data):
try:
out = codecs.decode(base64.b64decode(data), 'zlib').decode()
except:
out = data
return out
def b64zip(self, data):
out = base64.b64encode(codecs.encode(data.encode(), 'zlib')).decode()
return out
def platforminfo(self, cmdafter):
# add platform info on to a completed ftrace file
if not os.path.exists(self.ftracefile):
return False
footer = '#\n'
# add test command string line if need be
if self.suspendmode == 'command' and self.testcommand:
footer += '# platform-testcmd: %s\n' % (self.testcommand)
# get a list of target devices from the ftrace file
props = dict()
tp = TestProps()
tf = self.openlog(self.ftracefile, 'r')
for line in tf:
if tp.stampInfo(line, self):
continue
			# parse only valid lines; if this is not one, move on
m = re.match(tp.ftrace_line_fmt, line)
if(not m or 'device_pm_callback_start' not in line):
continue
m = re.match('.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg'));
if(not m):
continue
dev = m.group('d')
if dev not in props:
props[dev] = DevProps()
tf.close()
# now get the syspath for each target device
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/power', dirname) and 'async' in filenames):
dev = dirname.split('/')[-2]
if dev in props and (not props[dev].syspath or len(dirname) < len(props[dev].syspath)):
props[dev].syspath = dirname[:-6]
# now fill in the properties for our target devices
for dev in sorted(props):
dirname = props[dev].syspath
if not dirname or not os.path.exists(dirname):
continue
with open(dirname+'/power/async') as fp:
text = fp.read()
props[dev].isasync = False
if 'enabled' in text:
props[dev].isasync = True
fields = os.listdir(dirname)
if 'product' in fields:
with open(dirname+'/product', 'rb') as fp:
props[dev].altname = ascii(fp.read())
elif 'name' in fields:
with open(dirname+'/name', 'rb') as fp:
props[dev].altname = ascii(fp.read())
elif 'model' in fields:
with open(dirname+'/model', 'rb') as fp:
props[dev].altname = ascii(fp.read())
elif 'description' in fields:
with open(dirname+'/description', 'rb') as fp:
props[dev].altname = ascii(fp.read())
elif 'id' in fields:
with open(dirname+'/id', 'rb') as fp:
props[dev].altname = ascii(fp.read())
elif 'idVendor' in fields and 'idProduct' in fields:
idv, idp = '', ''
with open(dirname+'/idVendor', 'rb') as fp:
idv = ascii(fp.read()).strip()
with open(dirname+'/idProduct', 'rb') as fp:
idp = ascii(fp.read()).strip()
props[dev].altname = '%s:%s' % (idv, idp)
if props[dev].altname:
out = props[dev].altname.strip().replace('\n', ' ')\
.replace(',', ' ').replace(';', ' ')
props[dev].altname = out
# add a devinfo line to the bottom of ftrace
out = ''
for dev in sorted(props):
out += props[dev].out(dev)
footer += '# platform-devinfo: %s\n' % self.b64zip(out)
# add a line for each of these commands with their outputs
for name, cmdline, info in cmdafter:
footer += '# platform-%s: %s | %s\n' % (name, cmdline, self.b64zip(info))
self.flog(footer)
return True
def commonPrefix(self, list):
if len(list) < 2:
return ''
prefix = list[0]
for s in list[1:]:
while s[:len(prefix)] != prefix and prefix:
prefix = prefix[:len(prefix)-1]
if not prefix:
break
if '/' in prefix and prefix[-1] != '/':
prefix = prefix[0:prefix.rfind('/')+1]
return prefix
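	# dictify parses command output into a dict: format 1 treats the first line
	# as a header (stored under '@') and splits the rest on the first space,
	# format 2 splits on the first ':' and keeps the first number found in the
	# value (falling back to the raw value).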
def dictify(self, text, format):
out = dict()
header = True if format == 1 else False
delim = ' ' if format == 1 else ':'
for line in text.split('\n'):
if header:
header, out['@'] = False, line
continue
line = line.strip()
if delim in line:
data = line.split(delim, 1)
num = re.search(r'[\d]+', data[1])
if format == 2 and num:
out[data[0].strip()] = num.group()
else:
out[data[0].strip()] = data[1]
return out
def cmdinfo(self, begin, debug=False):
out = []
if begin:
self.cmd1 = dict()
for cargs in self.infocmds:
delta, name = cargs[0], cargs[1]
cmdline, cmdpath = ' '.join(cargs[2:]), self.getExec(cargs[2])
if not cmdpath or (begin and not delta):
continue
self.dlog('[%s]' % cmdline)
try:
fp = Popen([cmdpath]+cargs[3:], stdout=PIPE, stderr=PIPE).stdout
info = ascii(fp.read()).strip()
fp.close()
except:
continue
if not debug and begin:
self.cmd1[name] = self.dictify(info, delta)
elif not debug and delta and name in self.cmd1:
before, after = self.cmd1[name], self.dictify(info, delta)
dinfo = ('\t%s\n' % before['@']) if '@' in before else ''
prefix = self.commonPrefix(list(before.keys()))
for key in sorted(before):
if key in after and before[key] != after[key]:
title = key.replace(prefix, '')
if delta == 2:
dinfo += '\t%s : %s -> %s\n' % \
(title, before[key].strip(), after[key].strip())
else:
dinfo += '%10s (start) : %s\n%10s (after) : %s\n' % \
(title, before[key], title, after[key])
dinfo = '\tnothing changed' if not dinfo else dinfo.rstrip()
out.append((name, cmdline, dinfo))
else:
out.append((name, cmdline, '\tnothing' if not info else info))
return out
def testVal(self, file, fmt='basic', value=''):
if file == 'restoreall':
for f in self.cfgdef:
if os.path.exists(f):
fp = open(f, 'w')
fp.write(self.cfgdef[f])
fp.close()
self.cfgdef = dict()
elif value and os.path.exists(file):
fp = open(file, 'r+')
if fmt == 'radio':
m = re.match('.*\[(?P<v>.*)\].*', fp.read())
if m:
self.cfgdef[file] = m.group('v')
elif fmt == 'acpi':
line = fp.read().strip().split('\n')[-1]
m = re.match('.* (?P<v>[0-9A-Fx]*) .*', line)
if m:
self.cfgdef[file] = m.group('v')
else:
self.cfgdef[file] = fp.read().strip()
fp.write(value)
fp.close()
def haveTurbostat(self):
if not self.tstat:
return False
cmd = self.getExec('turbostat')
if not cmd:
return False
fp = Popen([cmd, '-v'], stdout=PIPE, stderr=PIPE).stderr
out = ascii(fp.read()).strip()
fp.close()
if re.match('turbostat version .*', out):
self.vprint(out)
return True
return False
def turbostat(self):
cmd = self.getExec('turbostat')
rawout = keyline = valline = ''
fullcmd = '%s -q -S echo freeze > %s' % (cmd, self.powerfile)
fp = Popen(['sh', '-c', fullcmd], stdout=PIPE, stderr=PIPE).stderr
for line in fp:
line = ascii(line)
rawout += line
if keyline and valline:
continue
if re.match('(?i)Avg_MHz.*', line):
keyline = line.strip().split()
elif keyline:
valline = line.strip().split()
fp.close()
if not keyline or not valline or len(keyline) != len(valline):
errmsg = 'unrecognized turbostat output:\n'+rawout.strip()
self.vprint(errmsg)
if not self.verbose:
pprint(errmsg)
return ''
if self.verbose:
pprint(rawout.strip())
out = []
for key in keyline:
idx = keyline.index(key)
val = valline[idx]
out.append('%s=%s' % (key, val))
return '|'.join(out)
def wifiDetails(self, dev):
try:
info = open('/sys/class/net/%s/device/uevent' % dev, 'r').read().strip()
except:
return dev
vals = [dev]
for prop in info.split('\n'):
if prop.startswith('DRIVER=') or prop.startswith('PCI_ID='):
vals.append(prop.split('=')[-1])
return ':'.join(vals)
def checkWifi(self, dev=''):
try:
w = open('/proc/net/wireless', 'r').read().strip()
except:
return ''
for line in reversed(w.split('\n')):
			m = re.match(' *(?P<dev>.*): (?P<stat>[0-9a-f]*) .*', line)
if not m or (dev and dev != m.group('dev')):
continue
return m.group('dev')
return ''
def pollWifi(self, dev, timeout=60):
start = time.time()
while (time.time() - start) < timeout:
w = self.checkWifi(dev)
if w:
return '%s reconnected %.2f' % \
(self.wifiDetails(dev), max(0, time.time() - start))
time.sleep(0.01)
return '%s timeout %d' % (self.wifiDetails(dev), timeout)
def errorSummary(self, errinfo, msg):
found = False
for entry in errinfo:
if re.match(entry['match'], msg):
entry['count'] += 1
if self.hostname not in entry['urls']:
entry['urls'][self.hostname] = [self.htmlfile]
elif self.htmlfile not in entry['urls'][self.hostname]:
entry['urls'][self.hostname].append(self.htmlfile)
found = True
break
if found:
return
arr = msg.split()
for j in range(len(arr)):
if re.match('^[0-9,\-\.]*$', arr[j]):
arr[j] = '[0-9,\-\.]*'
else:
arr[j] = arr[j]\
.replace('\\', '\\\\').replace(']', '\]').replace('[', '\[')\
.replace('.', '\.').replace('+', '\+').replace('*', '\*')\
.replace('(', '\(').replace(')', '\)').replace('}', '\}')\
.replace('{', '\{')
mstr = ' *'.join(arr)
entry = {
'line': msg,
'match': mstr,
'count': 1,
'urls': {self.hostname: [self.htmlfile]}
}
errinfo.append(entry)
def multistat(self, start, idx, finish):
if 'time' in self.multitest:
id = '%d Duration=%dmin' % (idx+1, self.multitest['time'])
else:
id = '%d/%d' % (idx+1, self.multitest['count'])
t = time.time()
if 'start' not in self.multitest:
self.multitest['start'] = self.multitest['last'] = t
self.multitest['total'] = 0.0
pprint('TEST (%s) START' % id)
return
dt = t - self.multitest['last']
if not start:
if idx == 0 and self.multitest['delay'] > 0:
self.multitest['total'] += self.multitest['delay']
pprint('TEST (%s) COMPLETE -- Duration %.1fs' % (id, dt))
return
self.multitest['total'] += dt
self.multitest['last'] = t
avg = self.multitest['total'] / idx
if 'time' in self.multitest:
left = finish - datetime.now()
left -= timedelta(microseconds=left.microseconds)
else:
left = timedelta(seconds=((self.multitest['count'] - idx) * int(avg)))
pprint('TEST (%s) START - Avg Duration %.1fs, Time left %s' % \
(id, avg, str(left)))
def multiinit(self, c, d):
sz, unit = 'count', 'm'
if c.endswith('d') or c.endswith('h') or c.endswith('m'):
sz, unit, c = 'time', c[-1], c[:-1]
self.multitest['run'] = True
self.multitest[sz] = getArgInt('multi: n d (exec count)', c, 1, 1000000, False)
self.multitest['delay'] = getArgInt('multi: n d (delay between tests)', d, 0, 3600, False)
if unit == 'd':
self.multitest[sz] *= 1440
elif unit == 'h':
self.multitest[sz] *= 60
def displayControl(self, cmd):
xset, ret = 'timeout 10 xset -d :0.0 {0}', 0
if self.sudouser:
xset = 'sudo -u %s %s' % (self.sudouser, xset)
if cmd == 'init':
ret = call(xset.format('dpms 0 0 0'), shell=True)
if not ret:
ret = call(xset.format('s off'), shell=True)
elif cmd == 'reset':
ret = call(xset.format('s reset'), shell=True)
elif cmd in ['on', 'off', 'standby', 'suspend']:
b4 = self.displayControl('stat')
ret = call(xset.format('dpms force %s' % cmd), shell=True)
if not ret:
curr = self.displayControl('stat')
self.vprint('Display Switched: %s -> %s' % (b4, curr))
if curr != cmd:
self.vprint('WARNING: Display failed to change to %s' % cmd)
if ret:
self.vprint('WARNING: Display failed to change to %s with xset' % cmd)
return ret
elif cmd == 'stat':
fp = Popen(xset.format('q').split(' '), stdout=PIPE).stdout
ret = 'unknown'
for line in fp:
m = re.match('[\s]*Monitor is (?P<m>.*)', ascii(line))
if(m and len(m.group('m')) >= 2):
out = m.group('m').lower()
ret = out[3:] if out[0:2] == 'in' else out
break
fp.close()
return ret
def setRuntimeSuspend(self, before=True):
if before:
# runtime suspend disable or enable
if self.rs > 0:
self.rstgt, self.rsval, self.rsdir = 'on', 'auto', 'enabled'
else:
self.rstgt, self.rsval, self.rsdir = 'auto', 'on', 'disabled'
pprint('CONFIGURING RUNTIME SUSPEND...')
self.rslist = deviceInfo(self.rstgt)
for i in self.rslist:
self.setVal(self.rsval, i)
pprint('runtime suspend %s on all devices (%d changed)' % (self.rsdir, len(self.rslist)))
pprint('waiting 5 seconds...')
time.sleep(5)
else:
# runtime suspend re-enable or re-disable
for i in self.rslist:
self.setVal(self.rstgt, i)
pprint('runtime suspend settings restored on %d devices' % len(self.rslist))
sysvals = SystemValues()
switchvalues = ['enable', 'disable', 'on', 'off', 'true', 'false', '1', '0']
switchoff = ['disable', 'off', 'false', '0']
suspendmodename = {
'freeze': 'Freeze (S0)',
'standby': 'Standby (S1)',
'mem': 'Suspend (S3)',
'disk': 'Hibernate (S4)'
}
# Class: DevProps
# Description:
# Simple class which holds property values collected
# for all the devices used in the timeline.
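# A DevProps entry serializes to 'name,altname,async;' via out() and is
# parsed back by TestProps.devprops(). Illustrative example (values are
# made up):
#   p = DevProps()
#   p.altname, p.isasync = 'xhci_hcd', True
#   p.out('usb1')  ->  'usb1,xhci_hcd,1;'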
class DevProps:
def __init__(self):
self.syspath = ''
self.altname = ''
self.isasync = True
self.xtraclass = ''
self.xtrainfo = ''
def out(self, dev):
return '%s,%s,%d;' % (dev, self.altname, self.isasync)
def debug(self, dev):
pprint('%s:\n\taltname = %s\n\t async = %s' % (dev, self.altname, self.isasync))
def altName(self, dev):
if not self.altname or self.altname == dev:
return dev
return '%s [%s]' % (self.altname, dev)
def xtraClass(self):
if self.xtraclass:
return ' '+self.xtraclass
if not self.isasync:
return ' sync'
return ''
def xtraInfo(self):
if self.xtraclass:
return ' '+self.xtraclass
if self.isasync:
return ' (async)'
return ' (sync)'
# Class: DeviceNode
# Description:
# A container used to create a device hierarchy, with a single root node
# and a tree of child nodes. Used by Data.deviceTopology()
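# Nodes are built recursively by Data.masterTopology() and rendered as
# nested html lists by Data.printTopology().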
class DeviceNode:
def __init__(self, nodename, nodedepth):
self.name = nodename
self.children = []
self.depth = nodedepth
# Class: Data
# Description:
# The primary container for suspend/resume test data. There is one for
# each test run. The data is organized into a chronological hierarchy:
# Data.dmesg {
# phases {
# 10 sequential, non-overlapping phases of S/R
# contents: times for phase start/end, order/color data for html
# devlist {
# device callback or action list for this phase
# device {
# a single device callback or generic action
# contents: start/stop times, pid/cpu/driver info
# parents/children, html id for timeline/callgraph
# optionally includes an ftrace callgraph
# optionally includes dev/ps data
# }
# }
# }
# }
#
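# Note: if a phase occurs more than once in a run, setPhase() suffixes the
# repeat with '*' (e.g. 'resume*') and initDevicegroups() later renames it
# to a numbered form such as 'resume2'.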
class Data:
phasedef = {
'suspend_prepare': {'order': 0, 'color': '#CCFFCC'},
'suspend': {'order': 1, 'color': '#88FF88'},
'suspend_late': {'order': 2, 'color': '#00AA00'},
'suspend_noirq': {'order': 3, 'color': '#008888'},
'suspend_machine': {'order': 4, 'color': '#0000FF'},
'resume_machine': {'order': 5, 'color': '#FF0000'},
'resume_noirq': {'order': 6, 'color': '#FF9900'},
'resume_early': {'order': 7, 'color': '#FFCC00'},
'resume': {'order': 8, 'color': '#FFFF88'},
'resume_complete': {'order': 9, 'color': '#FFFFCC'},
}
errlist = {
'HWERROR' : r'.*\[ *Hardware Error *\].*',
'FWBUG' : r'.*\[ *Firmware Bug *\].*',
'BUG' : r'(?i).*\bBUG\b.*',
'ERROR' : r'(?i).*\bERROR\b.*',
'WARNING' : r'(?i).*\bWARNING\b.*',
'FAULT' : r'(?i).*\bFAULT\b.*',
'FAIL' : r'(?i).*\bFAILED\b.*',
'INVALID' : r'(?i).*\bINVALID\b.*',
'CRASH' : r'(?i).*\bCRASHED\b.*',
'TIMEOUT' : r'(?i).*\bTIMEOUT\b.*',
'IRQ' : r'.*\bgenirq: .*',
'TASKFAIL': r'.*Freezing of tasks *.*',
'ACPI' : r'.*\bACPI *(?P<b>[A-Za-z]*) *Error[: ].*',
'DISKFULL': r'.*\bNo space left on device.*',
'USBERR' : r'.*usb .*device .*, error [0-9-]*',
'ATAERR' : r' *ata[0-9\.]*: .*failed.*',
'MEIERR' : r' *mei.*: .*failed.*',
'TPMERR' : r'(?i) *tpm *tpm[0-9]*: .*error.*',
}
def __init__(self, num):
idchar = 'abcdefghij'
self.start = 0.0 # test start
self.end = 0.0 # test end
self.hwstart = 0 # rtc test start
self.hwend = 0 # rtc test end
self.tSuspended = 0.0 # low-level suspend start
self.tResumed = 0.0 # low-level resume start
self.tKernSus = 0.0 # kernel level suspend start
self.tKernRes = 0.0 # kernel level resume end
self.fwValid = False # is firmware data available
self.fwSuspend = 0 # time spent in firmware suspend
self.fwResume = 0 # time spent in firmware resume
self.html_device_id = 0
self.stamp = 0
self.outfile = ''
self.kerror = False
self.wifi = dict()
self.turbostat = 0
self.enterfail = ''
self.currphase = ''
self.pstl = dict() # process timeline
self.testnumber = num
self.idstr = idchar[num]
self.dmesgtext = [] # dmesg text file in memory
self.dmesg = dict() # root data structure
self.errorinfo = {'suspend':[],'resume':[]}
self.tLow = [] # time spent in low-level suspends (standby/freeze)
self.devpids = []
self.devicegroups = 0
def sortedPhases(self):
return sorted(self.dmesg, key=lambda k:self.dmesg[k]['order'])
def initDevicegroups(self):
# called when phases are all finished being added
for phase in sorted(self.dmesg.keys()):
if '*' in phase:
p = phase.split('*')
pnew = '%s%d' % (p[0], len(p))
self.dmesg[pnew] = self.dmesg.pop(phase)
self.devicegroups = []
for phase in self.sortedPhases():
self.devicegroups.append([phase])
def nextPhase(self, phase, offset):
order = self.dmesg[phase]['order'] + offset
for p in self.dmesg:
if self.dmesg[p]['order'] == order:
return p
return ''
def lastPhase(self, depth=1):
plist = self.sortedPhases()
if len(plist) < depth:
return ''
return plist[-1*depth]
def turbostatInfo(self):
tp = TestProps()
out = {'syslpi':'N/A','pkgpc10':'N/A'}
for line in self.dmesgtext:
m = re.match(tp.tstatfmt, line)
if not m:
continue
for i in m.group('t').split('|'):
if 'SYS%LPI' in i:
out['syslpi'] = i.split('=')[-1]+'%'
elif 'pc10' in i:
out['pkgpc10'] = i.split('=')[-1]+'%'
break
return out
def extractErrorInfo(self):
lf = self.dmesgtext
if len(self.dmesgtext) < 1 and sysvals.dmesgfile:
lf = sysvals.openlog(sysvals.dmesgfile, 'r')
i = 0
tp = TestProps()
list = []
for line in lf:
i += 1
if tp.stampInfo(line, sysvals):
continue
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if not m:
continue
t = float(m.group('ktime'))
if t < self.start or t > self.end:
continue
dir = 'suspend' if t < self.tSuspended else 'resume'
msg = m.group('msg')
if re.match('capability: warning: .*', msg):
continue
for err in self.errlist:
if re.match(self.errlist[err], msg):
list.append((msg, err, dir, t, i, i))
self.kerror = True
break
tp.msglist = []
for msg, type, dir, t, idx1, idx2 in list:
tp.msglist.append(msg)
self.errorinfo[dir].append((type, t, idx1, idx2))
if self.kerror:
sysvals.dmesglog = True
if len(self.dmesgtext) < 1 and sysvals.dmesgfile:
lf.close()
return tp
def setStart(self, time, msg=''):
self.start = time
if msg:
try:
self.hwstart = datetime.strptime(msg, sysvals.tmstart)
except:
self.hwstart = 0
def setEnd(self, time, msg=''):
self.end = time
if msg:
try:
self.hwend = datetime.strptime(msg, sysvals.tmend)
except:
self.hwend = 0
def isTraceEventOutsideDeviceCalls(self, pid, time):
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time < d['end']):
return False
return True
def sourcePhase(self, start):
for phase in self.sortedPhases():
if 'machine' in phase:
continue
pend = self.dmesg[phase]['end']
if start <= pend:
return phase
return 'resume_complete'
def sourceDevice(self, phaselist, start, end, pid, type):
tgtdev = ''
for phase in phaselist:
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
# pid must match
if dev['pid'] != pid:
continue
devS = dev['start']
devE = dev['end']
if type == 'device':
# device target event is entirely inside the source boundary
if(start < devS or start >= devE or end <= devS or end > devE):
continue
elif type == 'thread':
# thread target event will expand the source boundary
if start < devS:
dev['start'] = start
if end > devE:
dev['end'] = end
tgtdev = dev
break
return tgtdev
def addDeviceFunctionCall(self, displayname, kprobename, proc, pid, start, end, cdata, rdata):
# try to place the call in a device
phases = self.sortedPhases()
tgtdev = self.sourceDevice(phases, start, end, pid, 'device')
# calls with device pids that occur outside device bounds are dropped
# TODO: include these somehow
if not tgtdev and pid in self.devpids:
return False
# try to place the call in a thread
if not tgtdev:
tgtdev = self.sourceDevice(phases, start, end, pid, 'thread')
# create new thread blocks, expand as new calls are found
if not tgtdev:
if proc == '<...>':
threadname = 'kthread-%d' % (pid)
else:
threadname = '%s-%d' % (proc, pid)
tgtphase = self.sourcePhase(start)
self.newAction(tgtphase, threadname, pid, '', start, end, '', ' kth', '')
return self.addDeviceFunctionCall(displayname, kprobename, proc, pid, start, end, cdata, rdata)
# this should not happen
if not tgtdev:
sysvals.vprint('[%f - %f] %s-%d %s %s %s' % \
(start, end, proc, pid, kprobename, cdata, rdata))
return False
# place the call data inside the src element of the tgtdev
if('src' not in tgtdev):
tgtdev['src'] = []
dtf = sysvals.dev_tracefuncs
ubiquitous = False
if kprobename in dtf and 'ub' in dtf[kprobename]:
ubiquitous = True
title = cdata+' '+rdata
mstr = '\(.*\) *(?P<args>.*) *\((?P<caller>.*)\+.* arg1=(?P<ret>.*)'
m = re.match(mstr, title)
if m:
c = m.group('caller')
a = m.group('args').strip()
r = m.group('ret')
if len(r) > 6:
r = ''
else:
r = 'ret=%s ' % r
if ubiquitous and c in dtf and 'ub' in dtf[c]:
return False
color = sysvals.kprobeColor(kprobename)
e = DevFunction(displayname, a, c, r, start, end, ubiquitous, proc, pid, color)
tgtdev['src'].append(e)
return True
def overflowDevices(self):
# get a list of devices that extend beyond the end of this test run
devlist = []
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
if dev['end'] > self.end:
devlist.append(dev)
return devlist
def mergeOverlapDevices(self, devlist):
# merge any devices that overlap devlist
for dev in devlist:
devname = dev['name']
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
if devname not in list:
continue
tdev = list[devname]
o = min(dev['end'], tdev['end']) - max(dev['start'], tdev['start'])
if o <= 0:
continue
dev['end'] = tdev['end']
if 'src' not in dev or 'src' not in tdev:
continue
dev['src'] += tdev['src']
del list[devname]
def usurpTouchingThread(self, name, dev):
		# the calling test has priority over this thread, so give the thread to it
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
if name in list:
tdev = list[name]
if tdev['start'] - dev['end'] < 0.1:
dev['end'] = tdev['end']
if 'src' not in dev:
dev['src'] = []
if 'src' in tdev:
dev['src'] += tdev['src']
del list[name]
break
def stitchTouchingThreads(self, testlist):
# merge any threads between tests that touch
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
if 'htmlclass' not in dev or 'kth' not in dev['htmlclass']:
continue
for data in testlist:
data.usurpTouchingThread(devname, dev)
def optimizeDevSrc(self):
# merge any src call loops to reduce timeline size
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in list:
if 'src' not in list[dev]:
continue
src = list[dev]['src']
p = 0
for e in sorted(src, key=lambda event: event.time):
if not p or not e.repeat(p):
p = e
continue
# e is another iteration of p, move it into p
p.end = e.end
p.length = p.end - p.time
p.count += 1
src.remove(e)
def trimTimeVal(self, t, t0, dT, left):
if left:
if(t > t0):
if(t - dT < t0):
return t0
return t - dT
else:
return t
else:
if(t < t0 + dT):
if(t > t0):
return t0 + dT
return t + dT
else:
return t
def trimTime(self, t0, dT, left):
self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left)
self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left)
self.start = self.trimTimeVal(self.start, t0, dT, left)
self.tKernSus = self.trimTimeVal(self.tKernSus, t0, dT, left)
self.tKernRes = self.trimTimeVal(self.tKernRes, t0, dT, left)
self.end = self.trimTimeVal(self.end, t0, dT, left)
for phase in self.sortedPhases():
p = self.dmesg[phase]
p['start'] = self.trimTimeVal(p['start'], t0, dT, left)
p['end'] = self.trimTimeVal(p['end'], t0, dT, left)
list = p['list']
for name in list:
d = list[name]
d['start'] = self.trimTimeVal(d['start'], t0, dT, left)
d['end'] = self.trimTimeVal(d['end'], t0, dT, left)
d['length'] = d['end'] - d['start']
if('ftrace' in d):
cg = d['ftrace']
cg.start = self.trimTimeVal(cg.start, t0, dT, left)
cg.end = self.trimTimeVal(cg.end, t0, dT, left)
for line in cg.list:
line.time = self.trimTimeVal(line.time, t0, dT, left)
if('src' in d):
for e in d['src']:
e.time = self.trimTimeVal(e.time, t0, dT, left)
e.end = self.trimTimeVal(e.end, t0, dT, left)
e.length = e.end - e.time
for dir in ['suspend', 'resume']:
list = []
for e in self.errorinfo[dir]:
type, tm, idx1, idx2 = e
tm = self.trimTimeVal(tm, t0, dT, left)
list.append((type, tm, idx1, idx2))
self.errorinfo[dir] = list
def trimFreezeTime(self, tZero):
# trim out any standby or freeze clock time
lp = ''
for phase in self.sortedPhases():
if 'resume_machine' in phase and 'suspend_machine' in lp:
tS, tR = self.dmesg[lp]['end'], self.dmesg[phase]['start']
tL = tR - tS
if tL <= 0:
continue
left = True if tR > tZero else False
self.trimTime(tS, tL, left)
if 'waking' in self.dmesg[lp]:
tCnt = self.dmesg[lp]['waking'][0]
if self.dmesg[lp]['waking'][1] >= 0.001:
tTry = '-%.0f' % (round(self.dmesg[lp]['waking'][1] * 1000))
else:
tTry = '-%.3f' % (self.dmesg[lp]['waking'][1] * 1000)
text = '%.0f (%s ms waking %d times)' % (tL * 1000, tTry, tCnt)
else:
text = '%.0f' % (tL * 1000)
self.tLow.append(text)
lp = phase
def getMemTime(self):
if not self.hwstart or not self.hwend:
return
stime = (self.tSuspended - self.start) * 1000000
rtime = (self.end - self.tResumed) * 1000000
hws = self.hwstart + timedelta(microseconds=stime)
hwr = self.hwend - timedelta(microseconds=rtime)
self.tLow.append('%.0f'%((hwr - hws).total_seconds() * 1000))
def getTimeValues(self):
sktime = (self.tSuspended - self.tKernSus) * 1000
rktime = (self.tKernRes - self.tResumed) * 1000
return (sktime, rktime)
def setPhase(self, phase, ktime, isbegin, order=-1):
if(isbegin):
			# a new phase is starting while the current phase is still open
if self.currphase:
if 'resume_machine' not in self.currphase:
sysvals.vprint('WARNING: phase %s failed to end' % self.currphase)
self.dmesg[self.currphase]['end'] = ktime
phases = self.dmesg.keys()
color = self.phasedef[phase]['color']
count = len(phases) if order < 0 else order
# create unique name for every new phase
while phase in phases:
phase += '*'
self.dmesg[phase] = {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': color, 'order': count}
self.dmesg[phase]['start'] = ktime
self.currphase = phase
else:
# phase end without a start
if phase not in self.currphase:
if self.currphase:
sysvals.vprint('WARNING: %s ended instead of %s, ftrace corruption?' % (phase, self.currphase))
else:
sysvals.vprint('WARNING: %s ended without a start, ftrace corruption?' % phase)
return phase
phase = self.currphase
self.dmesg[phase]['end'] = ktime
self.currphase = ''
return phase
def sortedDevices(self, phase):
list = self.dmesg[phase]['list']
return sorted(list, key=lambda k:list[k]['start'])
def fixupInitcalls(self, phase):
# if any calls never returned, clip them at system resume end
phaselist = self.dmesg[phase]['list']
for devname in phaselist:
dev = phaselist[devname]
if(dev['end'] < 0):
for p in self.sortedPhases():
if self.dmesg[p]['end'] > dev['start']:
dev['end'] = self.dmesg[p]['end']
break
sysvals.vprint('%s (%s): callback didnt return' % (devname, phase))
def deviceFilter(self, devicefilter):
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
rmlist = []
for name in list:
keep = False
for filter in devicefilter:
if filter in name or \
('drv' in list[name] and filter in list[name]['drv']):
keep = True
if not keep:
rmlist.append(name)
for name in rmlist:
del list[name]
def fixupInitcallsThatDidntReturn(self):
# if any calls never returned, clip them at system resume end
for phase in self.sortedPhases():
self.fixupInitcalls(phase)
def phaseOverlap(self, phases):
rmgroups = []
newgroup = []
for group in self.devicegroups:
for phase in phases:
if phase not in group:
continue
for p in group:
if p not in newgroup:
newgroup.append(p)
if group not in rmgroups:
rmgroups.append(group)
for group in rmgroups:
self.devicegroups.remove(group)
self.devicegroups.append(newgroup)
def newActionGlobal(self, name, start, end, pid=-1, color=''):
# which phase is this device callback or action in
phases = self.sortedPhases()
targetphase = 'none'
htmlclass = ''
overlap = 0.0
myphases = []
for phase in phases:
pstart = self.dmesg[phase]['start']
pend = self.dmesg[phase]['end']
# see if the action overlaps this phase
o = max(0, min(end, pend) - max(start, pstart))
if o > 0:
myphases.append(phase)
# set the target phase to the one that overlaps most
if o > overlap:
if overlap > 0 and phase == 'post_resume':
continue
targetphase = phase
overlap = o
# if no target phase was found, pin it to the edge
if targetphase == 'none':
p0start = self.dmesg[phases[0]]['start']
if start <= p0start:
targetphase = phases[0]
else:
targetphase = phases[-1]
if pid == -2:
htmlclass = ' bg'
elif pid == -3:
htmlclass = ' ps'
if len(myphases) > 1:
htmlclass = ' bg'
self.phaseOverlap(myphases)
if targetphase in phases:
newname = self.newAction(targetphase, name, pid, '', start, end, '', htmlclass, color)
return (targetphase, newname)
return False
def newAction(self, phase, name, pid, parent, start, end, drv, htmlclass='', color=''):
# new device callback for a specific phase
self.html_device_id += 1
devid = '%s%d' % (self.idstr, self.html_device_id)
list = self.dmesg[phase]['list']
length = -1.0
if(start >= 0 and end >= 0):
length = end - start
if pid == -2 or name not in sysvals.tracefuncs.keys():
i = 2
origname = name
while(name in list):
name = '%s[%d]' % (origname, i)
i += 1
list[name] = {'name': name, 'start': start, 'end': end, 'pid': pid,
'par': parent, 'length': length, 'row': 0, 'id': devid, 'drv': drv }
if htmlclass:
list[name]['htmlclass'] = htmlclass
if color:
list[name]['color'] = color
return name
def findDevice(self, phase, name):
list = self.dmesg[phase]['list']
mydev = ''
for devname in sorted(list):
if name == devname or re.match('^%s\[(?P<num>[0-9]*)\]$' % name, devname):
mydev = devname
if mydev:
return list[mydev]
return False
def deviceChildren(self, devname, phase):
devlist = []
list = self.dmesg[phase]['list']
for child in list:
if(list[child]['par'] == devname):
devlist.append(child)
return devlist
def maxDeviceNameSize(self, phase):
size = 0
for name in self.dmesg[phase]['list']:
if len(name) > size:
size = len(name)
return size
def printDetails(self):
sysvals.vprint('Timeline Details:')
sysvals.vprint(' test start: %f' % self.start)
sysvals.vprint('kernel suspend start: %f' % self.tKernSus)
tS = tR = False
for phase in self.sortedPhases():
devlist = self.dmesg[phase]['list']
dc, ps, pe = len(devlist), self.dmesg[phase]['start'], self.dmesg[phase]['end']
if not tS and ps >= self.tSuspended:
sysvals.vprint(' machine suspended: %f' % self.tSuspended)
tS = True
if not tR and ps >= self.tResumed:
sysvals.vprint(' machine resumed: %f' % self.tResumed)
tR = True
sysvals.vprint('%20s: %f - %f (%d devices)' % (phase, ps, pe, dc))
if sysvals.devdump:
sysvals.vprint(''.join('-' for i in range(80)))
maxname = '%d' % self.maxDeviceNameSize(phase)
fmt = '%3d) %'+maxname+'s - %f - %f'
c = 1
for name in sorted(devlist):
s = devlist[name]['start']
e = devlist[name]['end']
sysvals.vprint(fmt % (c, name, s, e))
c += 1
sysvals.vprint(''.join('-' for i in range(80)))
sysvals.vprint(' kernel resume end: %f' % self.tKernRes)
sysvals.vprint(' test end: %f' % self.end)
def deviceChildrenAllPhases(self, devname):
devlist = []
for phase in self.sortedPhases():
list = self.deviceChildren(devname, phase)
for dev in sorted(list):
if dev not in devlist:
devlist.append(dev)
return devlist
def masterTopology(self, name, list, depth):
node = DeviceNode(name, depth)
for cname in list:
# avoid recursions
if name == cname:
continue
clist = self.deviceChildrenAllPhases(cname)
cnode = self.masterTopology(cname, clist, depth+1)
node.children.append(cnode)
return node
def printTopology(self, node):
html = ''
if node.name:
info = ''
drv = ''
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
if node.name in list:
s = list[node.name]['start']
e = list[node.name]['end']
if list[node.name]['drv']:
drv = ' {'+list[node.name]['drv']+'}'
info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000))
html += '<li><b>'+node.name+drv+'</b>'
if info:
html += '<ul>'+info+'</ul>'
html += '</li>'
if len(node.children) > 0:
html += '<ul>'
for cnode in node.children:
html += self.printTopology(cnode)
html += '</ul>'
return html
def rootDeviceList(self):
# list of devices graphed
real = []
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in sorted(list):
if list[dev]['pid'] >= 0 and dev not in real:
real.append(dev)
# list of top-most root devices
rootlist = []
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in sorted(list):
pdev = list[dev]['par']
pid = list[dev]['pid']
if(pid < 0 or re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
continue
if pdev and pdev not in real and pdev not in rootlist:
rootlist.append(pdev)
return rootlist
def deviceTopology(self):
rootlist = self.rootDeviceList()
master = self.masterTopology('', rootlist, 0)
return self.printTopology(master)
def selectTimelineDevices(self, widfmt, tTotal, mindevlen):
# only select devices that will actually show up in html
self.tdevlist = dict()
for phase in self.dmesg:
devlist = []
list = self.dmesg[phase]['list']
for dev in list:
length = (list[dev]['end'] - list[dev]['start']) * 1000
width = widfmt % (((list[dev]['end']-list[dev]['start'])*100)/tTotal)
if length >= mindevlen:
devlist.append(dev)
self.tdevlist[phase] = devlist
def addHorizontalDivider(self, devname, devend):
phase = 'suspend_prepare'
self.newAction(phase, devname, -2, '', \
self.start, devend, '', ' sec', '')
if phase not in self.tdevlist:
self.tdevlist[phase] = []
self.tdevlist[phase].append(devname)
d = DevItem(0, phase, self.dmesg[phase]['list'][devname])
return d
def addProcessUsageEvent(self, name, times):
# get the start and end times for this process
maxC = 0
tlast = 0
start = -1
end = -1
for t in sorted(times):
if tlast == 0:
tlast = t
continue
if name in self.pstl[t]:
if start == -1 or tlast < start:
start = tlast
if end == -1 or t > end:
end = t
tlast = t
if start == -1 or end == -1:
return 0
# add a new action for this process and get the object
out = self.newActionGlobal(name, start, end, -3)
if not out:
return 0
phase, devname = out
dev = self.dmesg[phase]['list'][devname]
# get the cpu exec data
tlast = 0
clast = 0
cpuexec = dict()
for t in sorted(times):
if tlast == 0 or t <= start or t > end:
tlast = t
continue
list = self.pstl[t]
c = 0
if name in list:
c = list[name]
if c > maxC:
maxC = c
if c != clast:
key = (tlast, t)
cpuexec[key] = c
tlast = t
clast = c
dev['cpuexec'] = cpuexec
return maxC
def createProcessUsageEvents(self):
# get an array of process names
proclist = []
for t in sorted(self.pstl):
pslist = self.pstl[t]
for ps in sorted(pslist):
if ps not in proclist:
proclist.append(ps)
# get a list of data points for suspend and resume
tsus = []
tres = []
for t in sorted(self.pstl):
if t < self.tSuspended:
tsus.append(t)
else:
tres.append(t)
# process the events for suspend and resume
if len(proclist) > 0:
sysvals.vprint('Process Execution:')
for ps in proclist:
c = self.addProcessUsageEvent(ps, tsus)
if c > 0:
sysvals.vprint('%25s (sus): %d' % (ps, c))
c = self.addProcessUsageEvent(ps, tres)
if c > 0:
sysvals.vprint('%25s (res): %d' % (ps, c))
def handleEndMarker(self, time, msg=''):
dm = self.dmesg
self.setEnd(time, msg)
self.initDevicegroups()
# give suspend_prepare an end if needed
if 'suspend_prepare' in dm and dm['suspend_prepare']['end'] < 0:
dm['suspend_prepare']['end'] = time
# assume resume machine ends at next phase start
if 'resume_machine' in dm and dm['resume_machine']['end'] < 0:
np = self.nextPhase('resume_machine', 1)
if np:
dm['resume_machine']['end'] = dm[np]['start']
		# if kernel resume end not found, assume it's the end marker
if self.tKernRes == 0.0:
self.tKernRes = time
		# if kernel suspend start not found, assume it's the end marker
if self.tKernSus == 0.0:
self.tKernSus = time
# set resume complete to end at end marker
if 'resume_complete' in dm:
dm['resume_complete']['end'] = time
def debugPrint(self):
for p in self.sortedPhases():
list = self.dmesg[p]['list']
for devname in sorted(list):
dev = list[devname]
if 'ftrace' in dev:
dev['ftrace'].debugPrint(' [%s]' % devname)
# Class: DevFunction
# Description:
# A container for kprobe function data we want in the dev timeline
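# Back-to-back repeats of the same call are merged by Data.optimizeDevSrc()
# using repeat(), and the merged entry reports its count as '(xN)' in title().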
class DevFunction:
def __init__(self, name, args, caller, ret, start, end, u, proc, pid, color):
self.row = 0
self.count = 1
self.name = name
self.args = args
self.caller = caller
self.ret = ret
self.time = start
self.length = end - start
self.end = end
self.ubiquitous = u
self.proc = proc
self.pid = pid
self.color = color
def title(self):
cnt = ''
if self.count > 1:
cnt = '(x%d)' % self.count
l = '%0.3fms' % (self.length * 1000)
if self.ubiquitous:
title = '%s(%s)%s <- %s, %s(%s)' % \
(self.name, self.args, cnt, self.caller, self.ret, l)
else:
title = '%s(%s) %s%s(%s)' % (self.name, self.args, self.ret, cnt, l)
return title.replace('"', '')
def text(self):
if self.count > 1:
text = '%s(x%d)' % (self.name, self.count)
else:
text = self.name
return text
def repeat(self, tgt):
# is the tgt call just a repeat of this call (e.g. are we in a loop)
dt = self.time - tgt.end
# only combine calls if -all- attributes are identical
if tgt.caller == self.caller and \
tgt.name == self.name and tgt.args == self.args and \
tgt.proc == self.proc and tgt.pid == self.pid and \
tgt.ret == self.ret and dt >= 0 and \
dt <= sysvals.callloopmaxgap and \
self.length < sysvals.callloopmaxlen:
return True
return False
# Class: FTraceLine
# Description:
# A container for a single line of ftrace data. There are six basic types:
# callgraph line:
# call: " dpm_run_callback() {"
# return: " }"
# leaf: " dpm_run_callback();"
# trace event:
# tracing_mark_write: SUSPEND START or RESUME COMPLETE
# suspend_resume: phase or custom exec block data
# device_pm_callback: device callback info
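# kprobe events are recognized by a '_cal' or '_ret' suffix on the event
# type, which sets fkprobe along with fcall or freturn respectively.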
class FTraceLine:
def __init__(self, t, m='', d=''):
self.length = 0.0
self.fcall = False
self.freturn = False
self.fevent = False
self.fkprobe = False
self.depth = 0
self.name = ''
self.type = ''
self.time = float(t)
if not m and not d:
return
# is this a trace event
if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
if(d == 'traceevent'):
# nop format trace event
msg = m
else:
# function_graph format trace event
em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)
msg = em.group('msg')
emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg)
if(emm):
self.name = emm.group('msg')
self.type = emm.group('call')
else:
self.name = msg
km = re.match('^(?P<n>.*)_cal$', self.type)
if km:
self.fcall = True
self.fkprobe = True
self.type = km.group('n')
return
km = re.match('^(?P<n>.*)_ret$', self.type)
if km:
self.freturn = True
self.fkprobe = True
self.type = km.group('n')
return
self.fevent = True
return
# convert the duration to seconds
if(d):
self.length = float(d)/1000000
# the indentation determines the depth
match = re.match('^(?P<d> *)(?P<o>.*)$', m)
if(not match):
return
self.depth = self.getDepth(match.group('d'))
m = match.group('o')
# function return
if(m[0] == '}'):
self.freturn = True
if(len(m) > 1):
# includes comment with function name
match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m)
if(match):
self.name = match.group('n').strip()
# function call
else:
self.fcall = True
# function call with children
if(m[-1] == '{'):
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# function call with no children (leaf)
elif(m[-1] == ';'):
self.freturn = True
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# something else (possibly a trace marker)
else:
self.name = m
def isCall(self):
return self.fcall and not self.freturn
def isReturn(self):
return self.freturn and not self.fcall
def isLeaf(self):
return self.fcall and self.freturn
def getDepth(self, str):
return len(str)/2
def debugPrint(self, info=''):
if self.isLeaf():
pprint(' -- %12.6f (depth=%02d): %s(); (%.3f us) %s' % (self.time, \
self.depth, self.name, self.length*1000000, info))
elif self.freturn:
pprint(' -- %12.6f (depth=%02d): %s} (%.3f us) %s' % (self.time, \
self.depth, self.name, self.length*1000000, info))
else:
pprint(' -- %12.6f (depth=%02d): %s() { (%.3f us) %s' % (self.time, \
self.depth, self.name, self.length*1000000, info))
def startMarker(self):
# Is this the starting line of a suspend?
if not self.fevent:
return False
if sysvals.usetracemarkers:
if(self.name.startswith('SUSPEND START')):
return True
return False
else:
if(self.type == 'suspend_resume' and
re.match('suspend_enter\[.*\] begin', self.name)):
return True
return False
def endMarker(self):
# Is this the ending line of a resume?
if not self.fevent:
return False
if sysvals.usetracemarkers:
if(self.name.startswith('RESUME COMPLETE')):
return True
return False
else:
if(self.type == 'suspend_resume' and
re.match('thaw_processes\[.*\] end', self.name)):
return True
return False
# Class: FTraceCallGraph
# Description:
# A container for the ftrace callgraph of a single recursive function.
# This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph
# Each instance is tied to a single device in a single phase, and is
# comprised of an ordered list of FTraceLine objects
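# addLine() returns 1 once the callgraph is finished (a return at depth 0),
# 0 while more lines are expected, and -1 if the graph had to be closed
# early due to a depth mismatch (the graph is then marked partial).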
class FTraceCallGraph:
vfname = 'missing_function_name'
def __init__(self, pid, sv):
self.id = ''
self.invalid = False
self.name = ''
self.partial = False
self.ignore = False
self.start = -1.0
self.end = -1.0
self.list = []
self.depth = 0
self.pid = pid
self.sv = sv
def addLine(self, line):
# if this is already invalid, just leave
if(self.invalid):
if(line.depth == 0 and line.freturn):
return 1
return 0
# invalidate on bad depth
if(self.depth < 0):
self.invalidate(line)
return 0
# ignore data til we return to the current depth
if self.ignore:
if line.depth > self.depth:
return 0
else:
self.list[-1].freturn = True
self.list[-1].length = line.time - self.list[-1].time
self.ignore = False
# if this is a return at self.depth, no more work is needed
if line.depth == self.depth and line.isReturn():
if line.depth == 0:
self.end = line.time
return 1
return 0
		# compare current depth with this line's pre-call depth
prelinedep = line.depth
if line.isReturn():
prelinedep += 1
last = 0
lasttime = line.time
if len(self.list) > 0:
last = self.list[-1]
lasttime = last.time
if last.isLeaf():
lasttime += last.length
# handle low misalignments by inserting returns
mismatch = prelinedep - self.depth
warning = self.sv.verbose and abs(mismatch) > 1
info = []
if mismatch < 0:
idx = 0
# add return calls to get the depth down
while prelinedep < self.depth:
self.depth -= 1
if idx == 0 and last and last.isCall():
# special case, turn last call into a leaf
last.depth = self.depth
last.freturn = True
last.length = line.time - last.time
if warning:
info.append(('[make leaf]', last))
else:
vline = FTraceLine(lasttime)
vline.depth = self.depth
vline.name = self.vfname
vline.freturn = True
self.list.append(vline)
if warning:
if idx == 0:
info.append(('', last))
info.append(('[add return]', vline))
idx += 1
if warning:
info.append(('', line))
# handle high misalignments by inserting calls
elif mismatch > 0:
idx = 0
if warning:
info.append(('', last))
# add calls to get the depth up
while prelinedep > self.depth:
if idx == 0 and line.isReturn():
# special case, turn this return into a leaf
line.fcall = True
prelinedep -= 1
if warning:
info.append(('[make leaf]', line))
else:
vline = FTraceLine(lasttime)
vline.depth = self.depth
vline.name = self.vfname
vline.fcall = True
self.list.append(vline)
self.depth += 1
if not last:
self.start = vline.time
if warning:
info.append(('[add call]', vline))
idx += 1
if warning and ('[make leaf]', line) not in info:
info.append(('', line))
if warning:
pprint('WARNING: ftrace data missing, corrections made:')
for i in info:
t, obj = i
if obj:
obj.debugPrint(t)
# process the call and set the new depth
skipadd = False
md = self.sv.max_graph_depth
if line.isCall():
# ignore blacklisted/overdepth funcs
if (md and self.depth >= md - 1) or (line.name in self.sv.cgblacklist):
self.ignore = True
else:
self.depth += 1
elif line.isReturn():
self.depth -= 1
# remove blacklisted/overdepth/empty funcs that slipped through
if (last and last.isCall() and last.depth == line.depth) or \
(md and last and last.depth >= md) or \
(line.name in self.sv.cgblacklist):
while len(self.list) > 0 and self.list[-1].depth > line.depth:
self.list.pop(-1)
if len(self.list) == 0:
self.invalid = True
return 1
self.list[-1].freturn = True
self.list[-1].length = line.time - self.list[-1].time
self.list[-1].name = line.name
skipadd = True
if len(self.list) < 1:
self.start = line.time
# check for a mismatch that returned all the way to callgraph end
res = 1
if mismatch < 0 and self.list[-1].depth == 0 and self.list[-1].freturn:
line = self.list[-1]
skipadd = True
res = -1
if not skipadd:
self.list.append(line)
if(line.depth == 0 and line.freturn):
if(self.start < 0):
self.start = line.time
self.end = line.time
if line.fcall:
self.end += line.length
if self.list[0].name == self.vfname:
self.invalid = True
if res == -1:
self.partial = True
return res
return 0
def invalidate(self, line):
if(len(self.list) > 0):
first = self.list[0]
self.list = []
self.list.append(first)
self.invalid = True
id = 'task %s' % (self.pid)
window = '(%f - %f)' % (self.start, line.time)
if(self.depth < 0):
pprint('Data misalignment for '+id+\
' (buffer overflow), ignoring this callback')
else:
pprint('Too much data for '+id+\
' '+window+', ignoring this callback')
def slice(self, dev):
minicg = FTraceCallGraph(dev['pid'], self.sv)
minicg.name = self.name
mydepth = -1
good = False
for l in self.list:
if(l.time < dev['start'] or l.time > dev['end']):
continue
if mydepth < 0:
if l.name == 'mutex_lock' and l.freturn:
mydepth = l.depth
continue
elif l.depth == mydepth and l.name == 'mutex_unlock' and l.fcall:
good = True
break
l.depth -= mydepth
minicg.addLine(l)
if not good or len(minicg.list) < 1:
return 0
return minicg
def repair(self, enddepth):
# bring the depth back to 0 with additional returns
fixed = False
last = self.list[-1]
for i in reversed(range(enddepth)):
t = FTraceLine(last.time)
t.depth = i
t.freturn = True
fixed = self.addLine(t)
if fixed != 0:
self.end = last.time
return True
return False
def postProcess(self):
if len(self.list) > 0:
self.name = self.list[0].name
stack = dict()
cnt = 0
last = 0
for l in self.list:
# ftrace bug: reported duration is not reliable
# check each leaf and clip it at max possible length
if last and last.isLeaf():
if last.length > l.time - last.time:
last.length = l.time - last.time
if l.isCall():
stack[l.depth] = l
cnt += 1
elif l.isReturn():
if(l.depth not in stack):
if self.sv.verbose:
pprint('Post Process Error: Depth missing')
l.debugPrint()
return False
# calculate call length from call/return lines
cl = stack[l.depth]
cl.length = l.time - cl.time
if cl.name == self.vfname:
cl.name = l.name
stack.pop(l.depth)
l.length = 0
cnt -= 1
last = l
if(cnt == 0):
# trace caught the whole call tree
return True
elif(cnt < 0):
if self.sv.verbose:
pprint('Post Process Error: Depth is less than 0')
return False
# trace ended before call tree finished
return self.repair(cnt)
def deviceMatch(self, pid, data):
found = ''
# add the callgraph data to the device hierarchy
borderphase = {
'dpm_prepare': 'suspend_prepare',
'dpm_complete': 'resume_complete'
}
if(self.name in borderphase):
p = borderphase[self.name]
list = data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
self.start <= dev['start'] and
self.end >= dev['end']):
cg = self.slice(dev)
if cg:
dev['ftrace'] = cg
found = devname
return found
for p in data.sortedPhases():
if(data.dmesg[p]['start'] <= self.start and
self.start <= data.dmesg[p]['end']):
list = data.dmesg[p]['list']
for devname in sorted(list, key=lambda k:list[k]['start']):
dev = list[devname]
if(pid == dev['pid'] and
self.start <= dev['start'] and
self.end >= dev['end']):
dev['ftrace'] = self
found = devname
break
break
return found
def newActionFromFunction(self, data):
name = self.name
if name in ['dpm_run_callback', 'dpm_prepare', 'dpm_complete']:
return
fs = self.start
fe = self.end
if fs < data.start or fe > data.end:
return
phase = ''
for p in data.sortedPhases():
if(data.dmesg[p]['start'] <= self.start and
self.start < data.dmesg[p]['end']):
phase = p
break
if not phase:
return
out = data.newActionGlobal(name, fs, fe, -2)
if out:
phase, myname = out
data.dmesg[phase]['list'][myname]['ftrace'] = self
def debugPrint(self, info=''):
pprint('%s pid=%d [%f - %f] %.3f us' % \
(self.name, self.pid, self.start, self.end,
(self.end - self.start)*1000000))
for l in self.list:
if l.isLeaf():
pprint('%f (%02d): %s(); (%.3f us)%s' % (l.time, \
l.depth, l.name, l.length*1000000, info))
elif l.freturn:
pprint('%f (%02d): %s} (%.3f us)%s' % (l.time, \
l.depth, l.name, l.length*1000000, info))
else:
pprint('%f (%02d): %s() { (%.3f us)%s' % (l.time, \
l.depth, l.name, l.length*1000000, info))
pprint(' ')
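# Class: DevItem
# Description:
# A simple wrapper tying a device entry to its test number and phase,
# used when organizing timeline rows.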
class DevItem:
def __init__(self, test, phase, dev):
self.test = test
self.phase = phase
self.dev = dev
def isa(self, cls):
if 'htmlclass' in self.dev and cls in self.dev['htmlclass']:
return True
return False
# Class: Timeline
# Description:
# A container for a device timeline which calculates
# all the html properties to display it correctly
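# Rows are packed per test and phase; rowlines and rowheight are nested
# dicts indexed as [test][phase][row].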
class Timeline:
html_tblock = '<div id="block{0}" class="tblock" style="left:{1}%;width:{2}%;"><div class="tback" style="height:{3}px"></div>\n'
html_device = '<div id="{0}" title="{1}" class="thread{7}" style="left:{2}%;top:{3}px;height:{4}px;width:{5}%;{8}">{6}</div>\n'
html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}px;height:{3}px;background:{4}">{5}</div>\n'
html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background:{3}"></div>\n'
html_legend = '<div id="p{3}" class="square" style="left:{0}%;background:{1}"> {2}</div>\n'
def __init__(self, rowheight, scaleheight):
self.html = ''
self.height = 0 # total timeline height
self.scaleH = scaleheight # timescale (top) row height
self.rowH = rowheight # device row height
self.bodyH = 0 # body height
self.rows = 0 # total timeline rows
self.rowlines = dict()
self.rowheight = dict()
def createHeader(self, sv, stamp):
if(not stamp['time']):
return
self.html += '<div class="version"><a href="https://01.org/pm-graph">%s v%s</a></div>' \
% (sv.title, sv.version)
if sv.logmsg and sv.testlog:
self.html += '<button id="showtest" class="logbtn btnfmt">log</button>'
if sv.dmesglog:
self.html += '<button id="showdmesg" class="logbtn btnfmt">dmesg</button>'
if sv.ftracelog:
self.html += '<button id="showftrace" class="logbtn btnfmt">ftrace</button>'
headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
self.html += headline_stamp.format(stamp['host'], stamp['kernel'],
stamp['mode'], stamp['time'])
if 'man' in stamp and 'plat' in stamp and 'cpu' in stamp and \
stamp['man'] and stamp['plat'] and stamp['cpu']:
headline_sysinfo = '<div class="stamp sysinfo">{0} {1} <i>with</i> {2}</div>\n'
self.html += headline_sysinfo.format(stamp['man'], stamp['plat'], stamp['cpu'])
# Function: getDeviceRows
# Description:
	# determine how many rows the device funcs will take
# Arguments:
# rawlist: the list of devices/actions for a single phase
# Output:
# The total number of rows needed to display this phase of the timeline
def getDeviceRows(self, rawlist):
# clear all rows and set them to undefined
sortdict = dict()
for item in rawlist:
item.row = -1
sortdict[item] = item.length
sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
remaining = len(sortlist)
rowdata = dict()
row = 1
# try to pack each row with as many ranges as possible
while(remaining > 0):
if(row not in rowdata):
rowdata[row] = []
for i in sortlist:
if(i.row >= 0):
continue
s = i.time
e = i.time + i.length
valid = True
for ritem in rowdata[row]:
rs = ritem.time
re = ritem.time + ritem.length
if(not (((s <= rs) and (e <= rs)) or
((s >= re) and (e >= re)))):
valid = False
break
if(valid):
rowdata[row].append(i)
i.row = row
remaining -= 1
row += 1
return row
# Function: getPhaseRows
# Description:
# Organize the timeline entries into the smallest
# number of rows possible, with no entry overlapping
# Arguments:
# devlist: the list of devices/actions in a group of contiguous phases
# Output:
# The total number of rows needed to display this phase of the timeline
def getPhaseRows(self, devlist, row=0, sortby='length'):
# clear all rows and set them to undefined
remaining = len(devlist)
rowdata = dict()
sortdict = dict()
myphases = []
# initialize all device rows to -1 and calculate devrows
for item in devlist:
dev = item.dev
tp = (item.test, item.phase)
if tp not in myphases:
myphases.append(tp)
dev['row'] = -1
if sortby == 'start':
# sort by start 1st, then length 2nd
sortdict[item] = (-1*float(dev['start']), float(dev['end']) - float(dev['start']))
else:
# sort by length 1st, then name 2nd
sortdict[item] = (float(dev['end']) - float(dev['start']), item.dev['name'])
if 'src' in dev:
dev['devrows'] = self.getDeviceRows(dev['src'])
# sort the devlist by length so that large items graph on top
sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
orderedlist = []
for item in sortlist:
if item.dev['pid'] == -2:
orderedlist.append(item)
for item in sortlist:
if item not in orderedlist:
orderedlist.append(item)
# try to pack each row with as many devices as possible
while(remaining > 0):
rowheight = 1
if(row not in rowdata):
rowdata[row] = []
for item in orderedlist:
dev = item.dev
if(dev['row'] < 0):
s = dev['start']
e = dev['end']
valid = True
for ritem in rowdata[row]:
rs = ritem.dev['start']
re = ritem.dev['end']
if(not (((s <= rs) and (e <= rs)) or
((s >= re) and (e >= re)))):
valid = False
break
if(valid):
rowdata[row].append(item)
dev['row'] = row
remaining -= 1
if 'devrows' in dev and dev['devrows'] > rowheight:
rowheight = dev['devrows']
for t, p in myphases:
if t not in self.rowlines or t not in self.rowheight:
self.rowlines[t] = dict()
self.rowheight[t] = dict()
if p not in self.rowlines[t] or p not in self.rowheight[t]:
self.rowlines[t][p] = dict()
self.rowheight[t][p] = dict()
rh = self.rowH
# section headers should use a different row height
if len(rowdata[row]) == 1 and \
'htmlclass' in rowdata[row][0].dev and \
'sec' in rowdata[row][0].dev['htmlclass']:
rh = 15
self.rowlines[t][p][row] = rowheight
self.rowheight[t][p][row] = rowheight * rh
row += 1
if(row > self.rows):
self.rows = int(row)
return row
def phaseRowHeight(self, test, phase, row):
return self.rowheight[test][phase][row]
def phaseRowTop(self, test, phase, row):
top = 0
for i in sorted(self.rowheight[test][phase]):
if i >= row:
break
top += self.rowheight[test][phase][i]
return top
def calcTotalRows(self):
# Calculate the heights and offsets for the header and rows
maxrows = 0
standardphases = []
for t in self.rowlines:
for p in self.rowlines[t]:
total = 0
for i in sorted(self.rowlines[t][p]):
total += self.rowlines[t][p][i]
if total > maxrows:
maxrows = total
if total == len(self.rowlines[t][p]):
standardphases.append((t, p))
self.height = self.scaleH + (maxrows*self.rowH)
self.bodyH = self.height - self.scaleH
# if there is 1 line per row, draw them the standard way
for t, p in standardphases:
for i in sorted(self.rowheight[t][p]):
self.rowheight[t][p][i] = float(self.bodyH)/len(self.rowlines[t][p])
def createZoomBox(self, mode='command', testcount=1):
# Create bounding box, add buttons
html_zoombox = '<center><button id="zoomin">ZOOM IN +</button><button id="zoomout">ZOOM OUT -</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n'
html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail{0}</button>'
html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n'
if mode != 'command':
if testcount > 1:
self.html += html_devlist2
self.html += html_devlist1.format('1')
else:
self.html += html_devlist1.format('')
self.html += html_zoombox
self.html += html_timeline.format('dmesg', self.height)
# Function: createTimeScale
# Description:
# Create the timescale for a timeline block
# Arguments:
# m0: start time (mode begin)
# mMax: end time (mode end)
# tTotal: total timeline time
# mode: suspend or resume
# Output:
# The html code needed to display the time scale
def createTimeScale(self, m0, mMax, tTotal, mode):
timescale = '<div class="t" style="right:{0}%">{1}</div>\n'
rline = '<div class="t" style="left:0;border-left:1px solid black;border-right:0;">{0}</div>\n'
output = '<div class="timescale">\n'
# set scale for timeline
mTotal = mMax - m0
tS = 0.1
if(tTotal <= 0):
return output+'</div>\n'
if(tTotal > 4):
tS = 1
divTotal = int(mTotal/tS) + 1
divEdge = (mTotal - tS*(divTotal-1))*100/mTotal
for i in range(divTotal):
htmlline = ''
if(mode == 'suspend'):
pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal) - divEdge)
val = '%0.fms' % (float(i-divTotal+1)*tS*1000)
if(i == divTotal - 1):
val = mode
htmlline = timescale.format(pos, val)
else:
pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal))
val = '%0.fms' % (float(i)*tS*1000)
htmlline = timescale.format(pos, val)
if(i == 0):
htmlline = rline.format(mode)
output += htmlline
self.html += output+'</div>\n'
# Class: TestProps
# Description:
# A list of values describing the properties of these test runs
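# The stamp line has the form '# <tool>-MMDDYY-HHMMSS host mode kernel',
# e.g. '# suspend-062317-105732 myhost mem 4.12.0' (illustrative values).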
class TestProps:
stampfmt = '# [a-z]*-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
wififmt = '^# wifi *(?P<d>\S*) *(?P<s>\S*) *(?P<t>[0-9\.]+).*'
tstatfmt = '^# turbostat (?P<t>\S*)'
testerrfmt = '^# enter_sleep_error (?P<e>.*)'
sysinfofmt = '^# sysinfo .*'
cmdlinefmt = '^# command \| (?P<cmd>.*)'
kparamsfmt = '^# kparams \| (?P<kp>.*)'
devpropfmt = '# Device Properties: .*'
pinfofmt = '# platform-(?P<val>[a-z,A-Z,0-9]*): (?P<info>.*)'
tracertypefmt = '# tracer: (?P<t>.*)'
firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
procexecfmt = 'ps - (?P<ps>.*)$'
ftrace_line_fmt_fg = \
'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
'[ +!#\*@$]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
ftrace_line_fmt_nop = \
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
'(?P<flags>\S*) *(?P<time>[0-9\.]*): *'+\
'(?P<msg>.*)'
machinesuspend = 'machine_suspend\[.*'
def __init__(self):
self.stamp = ''
self.sysinfo = ''
self.cmdline = ''
self.testerror = []
self.turbostat = []
self.wifi = []
self.fwdata = []
self.ftrace_line_fmt = self.ftrace_line_fmt_nop
self.cgformat = False
self.data = 0
self.ktemp = dict()
def setTracerType(self, tracer):
if(tracer == 'function_graph'):
self.cgformat = True
self.ftrace_line_fmt = self.ftrace_line_fmt_fg
elif(tracer == 'nop'):
self.ftrace_line_fmt = self.ftrace_line_fmt_nop
else:
doError('Invalid tracer format: [%s]' % tracer)
def stampInfo(self, line, sv):
if re.match(self.stampfmt, line):
self.stamp = line
return True
elif re.match(self.sysinfofmt, line):
self.sysinfo = line
return True
elif re.match(self.tstatfmt, line):
self.turbostat.append(line)
return True
elif re.match(self.wififmt, line):
self.wifi.append(line)
return True
elif re.match(self.testerrfmt, line):
self.testerror.append(line)
return True
elif re.match(self.firmwarefmt, line):
self.fwdata.append(line)
return True
elif(re.match(self.devpropfmt, line)):
self.parseDevprops(line, sv)
return True
elif(re.match(self.pinfofmt, line)):
self.parsePlatformInfo(line, sv)
return True
m = re.match(self.cmdlinefmt, line)
if m:
self.cmdline = m.group('cmd')
return True
m = re.match(self.tracertypefmt, line)
if(m):
self.setTracerType(m.group('t'))
return True
return False
def parseStamp(self, data, sv):
# global test data
m = re.match(self.stampfmt, self.stamp)
if not self.stamp or not m:
doError('data does not include the expected stamp')
data.stamp = {'time': '', 'host': '', 'mode': ''}
dt = datetime(int(m.group('y'))+2000, int(m.group('m')),
int(m.group('d')), int(m.group('H')), int(m.group('M')),
int(m.group('S')))
data.stamp['time'] = dt.strftime('%B %d %Y, %I:%M:%S %p')
data.stamp['host'] = m.group('host')
data.stamp['mode'] = m.group('mode')
data.stamp['kernel'] = m.group('kernel')
if re.match(self.sysinfofmt, self.sysinfo):
for f in self.sysinfo.split('|'):
if '#' in f:
continue
tmp = f.strip().split(':', 1)
key = tmp[0]
val = tmp[1]
data.stamp[key] = val
sv.hostname = data.stamp['host']
sv.suspendmode = data.stamp['mode']
if sv.suspendmode == 'freeze':
self.machinesuspend = 'timekeeping_freeze\[.*'
else:
self.machinesuspend = 'machine_suspend\[.*'
if sv.suspendmode == 'command' and sv.ftracefile != '':
modes = ['on', 'freeze', 'standby', 'mem', 'disk']
fp = sv.openlog(sv.ftracefile, 'r')
for line in fp:
m = re.match('.* machine_suspend\[(?P<mode>.*)\]', line)
if m and m.group('mode') in ['1', '2', '3', '4']:
sv.suspendmode = modes[int(m.group('mode'))]
data.stamp['mode'] = sv.suspendmode
break
fp.close()
sv.cmdline = self.cmdline
if not sv.stamp:
sv.stamp = data.stamp
# firmware data
if sv.suspendmode == 'mem' and len(self.fwdata) > data.testnumber:
m = re.match(self.firmwarefmt, self.fwdata[data.testnumber])
if m:
data.fwSuspend, data.fwResume = int(m.group('s')), int(m.group('r'))
if(data.fwSuspend > 0 or data.fwResume > 0):
data.fwValid = True
# turbostat data
if len(self.turbostat) > data.testnumber:
m = re.match(self.tstatfmt, self.turbostat[data.testnumber])
if m:
data.turbostat = m.group('t')
# wifi data
if len(self.wifi) > data.testnumber:
m = re.match(self.wififmt, self.wifi[data.testnumber])
if m:
data.wifi = {'dev': m.group('d'), 'stat': m.group('s'),
'time': float(m.group('t'))}
data.stamp['wifi'] = m.group('d')
# sleep mode enter errors
if len(self.testerror) > data.testnumber:
m = re.match(self.testerrfmt, self.testerror[data.testnumber])
if m:
data.enterfail = m.group('e')
def devprops(self, data):
props = dict()
devlist = data.split(';')
for dev in devlist:
f = dev.split(',')
if len(f) < 3:
continue
dev = f[0]
props[dev] = DevProps()
props[dev].altname = f[1]
if int(f[2]):
props[dev].isasync = True
else:
props[dev].isasync = False
return props
def parseDevprops(self, line, sv):
idx = line.index(': ') + 2
if idx >= len(line):
return
props = self.devprops(line[idx:])
if sv.suspendmode == 'command' and 'testcommandstring' in props:
sv.testcommand = props['testcommandstring'].altname
sv.devprops = props
def parsePlatformInfo(self, line, sv):
m = re.match(self.pinfofmt, line)
if not m:
return
name, info = m.group('val'), m.group('info')
if name == 'devinfo':
sv.devprops = self.devprops(sv.b64unzip(info))
return
elif name == 'testcmd':
sv.testcommand = info
return
field = info.split('|')
if len(field) < 2:
return
cmdline = field[0].strip()
output = sv.b64unzip(field[1].strip())
sv.platinfo.append([name, cmdline, output])
# Class: TestRun
# Description:
# A container for a suspend/resume test run. This is necessary as
# there could be more than one, and they need to be separate.
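# ftemp caches in-progress FTraceCallGraph objects keyed by pid while the
# ftrace log is parsed; ttemp plays a similar role for trace event data.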
class TestRun:
def __init__(self, dataobj):
self.data = dataobj
self.ftemp = dict()
self.ttemp = dict()
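# Class: ProcessMonitor
# Description:
# Samples /proc/[pid]/stat from a background thread and writes per-process
# jiffy deltas to the trace marker as 'ps - name-pid jiffies,...' lines,
# which are later matched by TestProps.procexecfmt.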
class ProcessMonitor:
def __init__(self):
self.proclist = dict()
self.running = False
def procstat(self):
c = ['cat /proc/[1-9]*/stat 2>/dev/null']
process = Popen(c, shell=True, stdout=PIPE)
running = dict()
for line in process.stdout:
data = ascii(line).split()
pid = data[0]
name = re.sub('[()]', '', data[1])
user = int(data[13])
kern = int(data[14])
kjiff = ujiff = 0
if pid not in self.proclist:
self.proclist[pid] = {'name' : name, 'user' : user, 'kern' : kern}
else:
val = self.proclist[pid]
ujiff = user - val['user']
kjiff = kern - val['kern']
val['user'] = user
val['kern'] = kern
if ujiff > 0 or kjiff > 0:
running[pid] = ujiff + kjiff
process.wait()
out = ''
for pid in running:
jiffies = running[pid]
val = self.proclist[pid]
if out:
out += ','
out += '%s-%s %d' % (val['name'], pid, jiffies)
return 'ps - '+out
def processMonitor(self, tid):
while self.running:
out = self.procstat()
if out:
sysvals.fsetVal(out, 'trace_marker')
def start(self):
self.thread = Thread(target=self.processMonitor, args=(0,))
self.running = True
self.thread.start()
def stop(self):
self.running = False
# ----------------- FUNCTIONS --------------------
# Function: doesTraceLogHaveTraceEvents
# Description:
# Quickly determine if the ftrace log has all of the trace events,
# markers, and/or kprobes required for primary parsing.
def doesTraceLogHaveTraceEvents():
kpcheck = ['_cal: (', '_ret: (']
techeck = ['suspend_resume', 'device_pm_callback']
tmcheck = ['SUSPEND START', 'RESUME COMPLETE']
sysvals.usekprobes = False
fp = sysvals.openlog(sysvals.ftracefile, 'r')
for line in fp:
# check for kprobes
if not sysvals.usekprobes:
for i in kpcheck:
if i in line:
sysvals.usekprobes = True
# check for all necessary trace events
check = techeck[:]
for i in techeck:
if i in line:
check.remove(i)
techeck = check
# check for all necessary trace markers
check = tmcheck[:]
for i in tmcheck:
if i in line:
check.remove(i)
tmcheck = check
fp.close()
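# trace events are usable if at least one required event was seen in the log;
# trace markers are usable only if both the start and end markers were found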
sysvals.usetraceevents = True if len(techeck) < 2 else False
sysvals.usetracemarkers = True if len(tmcheck) == 0 else False
# Function: appendIncompleteTraceLog
# Description:
# [deprecated for kernel 3.15 or newer]
# Adds callgraph data which lacks trace event data. This is only
# for timelines generated from 3.15 or older
# Arguments:
# testruns: the array of Data objects obtained from parseKernelLog
def appendIncompleteTraceLog(testruns):
# create TestRun vessels for ftrace parsing
testcnt = len(testruns)
testidx = 0
testrun = []
for data in testruns:
testrun.append(TestRun(data))
# extract the callgraph and traceevent data
sysvals.vprint('Analyzing the ftrace data (%s)...' % \
os.path.basename(sysvals.ftracefile))
tp = TestProps()
tf = sysvals.openlog(sysvals.ftracefile, 'r')
data = 0
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
if tp.stampInfo(line, sysvals):
continue
# parse only valid lines, if this is not one move on
m = re.match(tp.ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(tp.cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# look for the suspend start marker
if(t.startMarker()):
data = testrun[testidx].data
tp.parseStamp(data, sysvals)
data.setStart(t.time, t.name)
continue
if(not data):
continue
# find the end of resume
if(t.endMarker()):
data.setEnd(t.time, t.name)
testidx += 1
if(testidx >= testcnt):
break
continue
# trace event processing
if(t.fevent):
continue
# call/return processing
elif sysvals.usecallgraph:
# create a callgraph object for the data
if(pid not in testrun[testidx].ftemp):
testrun[testidx].ftemp[pid] = []
testrun[testidx].ftemp[pid].append(FTraceCallGraph(pid, sysvals))
# when the call is finished, see which device matches it
cg = testrun[testidx].ftemp[pid][-1]
res = cg.addLine(t)
if(res != 0):
testrun[testidx].ftemp[pid].append(FTraceCallGraph(pid, sysvals))
if(res == -1):
testrun[testidx].ftemp[pid][-1].addLine(t)
tf.close()
for test in testrun:
# add the callgraph data to the device hierarchy
for pid in test.ftemp:
for cg in test.ftemp[pid]:
if len(cg.list) < 1 or cg.invalid or (cg.end - cg.start == 0):
continue
if(not cg.postProcess()):
id = 'task %s cpu %s' % (pid, m.group('cpu'))
sysvals.vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
callstart = cg.start
callend = cg.end
for p in test.data.sortedPhases():
if(test.data.dmesg[p]['start'] <= callstart and
callstart <= test.data.dmesg[p]['end']):
list = test.data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
callstart <= dev['start'] and
callend >= dev['end']):
dev['ftrace'] = cg
break
# Function: parseTraceLog
# Description:
# Analyze an ftrace log output file generated from this app during
# the execution phase. Used when the ftrace log is the primary data source
# and includes the suspend_resume and device_pm_callback trace events
# The ftrace filename is taken from sysvals
# Output:
# An array of Data objects
def parseTraceLog(live=False):
sysvals.vprint('Analyzing the ftrace data (%s)...' % \
os.path.basename(sysvals.ftracefile))
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s does not exist' % sysvals.ftracefile)
if not live:
sysvals.setupAllKprobes()
ksuscalls = ['ksys_sync', 'pm_prepare_console']
krescalls = ['pm_restore_console']
tracewatch = ['irq_wakeup']
if sysvals.usekprobes:
tracewatch += ['sync_filesystems', 'freeze_processes', 'syscore_suspend',
'syscore_resume', 'resume_console', 'thaw_processes', 'CPU_ON',
'CPU_OFF', 'acpi_suspend']
# extract the callgraph and traceevent data
s2idle_enter = hwsus = False
tp = TestProps()
testruns, testdata = [], []
testrun, data, limbo = 0, 0, True
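# data holds the current Data object; limbo is True whenever parsing is outside
# a test run (before the first start marker or after an end marker)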
tf = sysvals.openlog(sysvals.ftracefile, 'r')
phase = 'suspend_prepare'
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
if tp.stampInfo(line, sysvals):
continue
# ignore all other commented lines
if line[0] == '#':
continue
# ftrace line: parse only valid lines
m = re.match(tp.ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_proc = m.group('proc')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(tp.cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# find the start of suspend
if(t.startMarker()):
data, limbo = Data(len(testdata)), False
testdata.append(data)
testrun = TestRun(data)
testruns.append(testrun)
tp.parseStamp(data, sysvals)
data.setStart(t.time, t.name)
data.first_suspend_prepare = True
phase = data.setPhase('suspend_prepare', t.time, True)
continue
if(not data or limbo):
continue
# process cpu exec line
if t.type == 'tracing_mark_write':
m = re.match(tp.procexecfmt, t.name)
if(m):
proclist = dict()
for ps in m.group('ps').split(','):
val = ps.split()
if not val:
continue
name = val[0].replace('--', '-')
proclist[name] = int(val[1])
data.pstl[t.time] = proclist
continue
# find the end of resume
if(t.endMarker()):
if data.tKernRes == 0:
data.tKernRes = t.time
data.handleEndMarker(t.time, t.name)
if(not sysvals.usetracemarkers):
# no trace markers? then quit and be sure to finish recording
# the event we used to trigger resume end
if('thaw_processes' in testrun.ttemp and len(testrun.ttemp['thaw_processes']) > 0):
# if an entry exists, assume this is its end
testrun.ttemp['thaw_processes'][-1]['end'] = t.time
limbo = True
continue
# trace event processing
if(t.fevent):
if(t.type == 'suspend_resume'):
# suspend_resume trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
isbegin = True
elif(re.match('(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
if '[' in t.name:
m = re.match('(?P<name>.*)\[.*', t.name)
else:
m = re.match('(?P<name>.*) .*', t.name)
name = m.group('name')
# ignore these events
if(name.split('[')[0] in tracewatch):
continue
# -- phase changes --
# start of kernel suspend
if(re.match('suspend_enter\[.*', t.name)):
if(isbegin and data.tKernSus == 0):
data.tKernSus = t.time
continue
# suspend_prepare start
elif(re.match('dpm_prepare\[.*', t.name)):
if isbegin and data.first_suspend_prepare:
data.first_suspend_prepare = False
if data.tKernSus == 0:
data.tKernSus = t.time
continue
phase = data.setPhase('suspend_prepare', t.time, isbegin)
continue
# suspend start
elif(re.match('dpm_suspend\[.*', t.name)):
phase = data.setPhase('suspend', t.time, isbegin)
continue
# suspend_late start
elif(re.match('dpm_suspend_late\[.*', t.name)):
phase = data.setPhase('suspend_late', t.time, isbegin)
continue
# suspend_noirq start
elif(re.match('dpm_suspend_noirq\[.*', t.name)):
phase = data.setPhase('suspend_noirq', t.time, isbegin)
continue
# suspend_machine/resume_machine
elif(re.match(tp.machinesuspend, t.name)):
lp = data.lastPhase()
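# s2idle can bounce between suspend_machine and resume_machine repeatedly;
# collapse those loops and accumulate the retry count and time in 'waking'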
if(isbegin):
hwsus = True
if lp.startswith('resume_machine'):
# trim out s2idle loops, track time trying to freeze
llp = data.lastPhase(2)
if llp.startswith('suspend_machine'):
if 'waking' not in data.dmesg[llp]:
data.dmesg[llp]['waking'] = [0, 0.0]
data.dmesg[llp]['waking'][0] += 1
data.dmesg[llp]['waking'][1] += \
t.time - data.dmesg[lp]['start']
data.currphase = ''
del data.dmesg[lp]
continue
phase = data.setPhase('suspend_machine', data.dmesg[lp]['end'], True)
data.setPhase(phase, t.time, False)
if data.tSuspended == 0:
data.tSuspended = t.time
else:
if lp.startswith('resume_machine'):
data.dmesg[lp]['end'] = t.time
continue
phase = data.setPhase('resume_machine', t.time, True)
if(sysvals.suspendmode in ['mem', 'disk']):
susp = phase.replace('resume', 'suspend')
if susp in data.dmesg:
data.dmesg[susp]['end'] = t.time
data.tSuspended = t.time
data.tResumed = t.time
continue
# resume_noirq start
elif(re.match('dpm_resume_noirq\[.*', t.name)):
phase = data.setPhase('resume_noirq', t.time, isbegin)
continue
# resume_early start
elif(re.match('dpm_resume_early\[.*', t.name)):
phase = data.setPhase('resume_early', t.time, isbegin)
continue
# resume start
elif(re.match('dpm_resume\[.*', t.name)):
phase = data.setPhase('resume', t.time, isbegin)
continue
# resume complete start
elif(re.match('dpm_complete\[.*', t.name)):
phase = data.setPhase('resume_complete', t.time, isbegin)
continue
# skip trace events inside devices calls
if(not data.isTraceEventOutsideDeviceCalls(pid, t.time)):
continue
# global events (outside device calls) are graphed
if(name not in testrun.ttemp):
testrun.ttemp[name] = []
# special handling for s2idle_enter
if name == 'machine_suspend':
if hwsus:
s2idle_enter = hwsus = False
elif s2idle_enter and not isbegin:
if(len(testrun.ttemp[name]) > 0):
testrun.ttemp[name][-1]['end'] = t.time
testrun.ttemp[name][-1]['loop'] += 1
elif not s2idle_enter and isbegin:
s2idle_enter = True
testrun.ttemp[name].append({'begin': t.time,
'end': t.time, 'pid': pid, 'loop': 0})
continue
if(isbegin):
# create a new list entry
testrun.ttemp[name].append(\
{'begin': t.time, 'end': t.time, 'pid': pid})
else:
if(len(testrun.ttemp[name]) > 0):
# if an entry exists, assume this is its end
testrun.ttemp[name][-1]['end'] = t.time
# device callback start
elif(t.type == 'device_pm_callback_start'):
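# callback start messages have the form '<driver> <device>, parent: <parent>, ...'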
if phase not in data.dmesg:
continue
m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
t.name);
if(not m):
continue
drv = m.group('drv')
n = m.group('d')
p = m.group('p')
if(n and p):
data.newAction(phase, n, pid, p, t.time, -1, drv)
if pid not in data.devpids:
data.devpids.append(pid)
# device callback finish
elif(t.type == 'device_pm_callback_end'):
if phase not in data.dmesg:
continue
m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
if(not m):
continue
n = m.group('d')
dev = data.findDevice(phase, n)
if dev:
dev['length'] = t.time - dev['start']
dev['end'] = t.time
# kprobe event processing
elif(t.fkprobe):
kprobename = t.type
kprobedata = t.name
key = (kprobename, pid)
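# kprobe call/return pairs are matched per (function name, pid): each call
# appends an entry with end=-1 which the matching return fills in later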
# displayname is generated from kprobe data
displayname = ''
if(t.fcall):
displayname = sysvals.kprobeDisplayName(kprobename, kprobedata)
if not displayname:
continue
if(key not in tp.ktemp):
tp.ktemp[key] = []
tp.ktemp[key].append({
'pid': pid,
'begin': t.time,
'end': -1,
'name': displayname,
'cdata': kprobedata,
'proc': m_proc,
})
# start of kernel suspend
if(data.tKernSus == 0 and phase == 'suspend_prepare' \
and kprobename in ksuscalls):
data.tKernSus = t.time
elif(t.freturn):
if(key not in tp.ktemp) or len(tp.ktemp[key]) < 1:
continue
e = next((x for x in reversed(tp.ktemp[key]) if x['end'] < 0), 0)
if not e:
continue
e['end'] = t.time
e['rdata'] = kprobedata
# end of kernel resume
if(phase != 'suspend_prepare' and kprobename in krescalls):
if phase in data.dmesg:
data.dmesg[phase]['end'] = t.time
data.tKernRes = t.time
# callgraph processing
elif sysvals.usecallgraph:
# create a callgraph object for the data
key = (m_proc, pid)
if(key not in testrun.ftemp):
testrun.ftemp[key] = []
testrun.ftemp[key].append(FTraceCallGraph(pid, sysvals))
# when the call is finished, see which device matches it
cg = testrun.ftemp[key][-1]
res = cg.addLine(t)
if(res != 0):
testrun.ftemp[key].append(FTraceCallGraph(pid, sysvals))
if(res == -1):
testrun.ftemp[key][-1].addLine(t)
tf.close()
if len(testdata) < 1:
sysvals.vprint('WARNING: ftrace start marker is missing')
if data and not data.devicegroups:
sysvals.vprint('WARNING: ftrace end marker is missing')
data.handleEndMarker(t.time, t.name)
if sysvals.suspendmode == 'command':
for test in testruns:
for p in test.data.sortedPhases():
if p == 'suspend_prepare':
test.data.dmesg[p]['start'] = test.data.start
test.data.dmesg[p]['end'] = test.data.end
else:
test.data.dmesg[p]['start'] = test.data.end
test.data.dmesg[p]['end'] = test.data.end
test.data.tSuspended = test.data.end
test.data.tResumed = test.data.end
test.data.fwValid = False
# dev source and procmon events can be unreadable with mixed phase height
if sysvals.usedevsrc or sysvals.useprocmon:
sysvals.mixedphaseheight = False
# expand phase boundaries so there are no gaps
for data in testdata:
lp = data.sortedPhases()[0]
for p in data.sortedPhases():
if(p != lp and not ('machine' in p and 'machine' in lp)):
data.dmesg[lp]['end'] = data.dmesg[p]['start']
lp = p
for i in range(len(testruns)):
test = testruns[i]
data = test.data
# find the total time range for this test (begin, end)
tlb, tle = data.start, data.end
if i < len(testruns) - 1:
tle = testruns[i+1].data.start
# add the process usage data to the timeline
if sysvals.useprocmon:
data.createProcessUsageEvents()
# add the traceevent data to the device hierarchy
if(sysvals.usetraceevents):
# add actual trace funcs
for name in sorted(test.ttemp):
for event in test.ttemp[name]:
if event['end'] - event['begin'] <= 0:
continue
title = name
if name == 'machine_suspend' and 'loop' in event:
title = 's2idle_enter_%dx' % event['loop']
data.newActionGlobal(title, event['begin'], event['end'], event['pid'])
# add the kprobe based virtual tracefuncs as actual devices
for key in sorted(tp.ktemp):
name, pid = key
if name not in sysvals.tracefuncs:
continue
if pid not in data.devpids:
data.devpids.append(pid)
for e in tp.ktemp[key]:
kb, ke = e['begin'], e['end']
if ke - kb < 0.000001 or tlb > kb or tle <= kb:
continue
color = sysvals.kprobeColor(name)
data.newActionGlobal(e['name'], kb, ke, pid, color)
# add config base kprobes and dev kprobes
if sysvals.usedevsrc:
for key in sorted(tp.ktemp):
name, pid = key
if name in sysvals.tracefuncs or name not in sysvals.dev_tracefuncs:
continue
for e in tp.ktemp[key]:
kb, ke = e['begin'], e['end']
if ke - kb < 0.000001 or tlb > kb or tle <= kb:
continue
data.addDeviceFunctionCall(e['name'], name, e['proc'], pid, kb,
ke, e['cdata'], e['rdata'])
if sysvals.usecallgraph:
# add the callgraph data to the device hierarchy
sortlist = dict()
for key in sorted(test.ftemp):
proc, pid = key
for cg in test.ftemp[key]:
if len(cg.list) < 1 or cg.invalid or (cg.end - cg.start == 0):
continue
if(not cg.postProcess()):
id = 'task %s' % (pid)
sysvals.vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
# match cg data to devices
devname = ''
if sysvals.suspendmode != 'command':
devname = cg.deviceMatch(pid, data)
if not devname:
sortkey = '%f%f%d' % (cg.start, cg.end, pid)
sortlist[sortkey] = cg
elif len(cg.list) > 1000000 and cg.name != sysvals.ftopfunc:
sysvals.vprint('WARNING: the callgraph for %s is massive (%d lines)' %\
(devname, len(cg.list)))
# create blocks for orphan cg data
for sortkey in sorted(sortlist):
cg = sortlist[sortkey]
name = cg.name
if sysvals.isCallgraphFunc(name):
sysvals.vprint('Callgraph found for task %d: %.3fms, %s' % (cg.pid, (cg.end - cg.start)*1000, name))
cg.newActionFromFunction(data)
if sysvals.suspendmode == 'command':
return (testdata, '')
# fill in any missing phases
error = []
for data in testdata:
tn = '' if len(testdata) == 1 else ('%d' % (data.testnumber + 1))
terr = ''
phasedef = data.phasedef
lp = 'suspend_prepare'
for p in sorted(phasedef, key=lambda k:phasedef[k]['order']):
if p not in data.dmesg:
if not terr:
ph = p if 'machine' in p else lp
terr = '%s%s failed in %s phase' % (sysvals.suspendmode, tn, ph)
pprint('TEST%s FAILED: %s' % (tn, terr))
error.append(terr)
if data.tSuspended == 0:
data.tSuspended = data.dmesg[lp]['end']
if data.tResumed == 0:
data.tResumed = data.dmesg[lp]['end']
data.fwValid = False
sysvals.vprint('WARNING: phase "%s" is missing!' % p)
lp = p
if not terr and 'dev' in data.wifi and data.wifi['stat'] == 'timeout':
terr = '%s%s failed in wifi_resume <i>(%s %.0fs timeout)</i>' % \
(sysvals.suspendmode, tn, data.wifi['dev'], data.wifi['time'])
error.append(terr)
if not terr and data.enterfail:
pprint('test%s FAILED: enter %s failed with %s' % (tn, sysvals.suspendmode, data.enterfail))
terr = 'test%s failed to enter %s mode' % (tn, sysvals.suspendmode)
error.append(terr)
if data.tSuspended == 0:
data.tSuspended = data.tKernRes
if data.tResumed == 0:
data.tResumed = data.tSuspended
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
if sysvals.usedevsrc:
data.optimizeDevSrc()
# x2: merge any overlapping devices between test runs
if sysvals.usedevsrc and len(testdata) > 1:
tc = len(testdata)
for i in range(tc - 1):
devlist = testdata[i].overflowDevices()
for j in range(i + 1, tc):
testdata[j].mergeOverlapDevices(devlist)
testdata[0].stitchTouchingThreads(testdata[1:])
return (testdata, ', '.join(error))
# Function: loadKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# load the dmesg file into memory and fix up any ordering issues
# The dmesg filename is taken from sysvals
# Output:
# An array of empty Data objects with only their dmesgtext attributes set
def loadKernelLog():
sysvals.vprint('Analyzing the dmesg data (%s)...' % \
os.path.basename(sysvals.dmesgfile))
if(os.path.exists(sysvals.dmesgfile) == False):
doError('%s does not exist' % sysvals.dmesgfile)
# there can be multiple test runs in a single file
tp = TestProps()
tp.stamp = datetime.now().strftime('# suspend-%m%d%y-%H%M%S localhost mem unknown')
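# provide a fallback stamp in case the dmesg log carries no stamp header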
testruns = []
data = 0
lf = sysvals.openlog(sysvals.dmesgfile, 'r')
for line in lf:
line = line.replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
if tp.stampInfo(line, sysvals):
continue
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
msg = m.group("msg")
if(re.match('PM: Syncing filesystems.*', msg)):
if(data):
testruns.append(data)
data = Data(len(testruns))
tp.parseStamp(data, sysvals)
if(not data):
continue
m = re.match('.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
if(m):
sysvals.stamp['kernel'] = m.group('k')
m = re.match('PM: Preparing system for (?P<m>.*) sleep', msg)
if(m):
sysvals.stamp['mode'] = sysvals.suspendmode = m.group('m')
data.dmesgtext.append(line)
lf.close()
if data:
testruns.append(data)
if len(testruns) < 1:
doError('dmesg log has no suspend/resume data: %s' \
% sysvals.dmesgfile)
# fix lines with same timestamp/function with the call and return swapped
for data in testruns:
last = ''
for line in data.dmesgtext:
mc = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
'(?P<f>.*)\+ @ .*, parent: .*', line)
mr = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', last)
if(mc and mr and (mc.group('t') == mr.group('t')) and
(mc.group('f') == mr.group('f'))):
i = data.dmesgtext.index(last)
j = data.dmesgtext.index(line)
data.dmesgtext[i] = line
data.dmesgtext[j] = last
last = line
return testruns
# Function: parseKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# Analyse a dmesg log output file generated from this app during
# the execution phase. Create a set of device structures in memory
# for subsequent formatting in the html output file
# This call is only for legacy support on kernels where the ftrace
# data lacks the suspend_resume or device_pm_callbacks trace events.
# Arguments:
# data: an empty Data object (with dmesgtext) obtained from loadKernelLog
# Output:
# The filled Data object
def parseKernelLog(data):
phase = 'suspend_runtime'
if(data.fwValid):
sysvals.vprint('Firmware Suspend = %u ns, Firmware Resume = %u ns' % \
(data.fwSuspend, data.fwResume))
# dmesg phase match table
dm = {
'suspend_prepare': ['PM: Syncing filesystems.*'],
'suspend': ['PM: Entering [a-z]* sleep.*', 'Suspending console.*'],
'suspend_late': ['PM: suspend of devices complete after.*'],
'suspend_noirq': ['PM: late suspend of devices complete after.*'],
'suspend_machine': ['PM: noirq suspend of devices complete after.*'],
'resume_machine': ['ACPI: Low-level resume complete.*'],
'resume_noirq': ['ACPI: Waking up from system sleep state.*'],
'resume_early': ['PM: noirq resume of devices complete after.*'],
'resume': ['PM: early resume of devices complete after.*'],
'resume_complete': ['PM: resume of devices complete after.*'],
'post_resume': ['.*Restarting tasks \.\.\..*'],
}
if(sysvals.suspendmode == 'standby'):
dm['resume_machine'] = ['PM: Restoring platform NVS memory']
elif(sysvals.suspendmode == 'disk'):
dm['suspend_late'] = ['PM: freeze of devices complete after.*']
dm['suspend_noirq'] = ['PM: late freeze of devices complete after.*']
dm['suspend_machine'] = ['PM: noirq freeze of devices complete after.*']
dm['resume_machine'] = ['PM: Restoring platform NVS memory']
dm['resume_early'] = ['PM: noirq restore of devices complete after.*']
dm['resume'] = ['PM: early restore of devices complete after.*']
dm['resume_complete'] = ['PM: restore of devices complete after.*']
elif(sysvals.suspendmode == 'freeze'):
dm['resume_machine'] = ['ACPI: resume from mwait']
# action table (expected events that occur and show up in dmesg)
at = {
'sync_filesystems': {
'smsg': 'PM: Syncing filesystems.*',
'emsg': 'PM: Preparing system for mem sleep.*' },
'freeze_user_processes': {
'smsg': 'Freezing user space processes .*',
'emsg': 'Freezing remaining freezable tasks.*' },
'freeze_tasks': {
'smsg': 'Freezing remaining freezable tasks.*',
'emsg': 'PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*' },
'ACPI prepare': {
'smsg': 'ACPI: Preparing to enter system sleep state.*',
'emsg': 'PM: Saving platform NVS memory.*' },
'PM vns': {
'smsg': 'PM: Saving platform NVS memory.*',
'emsg': 'Disabling non-boot CPUs .*' },
}
t0 = -1.0
cpu_start = -1.0
prevktime = -1.0
actions = dict()
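# walk the dmesg text once, tracking phase changes, device callbacks,
# known kernel actions, and CPU on/off events by message pattern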
for line in data.dmesgtext:
# parse each dmesg line into the time and message
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
val = m.group('ktime')
try:
ktime = float(val)
except:
continue
msg = m.group('msg')
# initialize data start to first line time
if t0 < 0:
data.setStart(ktime)
t0 = ktime
else:
continue
# check for a phase change line
phasechange = False
for p in dm:
for s in dm[p]:
if(re.match(s, msg)):
phasechange, phase = True, p
break
# hack for determining resume_machine end for freeze
if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
and phase == 'resume_machine' and \
re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
data.setPhase(phase, ktime, False)
phase = 'resume_noirq'
data.setPhase(phase, ktime, True)
if phasechange:
if phase == 'suspend_prepare':
data.setPhase(phase, ktime, True)
data.setStart(ktime)
data.tKernSus = ktime
elif phase == 'suspend':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'suspend_late':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'suspend_noirq':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'suspend_machine':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'resume_machine':
lp = data.lastPhase()
if(sysvals.suspendmode in ['freeze', 'standby']):
data.tSuspended = prevktime
if lp:
data.setPhase(lp, prevktime, False)
else:
data.tSuspended = ktime
if lp:
data.setPhase(lp, prevktime, False)
data.tResumed = ktime
data.setPhase(phase, ktime, True)
elif phase == 'resume_noirq':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'resume_early':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'resume':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'resume_complete':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'post_resume':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setEnd(ktime)
data.tKernRes = ktime
break
# -- device callbacks --
if(phase in data.sortedPhases()):
# device init call
if(re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
sm = re.match('calling (?P<f>.*)\+ @ '+\
'(?P<n>.*), parent: (?P<p>.*)', msg);
f = sm.group('f')
n = sm.group('n')
p = sm.group('p')
if(f and n and p):
data.newAction(phase, f, int(n), p, ktime, -1, '')
# device init return
elif(re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs', msg)):
sm = re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs(?P<a>.*)', msg);
f = sm.group('f')
t = sm.group('t')
list = data.dmesg[phase]['list']
if(f in list):
dev = list[f]
dev['length'] = int(t)
dev['end'] = ktime
# if trace events are not available, these are better than nothing
if(not sysvals.usetraceevents):
# look for known actions
for a in sorted(at):
if(re.match(at[a]['smsg'], msg)):
if(a not in actions):
actions[a] = []
actions[a].append({'begin': ktime, 'end': ktime})
if(re.match(at[a]['emsg'], msg)):
if(a in actions):
actions[a][-1]['end'] = ktime
# now look for CPU on/off events
if(re.match('Disabling non-boot CPUs .*', msg)):
# start of first cpu suspend
cpu_start = ktime
elif(re.match('Enabling non-boot CPUs .*', msg)):
# start of first cpu resume
cpu_start = ktime
elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)):
# end of a cpu suspend, start of the next
m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
# end of a cpu resume, start of the next
m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
prevktime = ktime
data.initDevicegroups()
# fill in any missing phases
phasedef = data.phasedef
terr, lp = '', 'suspend_prepare'
for p in sorted(phasedef, key=lambda k:phasedef[k]['order']):
if p not in data.dmesg:
if not terr:
pprint('TEST FAILED: %s failed in %s phase' % (sysvals.suspendmode, lp))
terr = '%s failed in %s phase' % (sysvals.suspendmode, lp)
if data.tSuspended == 0:
data.tSuspended = data.dmesg[lp]['end']
if data.tResumed == 0:
data.tResumed = data.dmesg[lp]['end']
sysvals.vprint('WARNING: phase "%s" is missing!' % p)
lp = p
lp = data.sortedPhases()[0]
for p in data.sortedPhases():
if(p != lp and not ('machine' in p and 'machine' in lp)):
data.dmesg[lp]['end'] = data.dmesg[p]['start']
lp = p
if data.tSuspended == 0:
data.tSuspended = data.tKernRes
if data.tResumed == 0:
data.tResumed = data.tSuspended
# fill in any actions we've found
for name in sorted(actions):
for event in actions[name]:
data.newActionGlobal(name, event['begin'], event['end'])
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
return True
def callgraphHTML(sv, hf, num, cg, title, color, devid):
html_func_top = '<article id="{0}" class="atop" style="background:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
html_func_end = '</article>\n'
html_func_leaf = '<article>{0} {1}</article>\n'
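# each callgraph is emitted as nested <article> blocks with a checkbox label
# for expand/collapse; graphs shorter than mincglen are skipped entirely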
cgid = devid
if cg.id:
cgid += cg.id
cglen = (cg.end - cg.start) * 1000
if cglen < sv.mincglen:
return num
fmt = '<r>(%.3f ms @ '+sv.timeformat+' to '+sv.timeformat+')</r>'
flen = fmt % (cglen, cg.start, cg.end)
hf.write(html_func_top.format(cgid, color, num, title, flen))
num += 1
for line in cg.list:
if(line.length < 0.000000001):
flen = ''
else:
fmt = '<n>(%.3f ms @ '+sv.timeformat+')</n>'
flen = fmt % (line.length*1000, line.time)
if line.isLeaf():
hf.write(html_func_leaf.format(line.name, flen))
elif line.freturn:
hf.write(html_func_end)
else:
hf.write(html_func_start.format(num, line.name, flen))
num += 1
hf.write(html_func_end)
return num
def addCallgraphs(sv, hf, data):
hf.write('<section id="callgraphs" class="callgraph">\n')
# write out the ftrace data converted to html
num = 0
for p in data.sortedPhases():
if sv.cgphase and p != sv.cgphase:
continue
list = data.dmesg[p]['list']
for d in data.sortedDevices(p):
if len(sv.cgfilter) > 0 and d not in sv.cgfilter:
continue
dev = list[d]
color = 'white'
if 'color' in data.dmesg[p]:
color = data.dmesg[p]['color']
if 'color' in dev:
color = dev['color']
name = d if '[' not in d else d.split('[')[0]
if(d in sv.devprops):
name = sv.devprops[d].altName(d)
if 'drv' in dev and dev['drv']:
name += ' {%s}' % dev['drv']
if sv.suspendmode in suspendmodename:
name += ' '+p
if('ftrace' in dev):
cg = dev['ftrace']
if cg.name == sv.ftopfunc:
name = 'top level suspend/resume call'
num = callgraphHTML(sv, hf, num, cg,
name, color, dev['id'])
if('ftraces' in dev):
for cg in dev['ftraces']:
num = callgraphHTML(sv, hf, num, cg,
name+' → '+cg.name, color, dev['id'])
hf.write('\n\n </section>\n')
def summaryCSS(title, center=True):
tdcenter = 'text-align:center;' if center else ''
out = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>'+title+'</title>\n\
<style type=\'text/css\'>\n\
.stamp {width: 100%;text-align:center;background:#888;line-height:30px;color:white;font: 25px Arial;}\n\
table {width:100%;border-collapse: collapse;border:1px solid;}\n\
th {border: 1px solid black;background:#222;color:white;}\n\
td {font: 14px "Times New Roman";'+tdcenter+'}\n\
tr.head td {border: 1px solid black;background:#aaa;}\n\
tr.alt {background-color:#ddd;}\n\
tr.notice {color:red;}\n\
.minval {background-color:#BBFFBB;}\n\
.medval {background-color:#BBBBFF;}\n\
.maxval {background-color:#FFBBBB;}\n\
.head a {color:#000;text-decoration: none;}\n\
</style>\n</head>\n<body>\n'
return out
# Function: createHTMLSummarySimple
# Description:
# Create summary html file for a series of tests
# Arguments:
# testruns: array of Data objects from parseTraceLog
def createHTMLSummarySimple(testruns, htmlfile, title):
# write the html header first (html head, css code, up to body start)
html = summaryCSS('Summary - SleepGraph')
# extract the test data into list
list = dict()
tAvg, tMin, tMax, tMed = [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [dict(), dict()]
iMin, iMed, iMax = [0, 0], [0, 0], [0, 0]
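# every stat pair is indexed [0]=suspend, [1]=resume; tMed maps a time value
# to the row index that produced it so the median row can be linked later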
num = 0
useturbo = usewifi = False
lastmode = ''
cnt = dict()
for data in sorted(testruns, key=lambda v:(v['mode'], v['host'], v['kernel'], v['time'])):
mode = data['mode']
if mode not in list:
list[mode] = {'data': [], 'avg': [0,0], 'min': [0,0], 'max': [0,0], 'med': [0,0]}
if lastmode and lastmode != mode and num > 0:
for i in range(2):
s = sorted(tMed[i])
list[lastmode]['med'][i] = s[int(len(s)//2)]
iMed[i] = tMed[i][list[lastmode]['med'][i]]
list[lastmode]['avg'] = [tAvg[0] / num, tAvg[1] / num]
list[lastmode]['min'] = tMin
list[lastmode]['max'] = tMax
list[lastmode]['idx'] = (iMin, iMed, iMax)
tAvg, tMin, tMax, tMed = [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [dict(), dict()]
iMin, iMed, iMax = [0, 0], [0, 0], [0, 0]
num = 0
pkgpc10 = syslpi = wifi = ''
if 'pkgpc10' in data and 'syslpi' in data:
pkgpc10, syslpi, useturbo = data['pkgpc10'], data['syslpi'], True
if 'wifi' in data:
wifi, usewifi = data['wifi'], True
res = data['result']
tVal = [float(data['suspend']), float(data['resume'])]
list[mode]['data'].append([data['host'], data['kernel'],
data['time'], tVal[0], tVal[1], data['url'], res,
data['issues'], data['sus_worst'], data['sus_worsttime'],
data['res_worst'], data['res_worsttime'], pkgpc10, syslpi, wifi])
idx = len(list[mode]['data']) - 1
if res.startswith('fail in'):
res = 'fail'
if res not in cnt:
cnt[res] = 1
else:
cnt[res] += 1
if res == 'pass':
for i in range(2):
tMed[i][tVal[i]] = idx
tAvg[i] += tVal[i]
if tMin[i] == 0 or tVal[i] < tMin[i]:
iMin[i] = idx
tMin[i] = tVal[i]
if tMax[i] == 0 or tVal[i] > tMax[i]:
iMax[i] = idx
tMax[i] = tVal[i]
num += 1
lastmode = mode
if lastmode and num > 0:
for i in range(2):
s = sorted(tMed[i])
list[lastmode]['med'][i] = s[int(len(s)//2)]
iMed[i] = tMed[i][list[lastmode]['med'][i]]
list[lastmode]['avg'] = [tAvg[0] / num, tAvg[1] / num]
list[lastmode]['min'] = tMin
list[lastmode]['max'] = tMax
list[lastmode]['idx'] = (iMin, iMed, iMax)
# group test header
desc = []
for ilk in sorted(cnt, reverse=True):
if cnt[ilk] > 0:
desc.append('%d %s' % (cnt[ilk], ilk))
html += '<div class="stamp">%s (%d tests: %s)</div>\n' % (title, len(testruns), ', '.join(desc))
th = '\t<th>{0}</th>\n'
td = '\t<td>{0}</td>\n'
tdh = '\t<td{1}>{0}</td>\n'
tdlink = '\t<td><a href="{0}">html</a></td>\n'
cols = 12
if useturbo:
cols += 2
if usewifi:
cols += 1
colspan = '%d' % cols
# table header
html += '<table>\n<tr>\n' + th.format('#') +\
th.format('Mode') + th.format('Host') + th.format('Kernel') +\
th.format('Test Time') + th.format('Result') + th.format('Issues') +\
th.format('Suspend') + th.format('Resume') +\
th.format('Worst Suspend Device') + th.format('SD Time') +\
th.format('Worst Resume Device') + th.format('RD Time')
if useturbo:
html += th.format('PkgPC10') + th.format('SysLPI')
if usewifi:
html += th.format('Wifi')
html += th.format('Detail')+'</tr>\n'
# export list into html
head = '<tr class="head"><td>{0}</td><td>{1}</td>'+\
'<td colspan='+colspan+' class="sus">Suspend Avg={2} '+\
'<span class=minval><a href="#s{10}min">Min={3}</a></span> '+\
'<span class=medval><a href="#s{10}med">Med={4}</a></span> '+\
'<span class=maxval><a href="#s{10}max">Max={5}</a></span> '+\
'Resume Avg={6} '+\
'<span class=minval><a href="#r{10}min">Min={7}</a></span> '+\
'<span class=medval><a href="#r{10}med">Med={8}</a></span> '+\
'<span class=maxval><a href="#r{10}max">Max={9}</a></span></td>'+\
'</tr>\n'
headnone = '<tr class="head"><td>{0}</td><td>{1}</td><td colspan='+\
colspan+'></td></tr>\n'
for mode in sorted(list):
# header line for each suspend mode
num = 0
tAvg, tMin, tMax, tMed = list[mode]['avg'], list[mode]['min'],\
list[mode]['max'], list[mode]['med']
count = len(list[mode]['data'])
if 'idx' in list[mode]:
iMin, iMed, iMax = list[mode]['idx']
html += head.format('%d' % count, mode.upper(),
'%.3f' % tAvg[0], '%.3f' % tMin[0], '%.3f' % tMed[0], '%.3f' % tMax[0],
'%.3f' % tAvg[1], '%.3f' % tMin[1], '%.3f' % tMed[1], '%.3f' % tMax[1],
mode.lower()
)
else:
iMin = iMed = iMax = [-1, -1, -1]
html += headnone.format('%d' % count, mode.upper())
for d in list[mode]['data']:
# row classes - alternate row color
rcls = ['alt'] if num % 2 == 1 else []
if d[6] != 'pass':
rcls.append('notice')
html += '<tr class="'+(' '.join(rcls))+'">\n' if len(rcls) > 0 else '<tr>\n'
# figure out if the line has sus or res highlighted
idx = list[mode]['data'].index(d)
tHigh = ['', '']
for i in range(2):
tag = 's%s' % mode if i == 0 else 'r%s' % mode
if idx == iMin[i]:
tHigh[i] = ' id="%smin" class=minval title="Minimum"' % tag
elif idx == iMax[i]:
tHigh[i] = ' id="%smax" class=maxval title="Maximum"' % tag
elif idx == iMed[i]:
tHigh[i] = ' id="%smed" class=medval title="Median"' % tag
html += td.format("%d" % (list[mode]['data'].index(d) + 1)) # row
html += td.format(mode) # mode
html += td.format(d[0]) # host
html += td.format(d[1]) # kernel
html += td.format(d[2]) # time
html += td.format(d[6]) # result
html += td.format(d[7]) # issues
html += tdh.format('%.3f ms' % d[3], tHigh[0]) if d[3] else td.format('') # suspend
html += tdh.format('%.3f ms' % d[4], tHigh[1]) if d[4] else td.format('') # resume
html += td.format(d[8]) # sus_worst
html += td.format('%.3f ms' % d[9]) if d[9] else td.format('') # sus_worst time
html += td.format(d[10]) # res_worst
html += td.format('%.3f ms' % d[11]) if d[11] else td.format('') # res_worst time
if useturbo:
html += td.format(d[12]) # pkg_pc10
html += td.format(d[13]) # syslpi
if usewifi:
html += td.format(d[14]) # wifi
html += tdlink.format(d[5]) if d[5] else td.format('') # url
html += '</tr>\n'
num += 1
# flush the data to file
hf = open(htmlfile, 'w')
hf.write(html+'</table>\n</body>\n</html>\n')
hf.close()
def createHTMLDeviceSummary(testruns, htmlfile, title):
html = summaryCSS('Device Summary - SleepGraph', False)
# create global device list from all tests
devall = dict()
for data in testruns:
host, url, devlist = data['host'], data['url'], data['devlist']
for type in devlist:
if type not in devall:
devall[type] = dict()
mdevlist, devlist = devall[type], data['devlist'][type]
for name in devlist:
length = devlist[name]
if name not in mdevlist:
mdevlist[name] = {'name': name, 'host': host,
'worst': length, 'total': length, 'count': 1,
'url': url}
else:
if length > mdevlist[name]['worst']:
mdevlist[name]['worst'] = length
mdevlist[name]['url'] = url
mdevlist[name]['host'] = host
mdevlist[name]['total'] += length
mdevlist[name]['count'] += 1
# generate the html
th = '\t<th>{0}</th>\n'
td = '\t<td align=center>{0}</td>\n'
tdr = '\t<td align=right>{0}</td>\n'
tdlink = '\t<td align=center><a href="{0}">html</a></td>\n'
limit = 1
for type in sorted(devall, reverse=True):
num = 0
devlist = devall[type]
# table header
html += '<div class="stamp">%s (%s devices > %d ms)</div><table>\n' % \
(title, type.upper(), limit)
html += '<tr>\n' + '<th align=right>Device Name</th>' +\
th.format('Average Time') + th.format('Count') +\
th.format('Worst Time') + th.format('Host (worst time)') +\
th.format('Link (worst time)') + '</tr>\n'
for name in sorted(devlist, key=lambda k:(devlist[k]['worst'], \
devlist[k]['total'], devlist[k]['name']), reverse=True):
data = devall[type][name]
data['average'] = data['total'] / data['count']
if data['average'] < limit:
continue
# row classes - alternate row color
rcls = ['alt'] if num % 2 == 1 else []
html += '<tr class="'+(' '.join(rcls))+'">\n' if len(rcls) > 0 else '<tr>\n'
html += tdr.format(data['name']) # name
html += td.format('%.3f ms' % data['average']) # average
html += td.format(data['count']) # count
html += td.format('%.3f ms' % data['worst']) # worst
html += td.format(data['host']) # host
html += tdlink.format(data['url']) # url
html += '</tr>\n'
num += 1
html += '</table>\n'
# flush the data to file
hf = open(htmlfile, 'w')
hf.write(html+'</body>\n</html>\n')
hf.close()
return devall
def createHTMLIssuesSummary(testruns, issues, htmlfile, title, extra=''):
multihost = len([e for e in issues if len(e['urls']) > 1]) > 0
html = summaryCSS('Issues Summary - SleepGraph', False)
total = len(testruns)
# generate the html
th = '\t<th>{0}</th>\n'
td = '\t<td align={0}>{1}</td>\n'
tdlink = '<a href="{1}">{0}</a>'
subtitle = '%d issues' % len(issues) if len(issues) > 0 else 'no issues'
html += '<div class="stamp">%s (%s)</div><table>\n' % (title, subtitle)
html += '<tr>\n' + th.format('Issue') + th.format('Count')
if multihost:
html += th.format('Hosts')
html += th.format('Tests') + th.format('Fail Rate') +\
th.format('First Instance') + '</tr>\n'
num = 0
for e in sorted(issues, key=lambda v:v['count'], reverse=True):
testtotal = 0
links = []
for host in sorted(e['urls']):
links.append(tdlink.format(host, e['urls'][host][0]))
testtotal += len(e['urls'][host])
rate = '%d/%d (%.2f%%)' % (testtotal, total, 100*float(testtotal)/float(total))
# row classes - alternate row color
rcls = ['alt'] if num % 2 == 1 else []
html += '<tr class="'+(' '.join(rcls))+'">\n' if len(rcls) > 0 else '<tr>\n'
html += td.format('left', e['line']) # issue
html += td.format('center', e['count']) # count
if multihost:
html += td.format('center', len(e['urls'])) # hosts
html += td.format('center', testtotal) # test count
html += td.format('center', rate) # test rate
html += td.format('center nowrap', '<br>'.join(links)) # links
html += '</tr>\n'
num += 1
# flush the data to file
hf = open(htmlfile, 'w')
hf.write(html+'</table>\n'+extra+'</body>\n</html>\n')
hf.close()
return issues
def ordinal(value):
suffix = 'th'
if value < 10 or value > 19:
if value % 10 == 1:
suffix = 'st'
elif value % 10 == 2:
suffix = 'nd'
elif value % 10 == 3:
suffix = 'rd'
return '%d%s' % (value, suffix)
# Function: createHTML
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createHTML(testruns, testfail):
if len(testruns) < 1:
pprint('ERROR: Not enough test data to build a timeline')
return
kerror = False
for data in testruns:
if data.kerror:
kerror = True
if(sysvals.suspendmode in ['freeze', 'standby']):
data.trimFreezeTime(testruns[-1].tSuspended)
else:
data.getMemTime()
# html function templates
html_error = '<div id="{1}" title="kernel error/warning" class="err" style="right:{0}%">{2}→</div>\n'
html_traceevent = '<div title="{0}" class="traceevent{6}" style="left:{1}%;top:{2}px;height:{3}px;width:{4}%;line-height:{3}px;{7}">{5}</div>\n'
html_cpuexec = '<div class="jiffie" style="left:{0}%;top:{1}px;height:{2}px;width:{3}%;background:{4};"></div>\n'
html_timetotal = '<table class="time1">\n<tr>'\
'<td class="green" title="{3}">{2} Suspend Time: <b>{0} ms</b></td>'\
'<td class="yellow" title="{4}">{2} Resume Time: <b>{1} ms</b></td>'\
'</tr>\n</table>\n'
html_timetotal2 = '<table class="time1">\n<tr>'\
'<td class="green" title="{4}">{3} Suspend Time: <b>{0} ms</b></td>'\
'<td class="gray" title="time spent in low-power mode with clock running">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\
'<td class="yellow" title="{5}">{3} Resume Time: <b>{2} ms</b></td>'\
'</tr>\n</table>\n'
html_timetotal3 = '<table class="time1">\n<tr>'\
'<td class="green">Execution Time: <b>{0} ms</b></td>'\
'<td class="yellow">Command: <b>{1}</b></td>'\
'</tr>\n</table>\n'
html_fail = '<table class="testfail"><tr><td>{0}</td></tr></table>\n'
html_kdesc = '<td class="{3}" title="time spent in kernel execution">{0}Kernel {2}: {1} ms</td>'
html_fwdesc = '<td class="{3}" title="time spent in firmware">{0}Firmware {2}: {1} ms</td>'
html_wifdesc = '<td class="yellow" title="time for wifi to reconnect after resume complete ({2})">{0}Wifi Resume: {1}</td>'
# html format variables
scaleH = 20
if kerror:
scaleH = 40
# device timeline
devtl = Timeline(30, scaleH)
# write the test title and general info header
devtl.createHeader(sysvals, testruns[0].stamp)
# Generate the header for this timeline
for data in testruns:
tTotal = data.end - data.start
if(tTotal == 0):
doError('No timeline data')
if sysvals.suspendmode == 'command':
run_time = '%.0f' % (tTotal * 1000)
if sysvals.testcommand:
testdesc = sysvals.testcommand
else:
testdesc = 'unknown'
if(len(testruns) > 1):
testdesc = ordinal(data.testnumber+1)+' '+testdesc
thtml = html_timetotal3.format(run_time, testdesc)
devtl.html += thtml
continue
# typical full suspend/resume header
stot, rtot = sktime, rktime = data.getTimeValues()
ssrc, rsrc, testdesc, testdesc2 = ['kernel'], ['kernel'], 'Kernel', ''
if data.fwValid:
stot += (data.fwSuspend/1000000.0)
rtot += (data.fwResume/1000000.0)
ssrc.append('firmware')
rsrc.append('firmware')
testdesc = 'Total'
if 'time' in data.wifi and data.wifi['stat'] != 'timeout':
rtot += data.end - data.tKernRes + (data.wifi['time'] * 1000.0)
rsrc.append('wifi')
testdesc = 'Total'
suspend_time, resume_time = '%.3f' % stot, '%.3f' % rtot
stitle = 'time from kernel suspend start to %s mode [%s time]' % \
(sysvals.suspendmode, ' & '.join(ssrc))
rtitle = 'time from %s mode to kernel resume complete [%s time]' % \
(sysvals.suspendmode, ' & '.join(rsrc))
if(len(testruns) > 1):
testdesc = testdesc2 = ordinal(data.testnumber+1)
testdesc2 += ' '
if(len(data.tLow) == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc, stitle, rtitle)
else:
low_time = '+'.join(data.tLow)
thtml = html_timetotal2.format(suspend_time, low_time, \
resume_time, testdesc, stitle, rtitle)
devtl.html += thtml
if not data.fwValid and 'dev' not in data.wifi:
continue
# extra detail when the times come from multiple sources
thtml = '<table class="time2">\n<tr>'
thtml += html_kdesc.format(testdesc2, '%.3f'%sktime, 'Suspend', 'green')
if data.fwValid:
sftime = '%.3f'%(data.fwSuspend / 1000000.0)
rftime = '%.3f'%(data.fwResume / 1000000.0)
thtml += html_fwdesc.format(testdesc2, sftime, 'Suspend', 'green')
thtml += html_fwdesc.format(testdesc2, rftime, 'Resume', 'yellow')
thtml += html_kdesc.format(testdesc2, '%.3f'%rktime, 'Resume', 'yellow')
if 'time' in data.wifi:
if data.wifi['stat'] != 'timeout':
wtime = '%.0f ms'%(data.end - data.tKernRes + (data.wifi['time'] * 1000.0))
else:
wtime = 'TIMEOUT'
thtml += html_wifdesc.format(testdesc2, wtime, data.wifi['dev'])
thtml += '</tr>\n</table>\n'
devtl.html += thtml
if testfail:
devtl.html += html_fail.format(testfail)
# time scale for potentially multiple datasets
t0 = testruns[0].start
tMax = testruns[-1].end
tTotal = tMax - t0
# determine the maximum number of rows we need to draw
fulllist = []
threadlist = []
pscnt = 0
devcnt = 0
for data in testruns:
data.selectTimelineDevices('%f', tTotal, sysvals.mindevlen)
for group in data.devicegroups:
devlist = []
for phase in group:
for devname in sorted(data.tdevlist[phase]):
d = DevItem(data.testnumber, phase, data.dmesg[phase]['list'][devname])
devlist.append(d)
if d.isa('kth'):
threadlist.append(d)
else:
if d.isa('ps'):
pscnt += 1
else:
devcnt += 1
fulllist.append(d)
if sysvals.mixedphaseheight:
devtl.getPhaseRows(devlist)
if not sysvals.mixedphaseheight:
if len(threadlist) > 0 and len(fulllist) > 0:
if pscnt > 0 and devcnt > 0:
msg = 'user processes & device pm callbacks'
elif pscnt > 0:
msg = 'user processes'
else:
msg = 'device pm callbacks'
d = testruns[0].addHorizontalDivider(msg, testruns[-1].end)
fulllist.insert(0, d)
devtl.getPhaseRows(fulllist)
if len(threadlist) > 0:
d = testruns[0].addHorizontalDivider('asynchronous kernel threads', testruns[-1].end)
threadlist.insert(0, d)
devtl.getPhaseRows(threadlist, devtl.rows)
devtl.calcTotalRows()
# draw the full timeline
devtl.createZoomBox(sysvals.suspendmode, len(testruns))
for data in testruns:
# draw each test run and block chronologically
phases = {'suspend':[],'resume':[]}
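# phases that begin at or after tSuspended go in the resume block, the rest in suspend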
for phase in data.sortedPhases():
if data.dmesg[phase]['start'] >= data.tSuspended:
phases['resume'].append(phase)
else:
phases['suspend'].append(phase)
# now draw the actual timeline blocks
for dir in phases:
# draw suspend and resume blocks separately
bname = '%s%d' % (dir[0], data.testnumber)
if dir == 'suspend':
m0 = data.start
mMax = data.tSuspended
left = '%f' % (((m0-t0)*100.0)/tTotal)
else:
m0 = data.tSuspended
mMax = data.end
# in an x2 run, remove any gap between blocks
if len(testruns) > 1 and data.testnumber == 0:
mMax = testruns[1].start
left = '%f' % ((((m0-t0)*100.0)+sysvals.srgap/2)/tTotal)
mTotal = mMax - m0
# if a timeline block is 0 length, skip altogether
if mTotal == 0:
continue
width = '%f' % (((mTotal*100.0)-sysvals.srgap/2)/tTotal)
devtl.html += devtl.html_tblock.format(bname, left, width, devtl.scaleH)
for b in phases[dir]:
# draw the phase color background
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%f' % (((phase['start']-m0)*100.0)/mTotal)
width = '%f' % ((length*100.0)/mTotal)
devtl.html += devtl.html_phase.format(left, width, \
'%.3f'%devtl.scaleH, '%.3f'%devtl.bodyH, \
data.dmesg[b]['color'], '')
for e in data.errorinfo[dir]:
# draw red lines for any kernel errors found
type, t, idx1, idx2 = e
id = '%d_%d' % (idx1, idx2)
right = '%f' % (((mMax-t)*100.0)/mTotal)
devtl.html += html_error.format(right, id, type)
for b in phases[dir]:
# draw the devices for this phase
phaselist = data.dmesg[b]['list']
for d in sorted(data.tdevlist[b]):
dname = d if ('[' not in d or 'CPU' in d) else d.split('[')[0]
name, dev = dname, phaselist[d]
drv = xtraclass = xtrainfo = xtrastyle = ''
if 'htmlclass' in dev:
xtraclass = dev['htmlclass']
if 'color' in dev:
xtrastyle = 'background:%s;' % dev['color']
if(d in sysvals.devprops):
name = sysvals.devprops[d].altName(d)
xtraclass = sysvals.devprops[d].xtraClass()
xtrainfo = sysvals.devprops[d].xtraInfo()
elif xtraclass == ' kth':
xtrainfo = ' kernel_thread'
if('drv' in dev and dev['drv']):
drv = ' {%s}' % dev['drv']
rowheight = devtl.phaseRowHeight(data.testnumber, b, dev['row'])
rowtop = devtl.phaseRowTop(data.testnumber, b, dev['row'])
top = '%.3f' % (rowtop + devtl.scaleH)
left = '%f' % (((dev['start']-m0)*100)/mTotal)
width = '%f' % (((dev['end']-dev['start'])*100)/mTotal)
length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
title = name+drv+xtrainfo+length
if sysvals.suspendmode == 'command':
title += sysvals.testcommand
elif xtraclass == ' ps':
if 'suspend' in b:
title += 'pre_suspend_process'
else:
title += 'post_resume_process'
else:
title += b
devtl.html += devtl.html_device.format(dev['id'], \
title, left, top, '%.3f'%rowheight, width, \
dname+drv, xtraclass, xtrastyle)
if('cpuexec' in dev):
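# overlay process-monitor samples as red bars; opacity scales with the
# jiffies used in each interval, saturating at 5 jiffies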
for t in sorted(dev['cpuexec']):
start, end = t
j = float(dev['cpuexec'][t]) / 5
if j > 1.0:
j = 1.0
height = '%.3f' % (rowheight/3)
top = '%.3f' % (rowtop + devtl.scaleH + 2*rowheight/3)
left = '%f' % (((start-m0)*100)/mTotal)
width = '%f' % ((end-start)*100/mTotal)
color = 'rgba(255, 0, 0, %f)' % j
devtl.html += \
html_cpuexec.format(left, top, height, width, color)
if('src' not in dev):
continue
# draw any trace events for this device
for e in dev['src']:
if e.length == 0:
continue
height = '%.3f' % devtl.rowH
top = '%.3f' % (rowtop + devtl.scaleH + (e.row*devtl.rowH))
left = '%f' % (((e.time-m0)*100)/mTotal)
width = '%f' % (e.length*100/mTotal)
xtrastyle = ''
if e.color:
xtrastyle = 'background:%s;' % e.color
devtl.html += \
html_traceevent.format(e.title(), \
left, top, height, width, e.text(), '', xtrastyle)
# draw the time scale, try to make the number of labels readable
devtl.createTimeScale(m0, mMax, tTotal, dir)
devtl.html += '</div>\n'
# timeline is finished
devtl.html += '</div>\n</div>\n'
# draw a legend which describes the phases by color
if sysvals.suspendmode != 'command':
phasedef = testruns[-1].phasedef
devtl.html += '<div class="legend">\n'
pdelta = 100.0/len(phasedef.keys())
pmargin = pdelta / 4.0
for phase in sorted(phasedef, key=lambda k:phasedef[k]['order']):
id, p = '', phasedef[phase]
for word in phase.split('_'):
id += word[0]
order = '%.2f' % ((p['order'] * pdelta) + pmargin)
name = phase.replace('_', ' ')
devtl.html += devtl.html_legend.format(order, p['color'], name, id)
devtl.html += '</div>\n'
hf = open(sysvals.htmlfile, 'w')
addCSS(hf, sysvals, len(testruns), kerror)
# write the device timeline
hf.write(devtl.html)
hf.write('<div id="devicedetailtitle"></div>\n')
hf.write('<div id="devicedetail" style="display:none;">\n')
# draw the colored boxes for the device detail section
for data in testruns:
hf.write('<div id="devicedetail%d">\n' % data.testnumber)
pscolor = 'linear-gradient(to top left, #ccc, #eee)'
hf.write(devtl.html_phaselet.format('pre_suspend_process', \
'0', '0', pscolor))
for b in data.sortedPhases():
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
width = '%.3f' % ((length*100.0)/tTotal)
hf.write(devtl.html_phaselet.format(b, left, width, \
data.dmesg[b]['color']))
hf.write(devtl.html_phaselet.format('post_resume_process', \
'0', '0', pscolor))
if sysvals.suspendmode == 'command':
hf.write(devtl.html_phaselet.format('cmdexec', '0', '0', pscolor))
hf.write('</div>\n')
hf.write('</div>\n')
# write the ftrace data (callgraph)
if sysvals.cgtest >= 0 and len(testruns) > sysvals.cgtest:
data = testruns[sysvals.cgtest]
else:
data = testruns[-1]
if sysvals.usecallgraph:
addCallgraphs(sysvals, hf, data)
# add the test log as a hidden div
if sysvals.testlog and sysvals.logmsg:
hf.write('<div id="testlog" style="display:none;">\n'+sysvals.logmsg+'</div>\n')
# add the dmesg log as a hidden div
if sysvals.dmesglog and sysvals.dmesgfile:
hf.write('<div id="dmesglog" style="display:none;">\n')
lf = sysvals.openlog(sysvals.dmesgfile, 'r')
for line in lf:
line = line.replace('<', '&lt;').replace('>', '&gt;')
hf.write(line)
lf.close()
hf.write('</div>\n')
# add the ftrace log as a hidden div
if sysvals.ftracelog and sysvals.ftracefile:
hf.write('<div id="ftracelog" style="display:none;">\n')
lf = sysvals.openlog(sysvals.ftracefile, 'r')
for line in lf:
hf.write(line)
lf.close()
hf.write('</div>\n')
# write the footer and close
addScriptCode(hf, testruns)
hf.write('</body>\n</html>\n')
hf.close()
return True
def addCSS(hf, sv, testcount=1, kerror=False, extra=''):
kernel = sv.stamp['kernel']
host = sv.hostname[0].upper()+sv.hostname[1:]
mode = sv.suspendmode
if sv.suspendmode in suspendmodename:
mode = suspendmodename[sv.suspendmode]
title = host+' '+mode+' '+kernel
# various format changes by flags
cgchk = 'checked'
cgnchk = 'not(:checked)'
if sv.cgexp:
cgchk = 'not(:checked)'
cgnchk = 'checked'
hoverZ = 'z-index:8;'
if sv.usedevsrc:
hoverZ = ''
devlistpos = 'absolute'
if testcount > 1:
devlistpos = 'relative'
scaleTH = 20
if kerror:
scaleTH = 60
# write the html header first (html head, css code, up to body start)
html_header = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>'+title+'</title>\n\
<style type=\'text/css\'>\n\
body {overflow-y:scroll;}\n\
.stamp {width:100%;text-align:center;background:gray;line-height:30px;color:white;font:25px Arial;}\n\
.stamp.sysinfo {font:10px Arial;}\n\
.callgraph {margin-top:30px;box-shadow:5px 5px 20px black;}\n\
.callgraph article * {padding-left:28px;}\n\
h1 {color:black;font:bold 30px Times;}\n\
t0 {color:black;font:bold 30px Times;}\n\
t1 {color:black;font:30px Times;}\n\
t2 {color:black;font:25px Times;}\n\
t3 {color:black;font:20px Times;white-space:nowrap;}\n\
t4 {color:black;font:bold 30px Times;line-height:60px;white-space:nowrap;}\n\
cS {font:bold 13px Times;}\n\
table {width:100%;}\n\
.gray {background:rgba(80,80,80,0.1);}\n\
.green {background:rgba(204,255,204,0.4);}\n\
.purple {background:rgba(128,0,128,0.2);}\n\
.yellow {background:rgba(255,255,204,0.4);}\n\
.blue {background:rgba(169,208,245,0.4);}\n\
.time1 {font:22px Arial;border:1px solid;}\n\
.time2 {font:15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\
.testfail {font:bold 22px Arial;color:red;border:1px dashed;}\n\
td {text-align:center;}\n\
r {color:#500000;font:15px Tahoma;}\n\
n {color:#505050;font:15px Tahoma;}\n\
.tdhl {color:red;}\n\
.hide {display:none;}\n\
.pf {display:none;}\n\
.pf:'+cgchk+' + label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:'+cgnchk+' ~ label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:'+cgchk+' ~ *:not(:nth-child(2)) {display:none;}\n\
.zoombox {position:relative;width:100%;overflow-x:scroll;-webkit-user-select:none;-moz-user-select:none;user-select:none;}\n\
.timeline {position:relative;font-size:14px;cursor:pointer;width:100%; overflow:hidden;background:linear-gradient(#cccccc, white);}\n\
.thread {position:absolute;height:0%;overflow:hidden;z-index:7;line-height:30px;font-size:14px;border:1px solid;text-align:center;white-space:nowrap;}\n\
.thread.ps {border-radius:3px;background:linear-gradient(to top, #ccc, #eee);}\n\
.thread:hover {background:white;border:1px solid red;'+hoverZ+'}\n\
.thread.sec,.thread.sec:hover {background:black;border:0;color:white;line-height:15px;font-size:10px;}\n\
.hover {background:white;border:1px solid red;'+hoverZ+'}\n\
.hover.sync {background:white;}\n\
.hover.bg,.hover.kth,.hover.sync,.hover.ps {background:white;}\n\
.jiffie {position:absolute;pointer-events: none;z-index:8;}\n\
.traceevent {position:absolute;font-size:10px;z-index:7;overflow:hidden;color:black;text-align:center;white-space:nowrap;border-radius:5px;border:1px solid black;background:linear-gradient(to bottom right,#CCC,#969696);}\n\
.traceevent:hover {color:white;font-weight:bold;border:1px solid white;}\n\
.phase {position:absolute;overflow:hidden;border:0px;text-align:center;}\n\
.phaselet {float:left;overflow:hidden;border:0px;text-align:center;min-height:100px;font-size:24px;}\n\
.t {position:absolute;line-height:'+('%d'%scaleTH)+'px;pointer-events:none;top:0;height:100%;border-right:1px solid black;z-index:6;}\n\
.err {position:absolute;top:0%;height:100%;border-right:3px solid red;color:red;font:bold 14px Times;line-height:18px;}\n\
.legend {position:relative; width:100%; height:40px; text-align:center;margin-bottom:20px}\n\
.legend .square {position:absolute;cursor:pointer;top:10px; width:0px;height:20px;border:1px solid;padding-left:20px;}\n\
button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
.btnfmt {position:relative;float:right;height:25px;width:auto;margin-top:3px;margin-bottom:0;font-size:10px;text-align:center;}\n\
.devlist {position:'+devlistpos+';width:190px;}\n\
a:link {color:white;text-decoration:none;}\n\
a:visited {color:white;}\n\
a:hover {color:white;}\n\
a:active {color:white;}\n\
.version {position:relative;float:left;color:white;font-size:10px;line-height:30px;margin-left:10px;}\n\
#devicedetail {min-height:100px;box-shadow:5px 5px 20px black;}\n\
.tblock {position:absolute;height:100%;background:#ddd;}\n\
.tback {position:absolute;width:100%;background:linear-gradient(#ccc, #ddd);}\n\
.bg {z-index:1;}\n\
'+extra+'\
</style>\n</head>\n<body>\n'
hf.write(html_header)
# Function: addScriptCode
# Description:
# Adds the javascript code to the output html
# Arguments:
# hf: the open html file pointer
# testruns: array of Data objects from parseKernelLog or parseTraceLog
def addScriptCode(hf, testruns):
t0 = testruns[0].start * 1000
tMax = testruns[-1].end * 1000
# create an array in javascript memory with the device details
detail = ' var devtable = [];\n'
for data in testruns:
topo = data.deviceTopology()
detail += ' devtable[%d] = "%s";\n' % (data.testnumber, topo)
detail += ' var bounds = [%f,%f];\n' % (t0, tMax)
# add the code which will manipulate the data in the browser
script_code = \
'<script type="text/javascript">\n'+detail+\
' var resolution = -1;\n'\
' var dragval = [0, 0];\n'\
' function redrawTimescale(t0, tMax, tS) {\n'\
' var rline = \'<div class="t" style="left:0;border-left:1px solid black;border-right:0;">\';\n'\
' var tTotal = tMax - t0;\n'\
' var list = document.getElementsByClassName("tblock");\n'\
' for (var i = 0; i < list.length; i++) {\n'\
' var timescale = list[i].getElementsByClassName("timescale")[0];\n'\
' var m0 = t0 + (tTotal*parseFloat(list[i].style.left)/100);\n'\
' var mTotal = tTotal*parseFloat(list[i].style.width)/100;\n'\
' var mMax = m0 + mTotal;\n'\
' var html = "";\n'\
' var divTotal = Math.floor(mTotal/tS) + 1;\n'\
' if(divTotal > 1000) continue;\n'\
' var divEdge = (mTotal - tS*(divTotal-1))*100/mTotal;\n'\
' var pos = 0.0, val = 0.0;\n'\
' for (var j = 0; j < divTotal; j++) {\n'\
' var htmlline = "";\n'\
' var mode = list[i].id[5];\n'\
' if(mode == "s") {\n'\
' pos = 100 - (((j)*tS*100)/mTotal) - divEdge;\n'\
' val = (j-divTotal+1)*tS;\n'\
' if(j == divTotal - 1)\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%"><cS>S→</cS></div>\';\n'\
' else\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
' } else {\n'\
' pos = 100 - (((j)*tS*100)/mTotal);\n'\
' val = (j)*tS;\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
' if(j == 0)\n'\
' if(mode == "r")\n'\
' htmlline = rline+"<cS>←R</cS></div>";\n'\
' else\n'\
' htmlline = rline+"<cS>0ms</div>";\n'\
' }\n'\
' html += htmlline;\n'\
' }\n'\
' timescale.innerHTML = html;\n'\
' }\n'\
' }\n'\
' function zoomTimeline() {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var zoombox = document.getElementById("dmesgzoombox");\n'\
' var left = zoombox.scrollLeft;\n'\
' var val = parseFloat(dmesg.style.width);\n'\
' var newval = 100;\n'\
' var sh = window.outerWidth / 2;\n'\
' if(this.id == "zoomin") {\n'\
' newval = val * 1.2;\n'\
' if(newval > 910034) newval = 910034;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
' } else if (this.id == "zoomout") {\n'\
' newval = val / 1.2;\n'\
' if(newval < 100) newval = 100;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
' } else {\n'\
' zoombox.scrollLeft = 0;\n'\
' dmesg.style.width = "100%";\n'\
' }\n'\
' var tS = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1];\n'\
' var t0 = bounds[0];\n'\
' var tMax = bounds[1];\n'\
' var tTotal = tMax - t0;\n'\
' var wTotal = tTotal * 100.0 / newval;\n'\
' var idx = 7*window.innerWidth/1100;\n'\
' for(var i = 0; (i < tS.length)&&((wTotal / tS[i]) < idx); i++);\n'\
' if(i >= tS.length) i = tS.length - 1;\n'\
' if(tS[i] == resolution) return;\n'\
' resolution = tS[i];\n'\
' redrawTimescale(t0, tMax, tS[i]);\n'\
' }\n'\
' function deviceName(title) {\n'\
' var name = title.slice(0, title.indexOf(" ("));\n'\
' return name;\n'\
' }\n'\
' function deviceHover() {\n'\
' var name = deviceName(this.title);\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = deviceName(dev[i].title);\n'\
' var cname = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' dev[i].className = "hover "+cname;\n'\
' } else {\n'\
' dev[i].className = cname;\n'\
' }\n'\
' }\n'\
' }\n'\
' function deviceUnhover() {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].className = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
' }\n'\
' }\n'\
' function deviceTitle(title, total, cpu) {\n'\
' var prefix = "Total";\n'\
' if(total.length > 3) {\n'\
' prefix = "Average";\n'\
' total[1] = (total[1]+total[3])/2;\n'\
' total[2] = (total[2]+total[4])/2;\n'\
' }\n'\
' var devtitle = document.getElementById("devicedetailtitle");\n'\
' var name = deviceName(title);\n'\
' if(cpu >= 0) name = "CPU"+cpu;\n'\
' var driver = "";\n'\
' var tS = "<t2>(</t2>";\n'\
' var tR = "<t2>)</t2>";\n'\
' if(total[1] > 0)\n'\
' tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
' if(total[2] > 0)\n'\
' tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
' var s = title.indexOf("{");\n'\
' var e = title.indexOf("}");\n'\
' if((s >= 0) && (e >= 0))\n'\
' driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
' if(total[1] > 0 && total[2] > 0)\n'\
' devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
' else\n'\
' devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
' return name;\n'\
' }\n'\
' function deviceDetail() {\n'\
' var devinfo = document.getElementById("devicedetail");\n'\
' devinfo.style.display = "block";\n'\
' var name = deviceName(this.title);\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var idlist = [];\n'\
' var pdata = [[]];\n'\
' if(document.getElementById("devicedetail1"))\n'\
' pdata = [[], []];\n'\
' var pd = pdata[0];\n'\
' var total = [0.0, 0.0, 0.0];\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = deviceName(dev[i].title);\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' idlist[idlist.length] = dev[i].id;\n'\
' var tidx = 1;\n'\
' if(dev[i].id[0] == "a") {\n'\
' pd = pdata[0];\n'\
' } else {\n'\
' if(pdata.length == 1) pdata[1] = [];\n'\
' if(total.length == 3) total[3]=total[4]=0.0;\n'\
' pd = pdata[1];\n'\
' tidx = 3;\n'\
' }\n'\
' var info = dev[i].title.split(" ");\n'\
' var pname = info[info.length-1];\n'\
' pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
' total[0] += pd[pname];\n'\
' if(pname.indexOf("suspend") >= 0)\n'\
' total[tidx] += pd[pname];\n'\
' else\n'\
' total[tidx+1] += pd[pname];\n'\
' }\n'\
' }\n'\
' var devname = deviceTitle(this.title, total, cpu);\n'\
' var left = 0.0;\n'\
' for (var t = 0; t < pdata.length; t++) {\n'\
' pd = pdata[t];\n'\
' devinfo = document.getElementById("devicedetail"+t);\n'\
' var phases = devinfo.getElementsByClassName("phaselet");\n'\
' for (var i = 0; i < phases.length; i++) {\n'\
' if(phases[i].id in pd) {\n'\
' var w = 100.0*pd[phases[i].id]/total[0];\n'\
' var fs = 32;\n'\
' if(w < 8) fs = 4*w | 0;\n'\
' var fs2 = fs*3/4;\n'\
' phases[i].style.width = w+"%";\n'\
' phases[i].style.left = left+"%";\n'\
' phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
' left += w;\n'\
' var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
' var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";\n'\
' phases[i].innerHTML = time+pname;\n'\
' } else {\n'\
' phases[i].style.width = "0%";\n'\
' phases[i].style.left = left+"%";\n'\
' }\n'\
' }\n'\
' }\n'\
' if(typeof devstats !== \'undefined\')\n'\
' callDetail(this.id, this.title);\n'\
' var cglist = document.getElementById("callgraphs");\n'\
' if(!cglist) return;\n'\
' var cg = cglist.getElementsByClassName("atop");\n'\
' if(cg.length < 10) return;\n'\
' for (var i = 0; i < cg.length; i++) {\n'\
' cgid = cg[i].id.split("x")[0]\n'\
' if(idlist.indexOf(cgid) >= 0) {\n'\
' cg[i].style.display = "block";\n'\
' } else {\n'\
' cg[i].style.display = "none";\n'\
' }\n'\
' }\n'\
' }\n'\
' function callDetail(devid, devtitle) {\n'\
' if(!(devid in devstats) || devstats[devid].length < 1)\n'\
' return;\n'\
' var list = devstats[devid];\n'\
' var tmp = devtitle.split(" ");\n'\
' var name = tmp[0], phase = tmp[tmp.length-1];\n'\
' var dd = document.getElementById(phase);\n'\
' var total = parseFloat(tmp[1].slice(1));\n'\
' var mlist = [];\n'\
' var maxlen = 0;\n'\
' var info = []\n'\
' for(var i in list) {\n'\
' if(list[i][0] == "@") {\n'\
' info = list[i].split("|");\n'\
' continue;\n'\
' }\n'\
' var tmp = list[i].split("|");\n'\
' var t = parseFloat(tmp[0]), f = tmp[1], c = parseInt(tmp[2]);\n'\
' var p = (t*100.0/total).toFixed(2);\n'\
' mlist[mlist.length] = [f, c, t.toFixed(2), p+"%"];\n'\
' if(f.length > maxlen)\n'\
' maxlen = f.length;\n'\
' }\n'\
' var pad = 5;\n'\
' if(mlist.length == 0) pad = 30;\n'\
' var html = \'<div style="padding-top:\'+pad+\'px"><t3> <b>\'+name+\':</b>\';\n'\
' if(info.length > 2)\n'\
' html += " start=<b>"+info[1]+"</b>, end=<b>"+info[2]+"</b>";\n'\
' if(info.length > 3)\n'\
' html += ", length<i>(w/o overhead)</i>=<b>"+info[3]+" ms</b>";\n'\
' if(info.length > 4)\n'\
' html += ", return=<b>"+info[4]+"</b>";\n'\
' html += "</t3></div>";\n'\
' if(mlist.length > 0) {\n'\
' html += \'<table class=fstat style="padding-top:\'+(maxlen*5)+\'px;"><tr><th>Function</th>\';\n'\
' for(var i in mlist)\n'\
' html += "<td class=vt>"+mlist[i][0]+"</td>";\n'\
' html += "</tr><tr><th>Calls</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][1]+"</td>";\n'\
' html += "</tr><tr><th>Time(ms)</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][2]+"</td>";\n'\
' html += "</tr><tr><th>Percent</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][3]+"</td>";\n'\
' html += "</tr></table>";\n'\
' }\n'\
' dd.innerHTML = html;\n'\
' var height = (maxlen*5)+100;\n'\
' dd.style.height = height+"px";\n'\
' document.getElementById("devicedetail").style.height = height+"px";\n'\
' }\n'\
' function callSelect() {\n'\
' var cglist = document.getElementById("callgraphs");\n'\
' if(!cglist) return;\n'\
' var cg = cglist.getElementsByClassName("atop");\n'\
' for (var i = 0; i < cg.length; i++) {\n'\
' if(this.id == cg[i].id) {\n'\
' cg[i].style.display = "block";\n'\
' } else {\n'\
' cg[i].style.display = "none";\n'\
' }\n'\
' }\n'\
' }\n'\
' function devListWindow(e) {\n'\
' var win = window.open();\n'\
' var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
' "<style type=\\"text/css\\">"+\n'\
' " ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
' "</style>"\n'\
' var dt = devtable[0];\n'\
' if(e.target.id != "devlist1")\n'\
' dt = devtable[1];\n'\
' win.document.write(html+dt);\n'\
' }\n'\
' function errWindow() {\n'\
' var range = this.id.split("_");\n'\
' var idx1 = parseInt(range[0]);\n'\
' var idx2 = parseInt(range[1]);\n'\
' var win = window.open();\n'\
' var log = document.getElementById("dmesglog");\n'\
' var title = "<title>dmesg log</title>";\n'\
' var text = log.innerHTML.split("\\n");\n'\
' var html = "";\n'\
' for(var i = 0; i < text.length; i++) {\n'\
' if(i == idx1) {\n'\
' html += "<e id=target>"+text[i]+"</e>\\n";\n'\
' } else if(i > idx1 && i <= idx2) {\n'\
' html += "<e>"+text[i]+"</e>\\n";\n'\
' } else {\n'\
' html += text[i]+"\\n";\n'\
' }\n'\
' }\n'\
' win.document.write("<style>e{color:red}</style>"+title+"<pre>"+html+"</pre>");\n'\
' win.location.hash = "#target";\n'\
' win.document.close();\n'\
' }\n'\
' function logWindow(e) {\n'\
' var name = e.target.id.slice(4);\n'\
' var win = window.open();\n'\
' var log = document.getElementById(name+"log");\n'\
' var title = "<title>"+document.title.split(" ")[0]+" "+name+" log</title>";\n'\
' win.document.write(title+"<pre>"+log.innerHTML+"</pre>");\n'\
' win.document.close();\n'\
' }\n'\
' function onMouseDown(e) {\n'\
' dragval[0] = e.clientX;\n'\
' dragval[1] = document.getElementById("dmesgzoombox").scrollLeft;\n'\
' document.onmousemove = onMouseMove;\n'\
' }\n'\
' function onMouseMove(e) {\n'\
' var zoombox = document.getElementById("dmesgzoombox");\n'\
' zoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;\n'\
' }\n'\
' function onMouseUp(e) {\n'\
' document.onmousemove = null;\n'\
' }\n'\
' function onKeyPress(e) {\n'\
' var c = e.charCode;\n'\
' if(c != 42 && c != 43 && c != 45) return;\n'\
' var click = document.createEvent("Events");\n'\
' click.initEvent("click", true, false);\n'\
' if(c == 43) \n'\
' document.getElementById("zoomin").dispatchEvent(click);\n'\
' else if(c == 45)\n'\
' document.getElementById("zoomout").dispatchEvent(click);\n'\
' else if(c == 42)\n'\
' document.getElementById("zoomdef").dispatchEvent(click);\n'\
' }\n'\
' window.addEventListener("resize", function () {zoomTimeline();});\n'\
' window.addEventListener("load", function () {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' dmesg.style.width = "100%"\n'\
' dmesg.onmousedown = onMouseDown;\n'\
' document.onmouseup = onMouseUp;\n'\
' document.onkeypress = onKeyPress;\n'\
' document.getElementById("zoomin").onclick = zoomTimeline;\n'\
' document.getElementById("zoomout").onclick = zoomTimeline;\n'\
' document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
' var list = document.getElementsByClassName("err");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = errWindow;\n'\
' var list = document.getElementsByClassName("logbtn");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = logWindow;\n'\
' list = document.getElementsByClassName("devlist");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = devListWindow;\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].onclick = deviceDetail;\n'\
' dev[i].onmouseover = deviceHover;\n'\
' dev[i].onmouseout = deviceUnhover;\n'\
' }\n'\
' var dev = dmesg.getElementsByClassName("srccall");\n'\
' for (var i = 0; i < dev.length; i++)\n'\
' dev[i].onclick = callSelect;\n'\
' zoomTimeline();\n'\
' });\n'\
'</script>\n'
hf.write(script_code);
# Function: executeSuspend
# Description:
# Execute system suspend through the sysfs interface, then copy the output
# dmesg and ftrace files to the test output directory.
def executeSuspend(quiet=False):
sv, tp, pm = sysvals, sysvals.tpath, ProcessMonitor()
if sv.wifi:
wifi = sv.checkWifi()
sv.dlog('wifi check, connected device is "%s"' % wifi)
testdata = []
# run these commands to prepare the system for suspend
if sv.display:
if not quiet:
pprint('SET DISPLAY TO %s' % sv.display.upper())
ret = sv.displayControl(sv.display)
sv.dlog('xset display %s, ret = %d' % (sv.display, ret))
time.sleep(1)
if sv.sync:
if not quiet:
pprint('SYNCING FILESYSTEMS')
sv.dlog('syncing filesystems')
call('sync', shell=True)
sv.dlog('read dmesg')
sv.initdmesg()
# start ftrace
if(sv.usecallgraph or sv.usetraceevents):
if not quiet:
pprint('START TRACING')
sv.dlog('start ftrace tracing')
sv.fsetVal('1', 'tracing_on')
if sv.useprocmon:
sv.dlog('start the process monitor')
pm.start()
sv.dlog('run the cmdinfo list before')
sv.cmdinfo(True)
# execute however many s/r runs requested
for count in range(1,sv.execcount+1):
# x2delay in between test runs
if(count > 1 and sv.x2delay > 0):
sv.fsetVal('WAIT %d' % sv.x2delay, 'trace_marker')
time.sleep(sv.x2delay/1000.0)
sv.fsetVal('WAIT END', 'trace_marker')
# start message
if sv.testcommand != '':
pprint('COMMAND START')
else:
if(sv.rtcwake):
pprint('SUSPEND START')
else:
pprint('SUSPEND START (press a key to resume)')
# set rtcwake
if(sv.rtcwake):
if not quiet:
pprint('will issue an rtcwake in %d seconds' % sv.rtcwaketime)
sv.dlog('enable RTC wake alarm')
sv.rtcWakeAlarmOn()
# start of suspend trace marker
if(sv.usecallgraph or sv.usetraceevents):
sv.fsetVal(datetime.now().strftime(sv.tmstart), 'trace_marker')
		# optional delay before the suspend starts (-predelay)
if(count == 1 and sv.predelay > 0):
sv.fsetVal('WAIT %d' % sv.predelay, 'trace_marker')
time.sleep(sv.predelay/1000.0)
sv.fsetVal('WAIT END', 'trace_marker')
# initiate suspend or command
sv.dlog('system executing a suspend')
tdata = {'error': ''}
if sv.testcommand != '':
res = call(sv.testcommand+' 2>&1', shell=True);
if res != 0:
tdata['error'] = 'cmd returned %d' % res
else:
mode = sv.suspendmode
if sv.memmode and os.path.exists(sv.mempowerfile):
mode = 'mem'
sv.testVal(sv.mempowerfile, 'radio', sv.memmode)
if sv.diskmode and os.path.exists(sv.diskpowerfile):
mode = 'disk'
sv.testVal(sv.diskpowerfile, 'radio', sv.diskmode)
if sv.acpidebug:
sv.testVal(sv.acpipath, 'acpi', '0xe')
if mode == 'freeze' and sv.haveTurbostat():
# execution will pause here
turbo = sv.turbostat()
if turbo:
tdata['turbo'] = turbo
else:
pf = open(sv.powerfile, 'w')
pf.write(mode)
# execution will pause here
try:
pf.close()
except Exception as e:
tdata['error'] = str(e)
sv.dlog('system returned from resume')
# reset everything
sv.testVal('restoreall')
if(sv.rtcwake):
sv.dlog('disable RTC wake alarm')
sv.rtcWakeAlarmOff()
		# optional delay after the resume completes (-postdelay)
if(count == sv.execcount and sv.postdelay > 0):
sv.fsetVal('WAIT %d' % sv.postdelay, 'trace_marker')
time.sleep(sv.postdelay/1000.0)
sv.fsetVal('WAIT END', 'trace_marker')
# return from suspend
pprint('RESUME COMPLETE')
if(sv.usecallgraph or sv.usetraceevents):
sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
if sv.wifi and wifi:
tdata['wifi'] = sv.pollWifi(wifi)
sv.dlog('wifi check, %s' % tdata['wifi'])
if(sv.suspendmode == 'mem' or sv.suspendmode == 'command'):
sv.dlog('read the ACPI FPDT')
tdata['fw'] = getFPDT(False)
testdata.append(tdata)
sv.dlog('run the cmdinfo list after')
cmdafter = sv.cmdinfo(False)
# stop ftrace
if(sv.usecallgraph or sv.usetraceevents):
if sv.useprocmon:
sv.dlog('stop the process monitor')
pm.stop()
sv.fsetVal('0', 'tracing_on')
# grab a copy of the dmesg output
if not quiet:
pprint('CAPTURING DMESG')
sysvals.dlog('EXECUTION TRACE END')
sv.getdmesg(testdata)
# grab a copy of the ftrace output
if(sv.usecallgraph or sv.usetraceevents):
if not quiet:
pprint('CAPTURING TRACE')
op = sv.writeDatafileHeader(sv.ftracefile, testdata)
fp = open(tp+'trace', 'r')
for line in fp:
op.write(line)
op.close()
sv.fsetVal('', 'trace')
sv.platforminfo(cmdafter)
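# Function: readFile
# Description:
# Read a sysfs entry; symlinks return the link target's base name,
# regular files return their contents stripped of whitespace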
def readFile(file):
if os.path.islink(file):
return os.readlink(file).split('/')[-1]
else:
return sysvals.getVal(file).strip()
# Function: ms2nice
# Description:
# Print out a concise time string in hours, minutes and seconds
# Output:
# The time string, e.g. "1:02:03", "02:03" or "3s"
def ms2nice(val):
val = int(val)
h = val // 3600000
m = (val // 60000) % 60
s = (val // 1000) % 60
if h > 0:
return '%d:%02d:%02d' % (h, m, s)
if m > 0:
return '%02d:%02d' % (m, s)
return '%ds' % s
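# Function: yesno
# Description:
# Convert a runtime-pm sysfs value into the single letter code used in
# the deviceInfo table, e.g. active -> A, suspended -> S, auto -> E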
def yesno(val):
list = {'enabled':'A', 'disabled':'S', 'auto':'E', 'on':'D',
'active':'A', 'suspended':'S', 'suspending':'S'}
if val not in list:
return ' '
return list[val]
# Function: deviceInfo
# Description:
# Print a table of all devices which support runtime suspend along with
# their current runtime-pm settings; if output is given, return the list
# of power/control paths whose value matches it
def deviceInfo(output=''):
if not output:
pprint('LEGEND\n'\
'---------------------------------------------------------------------------------------------\n'\
' A = async/sync PM queue (A/S) C = runtime active children\n'\
' R = runtime suspend enabled/disabled (E/D) rACTIVE = runtime active (min/sec)\n'\
' S = runtime status active/suspended (A/S) rSUSPEND = runtime suspend (min/sec)\n'\
' U = runtime usage count\n'\
'---------------------------------------------------------------------------------------------\n'\
'DEVICE NAME A R S U C rACTIVE rSUSPEND\n'\
'---------------------------------------------------------------------------------------------')
res = []
tgtval = 'runtime_status'
lines = dict()
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(not re.match('.*/power', dirname) or
'control' not in filenames or
tgtval not in filenames):
continue
name = ''
dirname = dirname[:-6]
device = dirname.split('/')[-1]
power = dict()
power[tgtval] = readFile('%s/power/%s' % (dirname, tgtval))
# only list devices which support runtime suspend
if power[tgtval] not in ['active', 'suspended', 'suspending']:
continue
for i in ['product', 'driver', 'subsystem']:
file = '%s/%s' % (dirname, i)
if os.path.exists(file):
name = readFile(file)
break
for i in ['async', 'control', 'runtime_status', 'runtime_usage',
'runtime_active_kids', 'runtime_active_time',
'runtime_suspended_time']:
if i in filenames:
power[i] = readFile('%s/power/%s' % (dirname, i))
if output:
if power['control'] == output:
res.append('%s/power/control' % dirname)
continue
lines[dirname] = '%-26s %-26s %1s %1s %1s %1s %1s %10s %10s' % \
(device[:26], name[:26],
yesno(power['async']), \
yesno(power['control']), \
yesno(power['runtime_status']), \
power['runtime_usage'], \
power['runtime_active_kids'], \
ms2nice(power['runtime_active_time']), \
ms2nice(power['runtime_suspended_time']))
for i in sorted(lines):
print(lines[i])
return res
# Function: getModes
# Description:
# Determine the supported power modes on this system
# Output:
# A string list of the available modes
def getModes():
modes = []
if(os.path.exists(sysvals.powerfile)):
fp = open(sysvals.powerfile, 'r')
modes = fp.read().split()
fp.close()
if(os.path.exists(sysvals.mempowerfile)):
deep = False
fp = open(sysvals.mempowerfile, 'r')
for m in fp.read().split():
memmode = m.strip('[]')
if memmode == 'deep':
deep = True
else:
modes.append('mem-%s' % memmode)
fp.close()
if 'mem' in modes and not deep:
modes.remove('mem')
if('disk' in modes and os.path.exists(sysvals.diskpowerfile)):
fp = open(sysvals.diskpowerfile, 'r')
for m in fp.read().split():
modes.append('disk-%s' % m.strip('[]'))
fp.close()
return modes
# Function: dmidecode
# Description:
# Read the bios tables and pull out system info
# Arguments:
# mempath: /dev/mem or custom mem path
# fatal: True to exit on error, False to return empty dict
# Output:
# A dict object with all available key/values
def dmidecode(mempath, fatal=False):
out = dict()
# the list of values to retrieve, with hardcoded (type, idx)
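	# type is the SMBIOS structure type, idx is the byte offset of the
	# string-index field within that structure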
info = {
'bios-vendor': (0, 4),
'bios-version': (0, 5),
'bios-release-date': (0, 8),
'system-manufacturer': (1, 4),
'system-product-name': (1, 5),
'system-version': (1, 6),
'system-serial-number': (1, 7),
'baseboard-manufacturer': (2, 4),
'baseboard-product-name': (2, 5),
'baseboard-version': (2, 6),
'baseboard-serial-number': (2, 7),
'chassis-manufacturer': (3, 4),
'chassis-type': (3, 5),
'chassis-version': (3, 6),
'chassis-serial-number': (3, 7),
'processor-manufacturer': (4, 7),
'processor-version': (4, 16),
}
if(not os.path.exists(mempath)):
if(fatal):
doError('file does not exist: %s' % mempath)
return out
if(not os.access(mempath, os.R_OK)):
if(fatal):
doError('file is not readable: %s' % mempath)
return out
# by default use legacy scan, but try to use EFI first
memaddr = 0xf0000
memsize = 0x10000
for ep in ['/sys/firmware/efi/systab', '/proc/efi/systab']:
if not os.path.exists(ep) or not os.access(ep, os.R_OK):
continue
fp = open(ep, 'r')
buf = fp.read()
fp.close()
i = buf.find('SMBIOS=')
if i >= 0:
try:
memaddr = int(buf[i+7:], 16)
memsize = 0x20
except:
continue
# read in the memory for scanning
try:
fp = open(mempath, 'rb')
fp.seek(memaddr)
buf = fp.read(memsize)
except:
if(fatal):
doError('DMI table is unreachable, sorry')
else:
pprint('WARNING: /dev/mem is not readable, ignoring DMI data')
return out
fp.close()
# search for either an SM table or DMI table
i = base = length = num = 0
while(i < memsize):
if buf[i:i+4] == b'_SM_' and i < memsize - 16:
length = struct.unpack('H', buf[i+22:i+24])[0]
base, num = struct.unpack('IH', buf[i+24:i+30])
break
elif buf[i:i+5] == b'_DMI_':
length = struct.unpack('H', buf[i+6:i+8])[0]
base, num = struct.unpack('IH', buf[i+8:i+14])
break
i += 16
if base == 0 and length == 0 and num == 0:
if(fatal):
doError('Neither SMBIOS nor DMI were found')
else:
return out
# read in the SM or DMI table
try:
fp = open(mempath, 'rb')
fp.seek(base)
buf = fp.read(length)
except:
if(fatal):
doError('DMI table is unreachable, sorry')
else:
pprint('WARNING: /dev/mem is not readable, ignoring DMI data')
return out
fp.close()
# scan the table for the values we want
count = i = 0
while(count < num and i <= len(buf) - 4):
type, size, handle = struct.unpack('BBH', buf[i:i+4])
n = i + size
while n < len(buf) - 1:
if 0 == struct.unpack('H', buf[n:n+2])[0]:
break
n += 1
data = buf[i+size:n+2].split(b'\0')
for name in info:
itype, idxadr = info[name]
if itype == type:
idx = struct.unpack('B', buf[i+idxadr:i+idxadr+1])[0]
if idx > 0 and idx < len(data) - 1:
s = data[idx-1].decode('utf-8')
if s.strip() and s.strip().lower() != 'to be filled by o.e.m.':
out[name] = s
i = n + 2
count += 1
return out
# Function: getFPDT
# Description:
# Read the acpi bios tables and pull out FPDT, the firmware data
# Arguments:
# output: True to output the info to stdout, False otherwise
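# Output:
# A two-item list of [firmware suspend time, firmware resume time] in ns,
# or False if the table is missing or unreadable; the FPDT only stores
# physical addresses, so the records themselves are read via sysvals.mempath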
def getFPDT(output):
rectype = {}
rectype[0] = 'Firmware Basic Boot Performance Record'
rectype[1] = 'S3 Performance Table Record'
prectype = {}
prectype[0] = 'Basic S3 Resume Performance Record'
prectype[1] = 'Basic S3 Suspend Performance Record'
sysvals.rootCheck(True)
if(not os.path.exists(sysvals.fpdtpath)):
if(output):
doError('file does not exist: %s' % sysvals.fpdtpath)
return False
if(not os.access(sysvals.fpdtpath, os.R_OK)):
if(output):
doError('file is not readable: %s' % sysvals.fpdtpath)
return False
if(not os.path.exists(sysvals.mempath)):
if(output):
doError('file does not exist: %s' % sysvals.mempath)
return False
if(not os.access(sysvals.mempath, os.R_OK)):
if(output):
doError('file is not readable: %s' % sysvals.mempath)
return False
fp = open(sysvals.fpdtpath, 'rb')
buf = fp.read()
fp.close()
if(len(buf) < 36):
if(output):
doError('Invalid FPDT table data, should '+\
'be at least 36 bytes')
return False
table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
if(output):
pprint('\n'\
'Firmware Performance Data Table (%s)\n'\
' Signature : %s\n'\
' Table Length : %u\n'\
' Revision : %u\n'\
' Checksum : 0x%x\n'\
' OEM ID : %s\n'\
' OEM Table ID : %s\n'\
' OEM Revision : %u\n'\
' Creator ID : %s\n'\
' Creator Revision : 0x%x\n'\
'' % (ascii(table[0]), ascii(table[0]), table[1], table[2],
table[3], ascii(table[4]), ascii(table[5]), table[6],
ascii(table[7]), table[8]))
if(table[0] != b'FPDT'):
if(output):
doError('Invalid FPDT table')
return False
if(len(buf) <= 36):
return False
i = 0
fwData = [0, 0]
records = buf[36:]
try:
fp = open(sysvals.mempath, 'rb')
except:
pprint('WARNING: /dev/mem is not readable, ignoring the FPDT data')
return False
while(i < len(records)):
header = struct.unpack('HBB', records[i:i+4])
if(header[0] not in rectype):
i += header[1]
continue
if(header[1] != 16):
i += header[1]
continue
addr = struct.unpack('Q', records[i+8:i+16])[0]
try:
fp.seek(addr)
first = fp.read(8)
except:
if(output):
pprint('Bad address 0x%x in %s' % (addr, sysvals.mempath))
return [0, 0]
rechead = struct.unpack('4sI', first)
recdata = fp.read(rechead[1]-8)
if(rechead[0] == b'FBPT'):
record = struct.unpack('HBBIQQQQQ', recdata[:48])
if(output):
pprint('%s (%s)\n'\
' Reset END : %u ns\n'\
' OS Loader LoadImage Start : %u ns\n'\
' OS Loader StartImage Start : %u ns\n'\
' ExitBootServices Entry : %u ns\n'\
' ExitBootServices Exit : %u ns'\
'' % (rectype[header[0]], ascii(rechead[0]), record[4], record[5],
record[6], record[7], record[8]))
elif(rechead[0] == b'S3PT'):
if(output):
pprint('%s (%s)' % (rectype[header[0]], ascii(rechead[0])))
j = 0
while(j < len(recdata)):
prechead = struct.unpack('HBB', recdata[j:j+4])
				if(prechead[0] not in prectype):
					# advance past unknown records so the loop cannot stall
					j += prechead[1] if prechead[1] > 0 else 4
					continue
if(prechead[0] == 0):
record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
fwData[1] = record[2]
if(output):
pprint(' %s\n'\
' Resume Count : %u\n'\
' FullResume : %u ns\n'\
' AverageResume : %u ns'\
'' % (prectype[prechead[0]], record[1],
record[2], record[3]))
elif(prechead[0] == 1):
record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
fwData[0] = record[1] - record[0]
if(output):
pprint(' %s\n'\
' SuspendStart : %u ns\n'\
' SuspendEnd : %u ns\n'\
' SuspendTime : %u ns'\
'' % (prectype[prechead[0]], record[0],
record[1], fwData[0]))
j += prechead[1]
if(output):
pprint('')
i += header[1]
fp.close()
return fwData
# Function: statusCheck
# Description:
# Verify that the requested command and options will work, and
# print the results to the terminal
# Output:
# An empty string if the test can run, otherwise a string describing the problem
def statusCheck(probecheck=False):
status = ''
pprint('Checking this system (%s)...' % platform.node())
# check we have root access
res = sysvals.colorText('NO (No features of this tool will work!)')
if(sysvals.rootCheck(False)):
res = 'YES'
pprint(' have root access: %s' % res)
if(res != 'YES'):
pprint(' Try running this script with sudo')
return 'missing root access'
# check sysfs is mounted
res = sysvals.colorText('NO (No features of this tool will work!)')
if(os.path.exists(sysvals.powerfile)):
res = 'YES'
pprint(' is sysfs mounted: %s' % res)
if(res != 'YES'):
return 'sysfs is missing'
# check target mode is a valid mode
if sysvals.suspendmode != 'command':
res = sysvals.colorText('NO')
modes = getModes()
if(sysvals.suspendmode in modes):
res = 'YES'
else:
status = '%s mode is not supported' % sysvals.suspendmode
pprint(' is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
if(res == 'NO'):
pprint(' valid power modes are: %s' % modes)
pprint(' please choose one with -m')
# check if ftrace is available
res = sysvals.colorText('NO')
ftgood = sysvals.verifyFtrace()
if(ftgood):
res = 'YES'
elif(sysvals.usecallgraph):
status = 'ftrace is not properly supported'
pprint(' is ftrace supported: %s' % res)
# check if kprobes are available
if sysvals.usekprobes:
res = sysvals.colorText('NO')
sysvals.usekprobes = sysvals.verifyKprobes()
if(sysvals.usekprobes):
res = 'YES'
else:
sysvals.usedevsrc = False
pprint(' are kprobes supported: %s' % res)
# what data source are we using
res = 'DMESG'
if(ftgood):
sysvals.usetraceevents = True
for e in sysvals.traceevents:
if not os.path.exists(sysvals.epath+e):
sysvals.usetraceevents = False
if(sysvals.usetraceevents):
res = 'FTRACE (all trace events found)'
pprint(' timeline data source: %s' % res)
# check if rtcwake
res = sysvals.colorText('NO')
if(sysvals.rtcpath != ''):
res = 'YES'
elif(sysvals.rtcwake):
status = 'rtcwake is not properly supported'
pprint(' is rtcwake supported: %s' % res)
# check info commands
pprint(' optional commands this tool may use for info:')
no = sysvals.colorText('MISSING')
yes = sysvals.colorText('FOUND', 32)
for c in ['turbostat', 'mcelog', 'lspci', 'lsusb']:
if c == 'turbostat':
res = yes if sysvals.haveTurbostat() else no
else:
res = yes if sysvals.getExec(c) else no
pprint(' %s: %s' % (c, res))
if not probecheck:
return status
# verify kprobes
if sysvals.usekprobes:
for name in sysvals.tracefuncs:
sysvals.defaultKprobe(name, sysvals.tracefuncs[name])
if sysvals.usedevsrc:
for name in sysvals.dev_tracefuncs:
sysvals.defaultKprobe(name, sysvals.dev_tracefuncs[name])
sysvals.addKprobes(True)
return status
# Function: doError
# Description:
# generic error function for catastrophic failures
# Arguments:
# msg: the error message to print
# help: True if printHelp should be called after, False otherwise
def doError(msg, help=False):
if(help == True):
printHelp()
pprint('ERROR: %s\n' % msg)
sysvals.outputResult({'error':msg})
sys.exit(1)
# Function: getArgInt
# Description:
# pull out an integer argument from the command line with checks
def getArgInt(name, args, min, max, main=True):
if main:
try:
arg = next(args)
except:
doError(name+': no argument supplied', True)
else:
arg = args
try:
val = int(arg)
except:
doError(name+': non-integer value given', True)
if(val < min or val > max):
doError(name+': value should be between %d and %d' % (min, max), True)
return val
# Function: getArgFloat
# Description:
# pull out a float argument from the command line with checks
def getArgFloat(name, args, min, max, main=True):
if main:
try:
arg = next(args)
except:
doError(name+': no argument supplied', True)
else:
arg = args
try:
val = float(arg)
except:
doError(name+': non-numerical value given', True)
if(val < min or val > max):
doError(name+': value should be between %f and %f' % (min, max), True)
return val
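# Function: processData
# Description:
# Parse the ftrace/dmesg data into test runs, print the system and
# platform details when verbose, and generate the html timeline
# Output:
# A tuple of (testruns, stamp), where stamp carries the result summary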
def processData(live=False, quiet=False):
if not quiet:
pprint('PROCESSING: %s' % sysvals.htmlfile)
sysvals.vprint('usetraceevents=%s, usetracemarkers=%s, usekprobes=%s' % \
(sysvals.usetraceevents, sysvals.usetracemarkers, sysvals.usekprobes))
error = ''
if(sysvals.usetraceevents):
testruns, error = parseTraceLog(live)
if sysvals.dmesgfile:
for data in testruns:
data.extractErrorInfo()
else:
testruns = loadKernelLog()
for data in testruns:
parseKernelLog(data)
if(sysvals.ftracefile and (sysvals.usecallgraph or sysvals.usetraceevents)):
appendIncompleteTraceLog(testruns)
if not sysvals.stamp:
pprint('ERROR: data does not include the expected stamp')
return (testruns, {'error': 'timeline generation failed'})
shown = ['bios', 'biosdate', 'cpu', 'host', 'kernel', 'man', 'memfr',
'memsz', 'mode', 'numcpu', 'plat', 'time', 'wifi']
sysvals.vprint('System Info:')
for key in sorted(sysvals.stamp):
if key in shown:
sysvals.vprint(' %-8s : %s' % (key.upper(), sysvals.stamp[key]))
sysvals.vprint('Command:\n %s' % sysvals.cmdline)
for data in testruns:
if data.turbostat:
idx, s = 0, 'Turbostat:\n '
for val in data.turbostat.split('|'):
idx += len(val) + 1
if idx >= 80:
idx = 0
s += '\n '
s += val + ' '
sysvals.vprint(s)
data.printDetails()
if len(sysvals.platinfo) > 0:
sysvals.vprint('\nPlatform Info:')
for info in sysvals.platinfo:
sysvals.vprint('[%s - %s]' % (info[0], info[1]))
sysvals.vprint(info[2])
sysvals.vprint('')
if sysvals.cgdump:
for data in testruns:
data.debugPrint()
sys.exit(0)
if len(testruns) < 1:
pprint('ERROR: Not enough test data to build a timeline')
return (testruns, {'error': 'timeline generation failed'})
sysvals.vprint('Creating the html timeline (%s)...' % sysvals.htmlfile)
createHTML(testruns, error)
if not quiet:
pprint('DONE: %s' % sysvals.htmlfile)
data = testruns[0]
stamp = data.stamp
stamp['suspend'], stamp['resume'] = data.getTimeValues()
if data.fwValid:
stamp['fwsuspend'], stamp['fwresume'] = data.fwSuspend, data.fwResume
if error:
stamp['error'] = error
return (testruns, stamp)
# Function: rerunTest
# Description:
# generate an output from an existing set of ftrace/dmesg logs
def rerunTest(htmlfile=''):
if sysvals.ftracefile:
doesTraceLogHaveTraceEvents()
if not sysvals.dmesgfile and not sysvals.usetraceevents:
doError('recreating this html output requires a dmesg file')
if htmlfile:
sysvals.htmlfile = htmlfile
else:
sysvals.setOutputFile()
if os.path.exists(sysvals.htmlfile):
if not os.path.isfile(sysvals.htmlfile):
doError('a directory already exists with this name: %s' % sysvals.htmlfile)
elif not os.access(sysvals.htmlfile, os.W_OK):
doError('missing permission to write to %s' % sysvals.htmlfile)
testruns, stamp = processData()
sysvals.resetlog()
return stamp
# Function: runTest
# Description:
# execute a suspend/resume, gather the logs, and generate the output
def runTest(n=0, quiet=False):
# prepare for the test
sysvals.initTestOutput('suspend')
op = sysvals.writeDatafileHeader(sysvals.dmesgfile, [])
op.write('# EXECUTION TRACE START\n')
op.close()
if n <= 1:
if sysvals.rs != 0:
sysvals.dlog('%sabling runtime suspend' % ('en' if sysvals.rs > 0 else 'dis'))
sysvals.setRuntimeSuspend(True)
if sysvals.display:
ret = sysvals.displayControl('init')
sysvals.dlog('xset display init, ret = %d' % ret)
sysvals.dlog('initialize ftrace')
sysvals.initFtrace(quiet)
# execute the test
executeSuspend(quiet)
sysvals.cleanupFtrace()
if sysvals.skiphtml:
sysvals.outputResult({}, n)
sysvals.sudoUserchown(sysvals.testdir)
return
testruns, stamp = processData(True, quiet)
for data in testruns:
del data
sysvals.sudoUserchown(sysvals.testdir)
sysvals.outputResult(stamp, n)
if 'error' in stamp:
return 2
return 0
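# Function: find_in_html
# Description:
# Search an html string for the text between a start and end pattern,
# e.g. find_in_html(html, 'Kernel Suspend', 'ms') returns the suspend time
# Output:
# The first match as a string if firstonly, otherwise a list of all matches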
def find_in_html(html, start, end, firstonly=True):
cnt, out, list = len(html), [], []
if firstonly:
m = re.search(start, html)
if m:
list.append(m)
else:
list = re.finditer(start, html)
for match in list:
s = match.end()
e = cnt if (len(out) < 1 or s + 10000 > cnt) else s + 10000
m = re.search(end, html[s:e])
if not m:
break
e = s + m.start()
str = html[s:e]
if end == 'ms':
num = re.search(r'[-+]?\d*\.\d+|\d+', str)
str = num.group() if num else 'NaN'
if firstonly:
return str
out.append(str)
if firstonly:
return ''
return out
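# Function: data_from_html
# Description:
# Extract the test result summary (mode, host, kernel, suspend/resume
# times, issues, worst devices) from a previously generated timeline html
# Output:
# A dict of values used by the summary pages, or False if the file is unusable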
def data_from_html(file, outpath, issues, fulldetail=False):
html = open(file, 'r').read()
sysvals.htmlfile = os.path.relpath(file, outpath)
# extract general info
suspend = find_in_html(html, 'Kernel Suspend', 'ms')
resume = find_in_html(html, 'Kernel Resume', 'ms')
sysinfo = find_in_html(html, '<div class="stamp sysinfo">', '</div>')
line = find_in_html(html, '<div class="stamp">', '</div>')
stmp = line.split()
if not suspend or not resume or len(stmp) != 8:
return False
try:
dt = datetime.strptime(' '.join(stmp[3:]), '%B %d %Y, %I:%M:%S %p')
except:
return False
sysvals.hostname = stmp[0]
tstr = dt.strftime('%Y/%m/%d %H:%M:%S')
error = find_in_html(html, '<table class="testfail"><tr><td>', '</td>')
if error:
m = re.match('[a-z0-9]* failed in (?P<p>\S*).*', error)
if m:
result = 'fail in %s' % m.group('p')
else:
result = 'fail'
else:
result = 'pass'
# extract error info
tp, ilist = False, []
extra = dict()
log = find_in_html(html, '<div id="dmesglog" style="display:none;">',
'</div>').strip()
if log:
d = Data(0)
d.end = 999999999
d.dmesgtext = log.split('\n')
tp = d.extractErrorInfo()
for msg in tp.msglist:
sysvals.errorSummary(issues, msg)
if stmp[2] == 'freeze':
extra = d.turbostatInfo()
elist = dict()
for dir in d.errorinfo:
for err in d.errorinfo[dir]:
if err[0] not in elist:
elist[err[0]] = 0
elist[err[0]] += 1
for i in elist:
ilist.append('%sx%d' % (i, elist[i]) if elist[i] > 1 else i)
wifi = find_in_html(html, 'Wifi Resume: ', '</td>')
if wifi:
extra['wifi'] = wifi
low = find_in_html(html, 'freeze time: <b>', ' ms</b>')
for lowstr in ['waking', '+']:
if not low:
break
if lowstr not in low:
continue
if lowstr == '+':
issue = 'S2LOOPx%d' % len(low.split('+'))
else:
m = re.match('.*waking *(?P<n>[0-9]*) *times.*', low)
issue = 'S2WAKEx%s' % m.group('n') if m else 'S2WAKExNaN'
match = [i for i in issues if i['match'] == issue]
if len(match) > 0:
match[0]['count'] += 1
if sysvals.hostname not in match[0]['urls']:
match[0]['urls'][sysvals.hostname] = [sysvals.htmlfile]
elif sysvals.htmlfile not in match[0]['urls'][sysvals.hostname]:
match[0]['urls'][sysvals.hostname].append(sysvals.htmlfile)
else:
issues.append({
'match': issue, 'count': 1, 'line': issue,
'urls': {sysvals.hostname: [sysvals.htmlfile]},
})
ilist.append(issue)
# extract device info
devices = dict()
for line in html.split('\n'):
m = re.match(' *<div id=\"[a,0-9]*\" *title=\"(?P<title>.*)\" class=\"thread.*', line)
if not m or 'thread kth' in line or 'thread sec' in line:
continue
m = re.match('(?P<n>.*) \((?P<t>[0-9,\.]*) ms\) (?P<p>.*)', m.group('title'))
if not m:
continue
name, time, phase = m.group('n'), m.group('t'), m.group('p')
if ' async' in name or ' sync' in name:
name = ' '.join(name.split(' ')[:-1])
if phase.startswith('suspend'):
d = 'suspend'
elif phase.startswith('resume'):
d = 'resume'
else:
continue
if d not in devices:
devices[d] = dict()
if name not in devices[d]:
devices[d][name] = 0.0
devices[d][name] += float(time)
# create worst device info
worst = dict()
for d in ['suspend', 'resume']:
worst[d] = {'name':'', 'time': 0.0}
dev = devices[d] if d in devices else 0
if dev and len(dev.keys()) > 0:
n = sorted(dev, key=lambda k:(dev[k], k), reverse=True)[0]
worst[d]['name'], worst[d]['time'] = n, dev[n]
data = {
'mode': stmp[2],
'host': stmp[0],
'kernel': stmp[1],
'sysinfo': sysinfo,
'time': tstr,
'result': result,
'issues': ' '.join(ilist),
'suspend': suspend,
'resume': resume,
'devlist': devices,
'sus_worst': worst['suspend']['name'],
'sus_worsttime': worst['suspend']['time'],
'res_worst': worst['resume']['name'],
'res_worsttime': worst['resume']['time'],
'url': sysvals.htmlfile,
}
for key in extra:
data[key] = extra[key]
if fulldetail:
data['funclist'] = find_in_html(html, '<div title="', '" class="traceevent"', False)
if tp:
for arg in ['-multi ', '-info ']:
if arg in tp.cmdline:
data['target'] = tp.cmdline[tp.cmdline.find(arg):].split()[1]
break
return data
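# Function: genHtml
# Description:
# Walk a subdirectory for dmesg/ftrace log pairs and regenerate the
# timeline html for any that are missing, or for all of them when force is set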
def genHtml(subdir, force=False):
for dirname, dirnames, filenames in os.walk(subdir):
sysvals.dmesgfile = sysvals.ftracefile = sysvals.htmlfile = ''
for filename in filenames:
file = os.path.join(dirname, filename)
if sysvals.usable(file):
if(re.match('.*_dmesg.txt', filename)):
sysvals.dmesgfile = file
elif(re.match('.*_ftrace.txt', filename)):
sysvals.ftracefile = file
sysvals.setOutputFile()
if (sysvals.dmesgfile or sysvals.ftracefile) and sysvals.htmlfile and \
(force or not sysvals.usable(sysvals.htmlfile)):
pprint('FTRACE: %s' % sysvals.ftracefile)
if sysvals.dmesgfile:
pprint('DMESG : %s' % sysvals.dmesgfile)
rerunTest()
# Function: runSummary
# Description:
# create a summary of tests in a sub-directory
def runSummary(subdir, local=True, genhtml=False):
inpath = os.path.abspath(subdir)
outpath = os.path.abspath('.') if local else inpath
pprint('Generating a summary of folder:\n %s' % inpath)
if genhtml:
genHtml(subdir)
target, issues, testruns = '', [], []
desc = {'host':[],'mode':[],'kernel':[]}
for dirname, dirnames, filenames in os.walk(subdir):
for filename in filenames:
if(not re.match('.*.html', filename)):
continue
data = data_from_html(os.path.join(dirname, filename), outpath, issues)
if(not data):
continue
if 'target' in data:
target = data['target']
testruns.append(data)
for key in desc:
if data[key] not in desc[key]:
desc[key].append(data[key])
pprint('Summary files:')
if len(desc['host']) == len(desc['mode']) == len(desc['kernel']) == 1:
title = '%s %s %s' % (desc['host'][0], desc['kernel'][0], desc['mode'][0])
if target:
title += ' %s' % target
else:
title = inpath
createHTMLSummarySimple(testruns, os.path.join(outpath, 'summary.html'), title)
pprint(' summary.html - tabular list of test data found')
createHTMLDeviceSummary(testruns, os.path.join(outpath, 'summary-devices.html'), title)
pprint(' summary-devices.html - kernel device list sorted by total execution time')
createHTMLIssuesSummary(testruns, issues, os.path.join(outpath, 'summary-issues.html'), title)
pprint(' summary-issues.html - kernel issues found sorted by frequency')
# Function: checkArgBool
# Description:
# check if a boolean string value is true or false
def checkArgBool(name, value):
if value in switchvalues:
if value in switchoff:
return False
return True
doError('invalid boolean --> (%s: %s), use "true/false" or "1/0"' % (name, value), True)
return False
# Function: configFromFile
# Description:
# Configure the script via the info in a config file
def configFromFile(file):
Config = configparser.ConfigParser()
Config.read(file)
sections = Config.sections()
overridekprobes = False
overridedevkprobes = False
if 'Settings' in sections:
for opt in Config.options('Settings'):
value = Config.get('Settings', opt).lower()
option = opt.lower()
if(option == 'verbose'):
sysvals.verbose = checkArgBool(option, value)
elif(option == 'addlogs'):
sysvals.dmesglog = sysvals.ftracelog = checkArgBool(option, value)
elif(option == 'dev'):
sysvals.usedevsrc = checkArgBool(option, value)
elif(option == 'proc'):
sysvals.useprocmon = checkArgBool(option, value)
elif(option == 'x2'):
if checkArgBool(option, value):
sysvals.execcount = 2
elif(option == 'callgraph'):
sysvals.usecallgraph = checkArgBool(option, value)
elif(option == 'override-timeline-functions'):
overridekprobes = checkArgBool(option, value)
elif(option == 'override-dev-timeline-functions'):
overridedevkprobes = checkArgBool(option, value)
elif(option == 'skiphtml'):
sysvals.skiphtml = checkArgBool(option, value)
elif(option == 'sync'):
sysvals.sync = checkArgBool(option, value)
elif(option == 'rs' or option == 'runtimesuspend'):
if value in switchvalues:
if value in switchoff:
sysvals.rs = -1
else:
sysvals.rs = 1
else:
doError('invalid value --> (%s: %s), use "enable/disable"' % (option, value), True)
elif(option == 'display'):
disopt = ['on', 'off', 'standby', 'suspend']
if value not in disopt:
doError('invalid value --> (%s: %s), use %s' % (option, value, disopt), True)
sysvals.display = value
elif(option == 'gzip'):
sysvals.gzip = checkArgBool(option, value)
elif(option == 'cgfilter'):
sysvals.setCallgraphFilter(value)
elif(option == 'cgskip'):
if value in switchoff:
sysvals.cgskip = ''
else:
					sysvals.cgskip = sysvals.configFile(value)
					if(not sysvals.cgskip):
						doError('%s does not exist' % value)
elif(option == 'cgtest'):
sysvals.cgtest = getArgInt('cgtest', value, 0, 1, False)
elif(option == 'cgphase'):
d = Data(0)
if value not in d.phasedef:
doError('invalid phase --> (%s: %s), valid phases are %s'\
% (option, value, d.phasedef.keys()), True)
sysvals.cgphase = value
elif(option == 'fadd'):
file = sysvals.configFile(value)
if(not file):
doError('%s does not exist' % value)
sysvals.addFtraceFilterFunctions(file)
elif(option == 'result'):
sysvals.result = value
elif(option == 'multi'):
nums = value.split()
if len(nums) != 2:
doError('multi requires 2 integers (exec_count and delay)', True)
sysvals.multiinit(nums[0], nums[1])
elif(option == 'devicefilter'):
sysvals.setDeviceFilter(value)
elif(option == 'expandcg'):
sysvals.cgexp = checkArgBool(option, value)
elif(option == 'srgap'):
if checkArgBool(option, value):
sysvals.srgap = 5
elif(option == 'mode'):
sysvals.suspendmode = value
elif(option == 'command' or option == 'cmd'):
sysvals.testcommand = value
elif(option == 'x2delay'):
sysvals.x2delay = getArgInt('x2delay', value, 0, 60000, False)
elif(option == 'predelay'):
sysvals.predelay = getArgInt('predelay', value, 0, 60000, False)
elif(option == 'postdelay'):
sysvals.postdelay = getArgInt('postdelay', value, 0, 60000, False)
elif(option == 'maxdepth'):
sysvals.max_graph_depth = getArgInt('maxdepth', value, 0, 1000, False)
elif(option == 'rtcwake'):
if value in switchoff:
sysvals.rtcwake = False
else:
sysvals.rtcwake = True
sysvals.rtcwaketime = getArgInt('rtcwake', value, 0, 3600, False)
elif(option == 'timeprec'):
sysvals.setPrecision(getArgInt('timeprec', value, 0, 6, False))
elif(option == 'mindev'):
sysvals.mindevlen = getArgFloat('mindev', value, 0.0, 10000.0, False)
elif(option == 'callloop-maxgap'):
sysvals.callloopmaxgap = getArgFloat('callloop-maxgap', value, 0.0, 1.0, False)
elif(option == 'callloop-maxlen'):
				sysvals.callloopmaxlen = getArgFloat('callloop-maxlen', value, 0.0, 1.0, False)
elif(option == 'mincg'):
sysvals.mincglen = getArgFloat('mincg', value, 0.0, 10000.0, False)
elif(option == 'bufsize'):
sysvals.bufsize = getArgInt('bufsize', value, 1, 1024*1024*8, False)
elif(option == 'output-dir'):
sysvals.outdir = sysvals.setOutputFolder(value)
if sysvals.suspendmode == 'command' and not sysvals.testcommand:
doError('No command supplied for mode "command"')
# compatibility errors
if sysvals.usedevsrc and sysvals.usecallgraph:
doError('-dev is not compatible with -f')
if sysvals.usecallgraph and sysvals.useprocmon:
doError('-proc is not compatible with -f')
if overridekprobes:
sysvals.tracefuncs = dict()
if overridedevkprobes:
sysvals.dev_tracefuncs = dict()
kprobes = dict()
kprobesec = 'dev_timeline_functions_'+platform.machine()
if kprobesec in sections:
for name in Config.options(kprobesec):
text = Config.get(kprobesec, name)
kprobes[name] = (text, True)
kprobesec = 'timeline_functions_'+platform.machine()
if kprobesec in sections:
for name in Config.options(kprobesec):
if name in kprobes:
doError('Duplicate timeline function found "%s"' % (name))
text = Config.get(kprobesec, name)
kprobes[name] = (text, False)
for name in kprobes:
function = name
format = name
color = ''
args = dict()
text, dev = kprobes[name]
data = text.split()
i = 0
for val in data:
			# bracketed strings are special formatting, read them separately
if val[0] == '[' and val[-1] == ']':
for prop in val[1:-1].split(','):
p = prop.split('=')
if p[0] == 'color':
try:
color = int(p[1], 16)
color = '#'+p[1]
except:
color = p[1]
continue
# first real arg should be the format string
if i == 0:
format = val
# all other args are actual function args
else:
d = val.split('=')
args[d[0]] = d[1]
i += 1
if not function or not format:
doError('Invalid kprobe: %s' % name)
for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', format):
if arg not in args:
doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
if (dev and name in sysvals.dev_tracefuncs) or (not dev and name in sysvals.tracefuncs):
doError('Duplicate timeline function found "%s"' % (name))
kp = {
'name': name,
'func': function,
'format': format,
sysvals.archargs: args
}
if color:
kp['color'] = color
if dev:
sysvals.dev_tracefuncs[name] = kp
else:
sysvals.tracefuncs[name] = kp
# Function: printHelp
# Description:
# print out the help text
def printHelp():
pprint('\n%s v%s\n'\
'Usage: sudo sleepgraph <options> <commands>\n'\
'\n'\
'Description:\n'\
' This tool is designed to assist kernel and OS developers in optimizing\n'\
' their linux stack\'s suspend/resume time. Using a kernel image built\n'\
' with a few extra options enabled, the tool will execute a suspend and\n'\
' capture dmesg and ftrace data until resume is complete. This data is\n'\
' transformed into a device timeline and an optional callgraph to give\n'\
' a detailed view of which devices/subsystems are taking the most\n'\
' time in suspend/resume.\n'\
'\n'\
' If no specific command is given, the default behavior is to initiate\n'\
' a suspend/resume and capture the dmesg/ftrace output as an html timeline.\n'\
'\n'\
' Generates output files in subdirectory: suspend-yymmdd-HHMMSS\n'\
' HTML output: <hostname>_<mode>.html\n'\
' raw dmesg output: <hostname>_<mode>_dmesg.txt\n'\
' raw ftrace output: <hostname>_<mode>_ftrace.txt\n'\
'\n'\
'Options:\n'\
' -h Print this help text\n'\
' -v Print the current tool version\n'\
' -config fn Pull arguments and config options from file fn\n'\
' -verbose Print extra information during execution and analysis\n'\
' -m mode Mode to initiate for suspend (default: %s)\n'\
' -o name Overrides the output subdirectory name when running a new test\n'\
' default: suspend-{date}-{time}\n'\
' -rtcwake t Wakeup t seconds after suspend, set t to "off" to disable (default: 15)\n'\
' -addlogs Add the dmesg and ftrace logs to the html output\n'\
' -noturbostat Dont use turbostat in freeze mode (default: disabled)\n'\
' -srgap Add a visible gap in the timeline between sus/res (default: disabled)\n'\
' -skiphtml Run the test and capture the trace logs, but skip the timeline (default: disabled)\n'\
' -result fn Export a results table to a text file for parsing.\n'\
' -wifi If a wifi connection is available, check that it reconnects after resume.\n'\
' [testprep]\n'\
' -sync Sync the filesystems before starting the test\n'\
' -rs on/off Enable/disable runtime suspend for all devices, restore all after test\n'\
' -display m Change the display mode to m for the test (on/off/standby/suspend)\n'\
' [advanced]\n'\
' -gzip Gzip the trace and dmesg logs to save space\n'\
' -cmd {s} Run the timeline over a custom command, e.g. "sync -d"\n'\
' -proc Add usermode process info into the timeline (default: disabled)\n'\
' -dev Add kernel function calls and threads to the timeline (default: disabled)\n'\
' -x2 Run two suspend/resumes back to back (default: disabled)\n'\
' -x2delay t Include t ms delay between multiple test runs (default: 0 ms)\n'\
' -predelay t Include t ms delay before 1st suspend (default: 0 ms)\n'\
' -postdelay t Include t ms delay after last resume (default: 0 ms)\n'\
' -mindev ms Discard all device blocks shorter than ms milliseconds (e.g. 0.001 for us)\n'\
' -multi n d Execute <n> consecutive tests at <d> seconds intervals. If <n> is followed\n'\
' by a "d", "h", or "m" execute for <n> days, hours, or mins instead.\n'\
' The outputs will be created in a new subdirectory with a summary page.\n'\
' -maxfail n Abort a -multi run after n consecutive fails (default is 0 = never abort)\n'\
' [debug]\n'\
' -f Use ftrace to create device callgraphs (default: disabled)\n'\
' -ftop Use ftrace on the top level call: "%s" (default: disabled)\n'\
' -maxdepth N limit the callgraph data to N call levels (default: 0=all)\n'\
' -expandcg pre-expand the callgraph data in the html output (default: disabled)\n'\
' -fadd file Add functions to be graphed in the timeline from a list in a text file\n'\
' -filter "d1,d2,..." Filter out all but this comma-delimited list of device names\n'\
' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)\n'\
' -cgphase P Only show callgraph data for phase P (e.g. suspend_late)\n'\
' -cgtest N Only show callgraph data for test N (e.g. 0 or 1 in an x2 run)\n'\
' -timeprec N Number of significant digits in timestamps (0:S, [3:ms], 6:us)\n'\
' -cgfilter S Filter the callgraph output in the timeline\n'\
' -cgskip file Callgraph functions to skip, off to disable (default: cgskip.txt)\n'\
' -bufsize N Set trace buffer size to N kilo-bytes (default: all of free memory)\n'\
' -devdump Print out all the raw device data for each phase\n'\
' -cgdump Print out all the raw callgraph data\n'\
'\n'\
'Other commands:\n'\
' -modes List available suspend modes\n'\
' -status Test to see if the system is enabled to run this tool\n'\
' -fpdt Print out the contents of the ACPI Firmware Performance Data Table\n'\
' -wificheck Print out wifi connection info\n'\
' -x<mode> Test xset by toggling the given mode (on/off/standby/suspend)\n'\
' -sysinfo Print out system info extracted from BIOS\n'\
' -devinfo Print out the pm settings of all devices which support runtime suspend\n'\
' -cmdinfo Print out all the platform info collected before and after suspend/resume\n'\
' -flist Print the list of functions currently being captured in ftrace\n'\
' -flistall Print all functions capable of being captured in ftrace\n'\
' -summary dir Create a summary of tests in this dir [-genhtml builds missing html]\n'\
' [redo]\n'\
' -ftrace ftracefile Create HTML output using ftrace input (used with -dmesg)\n'\
' -dmesg dmesgfile Create HTML output using dmesg (used with -ftrace)\n'\
'' % (sysvals.title, sysvals.version, sysvals.suspendmode, sysvals.ftopfunc))
return True
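# Example invocations (illustrative only; the script name and flag availability
# are assumptions based on the help text above and may differ per install):
#   sudo ./sleepgraph.py -m mem -rtcwake 15                # single suspend/resume, auto-wake after 15s
#   sudo ./sleepgraph.py -m mem -rtcwake 15 -f             # same run with device callgraphs
#   sudo ./sleepgraph.py -m mem -multi 3 30 -o suspend-x3  # three consecutive runs, 30s apart
#   sudo ./sleepgraph.py -summary suspend-x3 -genhtml      # summarize a multitest output directory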
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
genhtml = False
cmd = ''
simplecmds = ['-sysinfo', '-modes', '-fpdt', '-flist', '-flistall',
'-devinfo', '-status', '-xon', '-xoff', '-xstandby', '-xsuspend',
'-xinit', '-xreset', '-xstat', '-wificheck', '-cmdinfo']
if '-f' in sys.argv:
sysvals.cgskip = sysvals.configFile('cgskip.txt')
# loop through the command line arguments
args = iter(sys.argv[1:])
for arg in args:
if(arg == '-m'):
try:
val = next(args)
except:
doError('No mode supplied', True)
if val == 'command' and not sysvals.testcommand:
doError('No command supplied for mode "command"', True)
sysvals.suspendmode = val
elif(arg in simplecmds):
cmd = arg[1:]
elif(arg == '-h'):
printHelp()
sys.exit(0)
elif(arg == '-v'):
pprint("Version %s" % sysvals.version)
sys.exit(0)
elif(arg == '-x2'):
sysvals.execcount = 2
elif(arg == '-x2delay'):
sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000)
elif(arg == '-predelay'):
sysvals.predelay = getArgInt('-predelay', args, 0, 60000)
elif(arg == '-postdelay'):
sysvals.postdelay = getArgInt('-postdelay', args, 0, 60000)
elif(arg == '-f'):
sysvals.usecallgraph = True
elif(arg == '-ftop'):
sysvals.usecallgraph = True
sysvals.ftop = True
sysvals.usekprobes = False
elif(arg == '-skiphtml'):
sysvals.skiphtml = True
elif(arg == '-cgdump'):
sysvals.cgdump = True
elif(arg == '-devdump'):
sysvals.devdump = True
elif(arg == '-genhtml'):
genhtml = True
elif(arg == '-addlogs'):
sysvals.dmesglog = sysvals.ftracelog = True
elif(arg == '-nologs'):
sysvals.dmesglog = sysvals.ftracelog = False
elif(arg == '-addlogdmesg'):
sysvals.dmesglog = True
elif(arg == '-addlogftrace'):
sysvals.ftracelog = True
elif(arg == '-noturbostat'):
sysvals.tstat = False
elif(arg == '-verbose'):
sysvals.verbose = True
elif(arg == '-proc'):
sysvals.useprocmon = True
elif(arg == '-dev'):
sysvals.usedevsrc = True
elif(arg == '-sync'):
sysvals.sync = True
elif(arg == '-wifi'):
sysvals.wifi = True
elif(arg == '-gzip'):
sysvals.gzip = True
elif(arg == '-info'):
try:
val = next(args)
except:
doError('-info requires one string argument', True)
elif(arg == '-desc'):
try:
val = next(args)
except:
doError('-desc requires one string argument', True)
elif(arg == '-rs'):
try:
val = next(args)
except:
doError('-rs requires "enable" or "disable"', True)
if val.lower() in switchvalues:
if val.lower() in switchoff:
sysvals.rs = -1
else:
sysvals.rs = 1
else:
doError('invalid option: %s, use "enable/disable" or "on/off"' % val, True)
elif(arg == '-display'):
try:
val = next(args)
except:
				doError('-display requires a mode value', True)
disopt = ['on', 'off', 'standby', 'suspend']
if val.lower() not in disopt:
doError('valid display mode values are %s' % disopt, True)
sysvals.display = val.lower()
elif(arg == '-maxdepth'):
sysvals.max_graph_depth = getArgInt('-maxdepth', args, 0, 1000)
elif(arg == '-rtcwake'):
try:
val = next(args)
except:
doError('No rtcwake time supplied', True)
if val.lower() in switchoff:
sysvals.rtcwake = False
else:
sysvals.rtcwake = True
sysvals.rtcwaketime = getArgInt('-rtcwake', val, 0, 3600, False)
elif(arg == '-timeprec'):
sysvals.setPrecision(getArgInt('-timeprec', args, 0, 6))
elif(arg == '-mindev'):
sysvals.mindevlen = getArgFloat('-mindev', args, 0.0, 10000.0)
elif(arg == '-mincg'):
sysvals.mincglen = getArgFloat('-mincg', args, 0.0, 10000.0)
elif(arg == '-bufsize'):
sysvals.bufsize = getArgInt('-bufsize', args, 1, 1024*1024*8)
elif(arg == '-cgtest'):
sysvals.cgtest = getArgInt('-cgtest', args, 0, 1)
elif(arg == '-cgphase'):
try:
val = next(args)
except:
doError('No phase name supplied', True)
d = Data(0)
if val not in d.phasedef:
doError('invalid phase --> (%s: %s), valid phases are %s'\
% (arg, val, d.phasedef.keys()), True)
sysvals.cgphase = val
elif(arg == '-cgfilter'):
try:
val = next(args)
except:
doError('No callgraph functions supplied', True)
sysvals.setCallgraphFilter(val)
elif(arg == '-skipkprobe'):
try:
val = next(args)
except:
doError('No kprobe functions supplied', True)
sysvals.skipKprobes(val)
elif(arg == '-cgskip'):
try:
val = next(args)
except:
doError('No file supplied', True)
if val.lower() in switchoff:
sysvals.cgskip = ''
else:
sysvals.cgskip = sysvals.configFile(val)
if(not sysvals.cgskip):
doError('%s does not exist' % sysvals.cgskip)
elif(arg == '-callloop-maxgap'):
sysvals.callloopmaxgap = getArgFloat('-callloop-maxgap', args, 0.0, 1.0)
elif(arg == '-callloop-maxlen'):
sysvals.callloopmaxlen = getArgFloat('-callloop-maxlen', args, 0.0, 1.0)
elif(arg == '-cmd'):
try:
val = next(args)
except:
doError('No command string supplied', True)
sysvals.testcommand = val
sysvals.suspendmode = 'command'
elif(arg == '-expandcg'):
sysvals.cgexp = True
elif(arg == '-srgap'):
sysvals.srgap = 5
elif(arg == '-maxfail'):
sysvals.maxfail = getArgInt('-maxfail', args, 0, 1000000)
elif(arg == '-multi'):
try:
c, d = next(args), next(args)
except:
doError('-multi requires two values', True)
sysvals.multiinit(c, d)
elif(arg == '-o'):
try:
val = next(args)
except:
doError('No subdirectory name supplied', True)
sysvals.outdir = sysvals.setOutputFolder(val)
elif(arg == '-config'):
try:
val = next(args)
except:
doError('No text file supplied', True)
file = sysvals.configFile(val)
if(not file):
doError('%s does not exist' % val)
configFromFile(file)
elif(arg == '-fadd'):
try:
val = next(args)
except:
doError('No text file supplied', True)
file = sysvals.configFile(val)
if(not file):
doError('%s does not exist' % val)
sysvals.addFtraceFilterFunctions(file)
elif(arg == '-dmesg'):
try:
val = next(args)
except:
doError('No dmesg file supplied', True)
sysvals.notestrun = True
sysvals.dmesgfile = val
if(os.path.exists(sysvals.dmesgfile) == False):
doError('%s does not exist' % sysvals.dmesgfile)
elif(arg == '-ftrace'):
try:
val = next(args)
except:
doError('No ftrace file supplied', True)
sysvals.notestrun = True
sysvals.ftracefile = val
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s does not exist' % sysvals.ftracefile)
elif(arg == '-summary'):
try:
val = next(args)
except:
doError('No directory supplied', True)
cmd = 'summary'
sysvals.outdir = val
sysvals.notestrun = True
if(os.path.isdir(val) == False):
doError('%s is not accessible' % val)
elif(arg == '-filter'):
try:
val = next(args)
except:
doError('No devnames supplied', True)
sysvals.setDeviceFilter(val)
elif(arg == '-result'):
try:
val = next(args)
except:
doError('No result file supplied', True)
sysvals.result = val
sysvals.signalHandlerInit()
else:
doError('Invalid argument: '+arg, True)
# compatibility errors
if(sysvals.usecallgraph and sysvals.usedevsrc):
doError('-dev is not compatible with -f')
if(sysvals.usecallgraph and sysvals.useprocmon):
doError('-proc is not compatible with -f')
if sysvals.usecallgraph and sysvals.cgskip:
sysvals.vprint('Using cgskip file: %s' % sysvals.cgskip)
sysvals.setCallgraphBlacklist(sysvals.cgskip)
# callgraph size cannot exceed device size
if sysvals.mincglen < sysvals.mindevlen:
sysvals.mincglen = sysvals.mindevlen
# remove existing buffers before calculating memory
if(sysvals.usecallgraph or sysvals.usedevsrc):
sysvals.fsetVal('16', 'buffer_size_kb')
sysvals.cpuInfo()
# just run a utility command and exit
if(cmd != ''):
ret = 0
if(cmd == 'status'):
if not statusCheck(True):
ret = 1
elif(cmd == 'fpdt'):
if not getFPDT(True):
ret = 1
elif(cmd == 'sysinfo'):
sysvals.printSystemInfo(True)
elif(cmd == 'devinfo'):
deviceInfo()
elif(cmd == 'modes'):
pprint(getModes())
elif(cmd == 'flist'):
sysvals.getFtraceFilterFunctions(True)
elif(cmd == 'flistall'):
sysvals.getFtraceFilterFunctions(False)
elif(cmd == 'summary'):
runSummary(sysvals.outdir, True, genhtml)
elif(cmd in ['xon', 'xoff', 'xstandby', 'xsuspend', 'xinit', 'xreset']):
sysvals.verbose = True
ret = sysvals.displayControl(cmd[1:])
elif(cmd == 'xstat'):
pprint('Display Status: %s' % sysvals.displayControl('stat').upper())
elif(cmd == 'wificheck'):
dev = sysvals.checkWifi()
if dev:
print('%s is connected' % sysvals.wifiDetails(dev))
else:
print('No wifi connection found')
elif(cmd == 'cmdinfo'):
for out in sysvals.cmdinfo(False, True):
print('[%s - %s]\n%s\n' % out)
sys.exit(ret)
# if instructed, re-analyze existing data files
if(sysvals.notestrun):
stamp = rerunTest(sysvals.outdir)
sysvals.outputResult(stamp)
sys.exit(0)
# verify that we can run a test
error = statusCheck()
if(error):
doError(error)
# extract mem/disk extra modes and convert
mode = sysvals.suspendmode
if mode.startswith('mem'):
memmode = mode.split('-', 1)[-1] if '-' in mode else 'deep'
if memmode == 'shallow':
mode = 'standby'
elif memmode == 's2idle':
mode = 'freeze'
else:
mode = 'mem'
sysvals.memmode = memmode
sysvals.suspendmode = mode
if mode.startswith('disk-'):
sysvals.diskmode = mode.split('-', 1)[-1]
sysvals.suspendmode = 'disk'
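	# e.g. '-m mem-s2idle' runs with suspendmode='freeze' and memmode='s2idle',
	# while '-m disk-platform' runs with suspendmode='disk' and diskmode='platform'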
sysvals.systemInfo(dmidecode(sysvals.mempath))
failcnt, ret = 0, 0
if sysvals.multitest['run']:
# run multiple tests in a separate subdirectory
if not sysvals.outdir:
if 'time' in sysvals.multitest:
s = '-%dm' % sysvals.multitest['time']
else:
s = '-x%d' % sysvals.multitest['count']
sysvals.outdir = datetime.now().strftime('suspend-%y%m%d-%H%M%S'+s)
if not os.path.isdir(sysvals.outdir):
os.makedirs(sysvals.outdir)
sysvals.sudoUserchown(sysvals.outdir)
finish = datetime.now()
if 'time' in sysvals.multitest:
finish += timedelta(minutes=sysvals.multitest['time'])
for i in range(sysvals.multitest['count']):
sysvals.multistat(True, i, finish)
if i != 0 and sysvals.multitest['delay'] > 0:
pprint('Waiting %d seconds...' % (sysvals.multitest['delay']))
time.sleep(sysvals.multitest['delay'])
fmt = 'suspend-%y%m%d-%H%M%S'
sysvals.testdir = os.path.join(sysvals.outdir, datetime.now().strftime(fmt))
ret = runTest(i+1, True)
failcnt = 0 if not ret else failcnt + 1
if sysvals.maxfail > 0 and failcnt >= sysvals.maxfail:
pprint('Maximum fail count of %d reached, aborting multitest' % (sysvals.maxfail))
break
time.sleep(5)
sysvals.resetlog()
sysvals.multistat(False, i, finish)
if 'time' in sysvals.multitest and datetime.now() >= finish:
break
if not sysvals.skiphtml:
runSummary(sysvals.outdir, False, False)
sysvals.sudoUserchown(sysvals.outdir)
else:
if sysvals.outdir:
sysvals.testdir = sysvals.outdir
# run the test in the current directory
ret = runTest()
# reset to default values after testing
if sysvals.display:
sysvals.displayControl('reset')
if sysvals.rs != 0:
sysvals.setRuntimeSuspend(False)
sys.exit(ret)
|
[] |
[] |
[
"USER",
"SUDO_USER"
] |
[]
|
["USER", "SUDO_USER"]
|
python
| 2 | 0 | |
pkg/operator/lib/logging/logging.go
|
/*
Copyright 2021 Cortex Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logging
import (
"os"
"sync"
"github.com/cortexlabs/cortex/pkg/types/userconfig"
"go.uber.org/zap"
)
var logger *zap.SugaredLogger
var loggerLock sync.Mutex
func initializeLogger() {
operatorLogLevel := os.Getenv("CORTEX_OPERATOR_LOG_LEVEL")
if operatorLogLevel == "" {
operatorLogLevel = "info"
}
operatorCortexLogLevel := userconfig.LogLevelFromString(operatorLogLevel)
if operatorCortexLogLevel == userconfig.UnknownLogLevel {
panic(ErrorInvalidOperatorLogLevel(operatorLogLevel, userconfig.LogLevelTypes()))
}
operatorZapConfig := DefaultZapConfig(operatorCortexLogLevel)
operatorLogger, err := operatorZapConfig.Build()
if err != nil {
panic(err)
}
logger = operatorLogger.Sugar()
}
func GetOperatorLogger() *zap.SugaredLogger {
loggerLock.Lock()
defer loggerLock.Unlock()
if logger == nil {
initializeLogger()
}
return logger
}
func DefaultZapConfig(level userconfig.LogLevel, fields ...map[string]interface{}) zap.Config {
encoderConfig := zap.NewProductionEncoderConfig()
encoderConfig.MessageKey = "message"
initialFields := map[string]interface{}{}
for _, m := range fields {
for k, v := range m {
initialFields[k] = v
}
}
return zap.Config{
Level: zap.NewAtomicLevelAt(userconfig.ToZapLogLevel(level)),
Encoding: "json",
EncoderConfig: encoderConfig,
OutputPaths: []string{"stdout"},
ErrorOutputPaths: []string{"stderr"},
InitialFields: map[string]interface{}{"labels": initialFields},
}
}
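// Usage sketch (illustrative only; the log-level constant name below is an
// assumption, check pkg/types/userconfig for the actual identifiers):
//
//	cfg := DefaultZapConfig(userconfig.InfoLogLevel, map[string]interface{}{"component": "operator"})
//	if l, err := cfg.Build(); err == nil {
//		l.Sugar().Infow("operator started")
//	}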
|
[
"\"CORTEX_OPERATOR_LOG_LEVEL\""
] |
[] |
[
"CORTEX_OPERATOR_LOG_LEVEL"
] |
[]
|
["CORTEX_OPERATOR_LOG_LEVEL"]
|
go
| 1 | 0 | |
tests/checkpointing/test_checkpoint_callback_frequency.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
import pytest
import torch
from pytorch_lightning import callbacks, seed_everything, Trainer
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
@mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"})
def test_mc_called(tmpdir):
seed_everything(1234)
# -----------------
# TRAIN LOOP ONLY
# -----------------
train_step_only_model = BoringModel()
train_step_only_model.validation_step = None
# no callback
trainer = Trainer(max_epochs=3, checkpoint_callback=False)
trainer.fit(train_step_only_model)
assert len(trainer.dev_debugger.checkpoint_callback_history) == 0
# -----------------
# TRAIN + VAL LOOP ONLY
# -----------------
val_train_model = BoringModel()
# no callback
trainer = Trainer(max_epochs=3, checkpoint_callback=False)
trainer.fit(val_train_model)
assert len(trainer.dev_debugger.checkpoint_callback_history) == 0
@mock.patch('torch.save')
@pytest.mark.parametrize(
['epochs', 'val_check_interval', 'expected'],
[(1, 1.0, 1), (2, 1.0, 2), (1, 0.25, 4), (2, 0.3, 7)],
)
def test_default_checkpoint_freq(save_mock, tmpdir, epochs: int, val_check_interval: float, expected: int):
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=epochs,
weights_summary=None,
val_check_interval=val_check_interval,
progress_bar_refresh_rate=0,
)
trainer.fit(model)
# make sure types are correct
assert save_mock.call_count == expected
@mock.patch('torch.save')
@pytest.mark.parametrize(['k', 'epochs', 'val_check_interval', 'expected'], [
(1, 1, 1.0, 1),
(2, 2, 1.0, 2),
(2, 1, 0.25, 4),
(2, 2, 0.3, 7),
])
def test_top_k(save_mock, tmpdir, k: int, epochs: int, val_check_interval: float, expected: int):
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.last_coeff = 10.0
def training_step(self, batch, batch_idx):
loss = self.step(torch.ones(32))
loss = loss / (loss + 0.0000001)
loss += self.last_coeff
self.log('my_loss', loss)
self.last_coeff *= 0.999
return loss
model = TestModel()
trainer = Trainer(
callbacks=[callbacks.ModelCheckpoint(dirpath=tmpdir, monitor='my_loss', save_top_k=k)],
default_root_dir=tmpdir,
max_epochs=epochs,
weights_summary=None,
val_check_interval=val_check_interval
)
trainer.fit(model)
# make sure types are correct
assert save_mock.call_count == expected
@mock.patch('torch.save')
@RunIf(special=True, min_gpus=2)
@pytest.mark.parametrize(['k', 'epochs', 'val_check_interval', 'expected'], [(1, 1, 1.0, 1), (2, 2, 0.3, 5)])
def test_top_k_ddp(save_mock, tmpdir, k, epochs, val_check_interval, expected):
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
local_rank = int(os.getenv("LOCAL_RANK"))
self.log('my_loss', batch_idx * (1 + local_rank), on_epoch=True)
return super().training_step(batch, batch_idx)
def training_epoch_end(self, outputs) -> None:
data = str(self.global_rank)
obj = [[data], (data, ), set(data)]
out = self.trainer.training_type_plugin.broadcast(obj)
assert obj == [[str(self.global_rank)], (str(self.global_rank), ), set(str(self.global_rank))]
assert out == [['0'], ('0', ), set('0')]
model = TestModel()
trainer = Trainer(
callbacks=[callbacks.ModelCheckpoint(dirpath=tmpdir, monitor='my_loss_step', save_top_k=k, mode="max")],
default_root_dir=tmpdir,
max_epochs=epochs,
weights_summary=None,
val_check_interval=val_check_interval,
accelerator="ddp",
gpus=2,
limit_train_batches=64,
limit_val_batches=32,
)
if os.getenv("LOCAL_RANK") == "0":
with pytest.raises(UserWarning, match="The value associated to the key my_loss_epoch: [15.5, 31.0]"):
trainer.fit(model)
assert save_mock.call_count == expected
else:
trainer.fit(model)
|
[] |
[] |
[
"LOCAL_RANK"
] |
[]
|
["LOCAL_RANK"]
|
python
| 1 | 0 | |
cmd/lambda-revocation-notifier/main.go
|
package main
import (
"bytes"
"context"
"io"
"os"
"sort"
"time"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ses"
awsservices "github.com/empathybroker/aws-vpn/pkg/aws"
"github.com/empathybroker/aws-vpn/pkg/pki"
awspki "github.com/empathybroker/aws-vpn/pkg/pki/aws"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
func init() {
if os.Getenv("DEBUG") == "true" {
log.SetLevel(log.DebugLevel)
}
log.SetFormatter(&log.JSONFormatter{
TimestampFormat: time.RFC3339Nano,
FieldMap: log.FieldMap{
log.FieldKeyTime: "@timestamp",
},
})
}
var (
sesClient = awsservices.NewSESClient()
pkiStorage = awspki.NewAWSStorage(awsservices.NewSecretsManagerClient(), awsservices.NewDynamoDBClient())
awsPKI = pki.NewPKI(pkiStorage)
)
func getEmailBody(w io.Writer, certs []*pki.CertificateInfo) error {
sort.Slice(certs, func(i, j int) bool {
return certs[i].NotAfter.Before(certs[j].NotAfter)
})
return tplEmail.Execute(w, bodyData{
Certificates: certs,
AdminURL: configNotifier.AdminURL,
HelpURL: configNotifier.HelpURL,
Signature: configNotifier.EmailSignature,
})
}
func sendNotificationEmail(ctx context.Context, to string, body string) error {
sesInput := &ses.SendEmailInput{
Source: aws.String(configNotifier.EmailFrom),
Destination: &ses.Destination{
ToAddresses: aws.StringSlice([]string{to}),
},
Message: &ses.Message{
Subject: &ses.Content{
Data: aws.String(configNotifier.EmailSubject),
Charset: aws.String("UTF-8"),
},
Body: &ses.Body{
Text: &ses.Content{
Data: aws.String(body),
Charset: aws.String("UTF-8"),
},
},
},
}
if configNotifier.EmailSourceArn != "" {
sesInput.SourceArn = aws.String(configNotifier.EmailSourceArn)
}
r, err := sesClient.SendEmailWithContext(ctx, sesInput)
if err != nil {
return errors.Wrap(err, "sending message with SES")
}
log.Infof("Sent message to %s with ID %s", to, aws.StringValue(r.MessageId))
return nil
}
func handler(ctx context.Context) error {
certs, err := awsPKI.ListCerts(ctx, "")
if err != nil {
log.WithError(err).Error("Error listing certificates")
return err
}
cutoffTime := time.Now().Add(time.Hour * 24 * time.Duration(configNotifier.DaysBefore)).UTC()
log.Debugf("Sending notifications to users with certs expiring before %s", cutoffTime.Format(time.RFC3339))
targets := make(map[string][]*pki.CertificateInfo)
for _, cert := range certs {
if cert.Revoked != nil {
continue
}
if cert.NotAfter.Before(cutoffTime) {
targets[cert.Subject] = append(targets[cert.Subject], cert)
}
}
for to, certs := range targets {
var body bytes.Buffer
if err := getEmailBody(&body, certs); err != nil {
log.WithError(err).Error("Error building email body from template")
continue
}
if err := sendNotificationEmail(ctx, to, body.String()); err != nil {
log.WithError(err).Error("Error sending email")
continue
}
}
log.Infof("Sent %d notification emails", len(targets))
return nil
}
func main() {
lambda.Start(handler)
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
pmlc/runlit.cfg.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lit runner configuration."""
import os
import lit.formats
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
# Configuration file for the 'lit' test runner.
# name: The name of this test suite.
config.name = 'MLIR ' + os.path.basename(config.mlir_test_dir)
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# test_source_root: The root path where tests are located.
config.test_source_root = config.mlir_test_dir
# test_exec_root: The root path where tests should be run.
config.test_exec_root = os.environ['RUNFILES_DIR']
llvm_config.use_default_substitutions()
# Tweak the PATH to include the tools dir.
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
tool_dirs = config.mlir_tf_tools_dirs + [config.mlir_tools_dir, config.llvm_tools_dir]
tool_names = [
'mlir-opt',
'mlir-translate',
'pmlc-opt',
'pmlc-translate',
]
tools = [ToolSubst(s, unresolved='ignore') for s in tool_names]
llvm_config.add_tool_substitutions(tools, tool_dirs)
|
[] |
[] |
[
"RUNFILES_DIR"
] |
[]
|
["RUNFILES_DIR"]
|
python
| 1 | 0 | |
Angry Professor.py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the angryProfessor function below.
def angryProfessor(k, a):
early_students = 0
for student in a:
if student <= 0:
            early_students += 1
return 'YES' if early_students < k else 'NO'
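# Example (illustrative): angryProfessor(3, [-1, -3, 4, 2]) returns 'YES',
# since only two students arrive on time (arrival <= 0), which is below the
# cancellation threshold k = 3.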
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
nk = input().split()
n = int(nk[0])
k = int(nk[1])
a = list(map(int, input().rstrip().split()))
result = angryProfessor(k, a)
fptr.write(result + '\n')
fptr.close()
|
[] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
python
| 1 | 0 | |
backend/auth/auth.go
|
package auth
import (
"backend/models"
"backend/utils"
"bytes"
"context"
"fmt"
"io"
"net/http"
"os"
"strings"
"github.com/dgrijalva/jwt-go"
)
//JwtAuthentication authenticates the received JWT token
var JwtAuthentication = func(next http.Handler) http.Handler {
return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
requestPath := request.URL.Path
//auth is the list of paths that requires authentication
auth := []string{
"/dashboard",
"/api/v1/search",
}
		// If the current path is not in the list of auth routes, we can serve the request without authentication.
requireAuth := false
for _, value := range auth {
if value == requestPath {
requireAuth = true
break
}
}
if !requireAuth {
// if requestPath == "/api/v1/login" {
// // Set headers everytime login api is hit
// // rw := &responsewriter{w: writer}
// // next.ServeHTTP(rw, request)
// return
// }
next.ServeHTTP(writer, request)
return
}
		// otherwise the path requires authentication
response := make(map[string]interface{})
tokenHeader := request.Header.Get("Authorization")
if tokenHeader == "" {
response = utils.Message(false, "Missing auth token")
writer.WriteHeader(http.StatusForbidden)
writer.Header().Add("Content-Type", "application/json")
utils.Respond(writer, response)
return
}
		splitted := strings.Split(tokenHeader, " ") // The token normally comes in the format `Bearer {token-body}`; check that the retrieved token matches this format
if len(splitted) != 2 {
response = utils.Message(false, "Invalid/Malformed auth token")
writer.WriteHeader(http.StatusForbidden)
writer.Header().Add("Content-Type", "application/json")
utils.Respond(writer, response)
return
}
tokenPart := splitted[1] // the information that we're interested in
tk := &models.Token{}
token, err := jwt.ParseWithClaims(tokenPart, tk, func(token *jwt.Token) (interface{}, error) {
return []byte(os.Getenv("token_password")), nil
})
//malformed token, return 403
if err != nil {
response = utils.Message(false, "Malformed auth token")
writer.WriteHeader(http.StatusForbidden)
writer.Header().Add("Content-Type", "application/json")
utils.Respond(writer, response)
return
}
//token is invalid
if !token.Valid {
response = utils.Message(false, "Token is invalid")
writer.WriteHeader(http.StatusForbidden)
writer.Header().Add("Content-Type", "application/json")
utils.Respond(writer, response)
return
}
//everything went well
		fmt.Printf("User %s authenticated\n", tk.UserName)
		// Store the authenticated user's ID in the request context
ctx := context.WithValue(request.Context(), "user", tk.UserID)
request = request.WithContext(ctx)
next.ServeHTTP(writer, request)
})
}
type responsewriter struct {
w http.ResponseWriter
buf bytes.Buffer
code int
}
func (rw *responsewriter) Header() http.Header {
return rw.w.Header()
}
func (rw *responsewriter) WriteHeader(statusCode int) {
rw.code = statusCode
}
func (rw *responsewriter) Write(data []byte) (int, error) {
return rw.buf.Write(data)
}
func (rw *responsewriter) Done() (int64, error) {
if rw.code > 0 {
rw.w.WriteHeader(rw.code)
}
return io.Copy(rw.w, &rw.buf)
}
|
[
"\"token_password\""
] |
[] |
[
"token_password"
] |
[]
|
["token_password"]
|
go
| 1 | 0 | |
thor/main.py
|
import os
import time
import uuid
import yaml
import logging
import shutil
import numpy as np
import pandas as pd
import multiprocessing as mp
from functools import partial
from astropy.time import Time
from .config import Config
from .config import Configuration
from .clusters import find_clusters, filter_clusters_by_length
from .cell import Cell
from .orbit import TestOrbit
from .orbits import Orbits
from .orbits import generateEphemeris
from .orbits import initialOrbitDetermination
from .orbits import differentialCorrection
from .orbits import mergeAndExtendOrbits
from .observatories import getObserverState
from .utils import _initWorker
from .utils import _checkParallel
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
logger = logging.getLogger("thor")
__all__ = [
"rangeAndShift_worker",
"rangeAndShift",
"clusterVelocity",
"clusterVelocity_worker",
"clusterAndLink",
"runTHOROrbit",
"runTHOR",
]
def rangeAndShift_worker(observations, ephemeris, cell_area=10):
assert len(observations["mjd_utc"].unique()) == 1
assert len(ephemeris["mjd_utc"].unique()) == 1
assert observations["mjd_utc"].unique()[0] == ephemeris["mjd_utc"].unique()[0]
observation_time = observations["mjd_utc"].unique()[0]
# Create Cell centered on the sky-plane location of the
# test orbit
cell = Cell(
ephemeris[["RA_deg", "Dec_deg"]].values[0],
observation_time,
area=cell_area,
)
# Grab observations within cell
cell.getObservations(observations)
if len(cell.observations) != 0:
# Create test orbit with state of orbit at visit time
test_orbit = TestOrbit(
ephemeris[["obj_x", "obj_y", "obj_z", "obj_vx", "obj_vy", "obj_vz"]].values[0],
observation_time
)
# Prepare rotation matrices
test_orbit.prepare()
# Apply rotation matrices and transform observations into the orbit's
# frame of motion.
test_orbit.applyToObservations(cell.observations)
projected_observations = cell.observations
else:
projected_observations = pd.DataFrame()
return projected_observations
def clusterVelocity(
obs_ids,
x,
y,
dt,
vx,
vy,
eps=0.005,
min_obs=5,
min_arc_length=1.0,
alg="hotspot_2d",
):
"""
Clusters THOR projection with different velocities
in the projection plane using `~scipy.cluster.DBSCAN`.
Parameters
----------
    obs_ids : `~numpy.ndarray` (N)
        Observation IDs.
    x : `~numpy.ndarray` (N)
        Projection space x coordinate in degrees or radians.
    y : `~numpy.ndarray` (N)
        Projection space y coordinate in degrees or radians.
    dt : `~numpy.ndarray` (N)
        Change in time from 0th exposure in units of MJD.
    vx : `~numpy.ndarray` (N)
        Projection space x velocity in units of degrees or radians per day in MJD.
    vy : `~numpy.ndarray` (N)
        Projection space y velocity in units of degrees or radians per day in MJD.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
See: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html
[Default = 0.005]
min_obs : int, optional
The number of samples (or total weight) in a neighborhood for a
point to be considered as a core point. This includes the point itself.
See: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html
[Default = 5]
min_arc_length : float, optional
Minimum arc length in units of days for a cluster to be accepted.
Returns
-------
list
If clusters are found, will return a list of numpy arrays containing the
observation IDs for each cluster. If no clusters are found, will return np.NaN.
"""
logger.debug(f"cluster: vx={vx} vy={vy} n_obs={len(obs_ids)}")
xx = x - vx * dt
yy = y - vy * dt
X = np.stack((xx, yy), 1)
clusters = find_clusters(X, eps, min_obs, alg=alg)
clusters = filter_clusters_by_length(
clusters, dt, min_obs, min_arc_length,
)
cluster_ids = []
for cluster in clusters:
cluster_ids.append(obs_ids[cluster])
if len(cluster_ids) == 0:
cluster_ids = np.NaN
return cluster_ids
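# Illustrative usage sketch (not executed; the input arrays are hypothetical
# placeholders for the projected-observation columns described in the docstring):
#
#   cluster_ids = clusterVelocity(
#       obs_ids, theta_x, theta_y, dt,
#       vx=0.01, vy=-0.005,
#       eps=0.005, min_obs=5, min_arc_length=1.0, alg="dbscan",
#   )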
def clusterVelocity_worker(
vx,
vy,
obs_ids=None,
x=None,
y=None,
dt=None,
eps=None,
min_obs=None,
min_arc_length=None,
alg=None
):
"""
Helper function to multiprocess clustering.
"""
cluster_ids = clusterVelocity(
obs_ids,
x,
y,
dt,
vx,
vy,
eps=eps,
min_obs=min_obs,
min_arc_length=min_arc_length,
alg=alg
)
return cluster_ids
def rangeAndShift(
observations,
orbit,
cell_area=10,
backend="PYOORB",
backend_kwargs={},
num_jobs=1,
parallel_backend="mp"
):
"""
Propagate the orbit to all observation times in observations. At each epoch gather a circular region of observations of size cell_area
centered about the location of the orbit on the sky-plane. Transform and project each of the gathered observations into
the frame of motion of the test orbit.
Parameters
----------
observations : `~pandas.DataFrame`
DataFrame containing preprocessed observations.
Should contain the following columns:
obs_id : observation IDs
RA_deg : Right Ascension in degrees.
Dec_deg : Declination in degrees.
RA_sigma_deg : 1-sigma uncertainty for Right Ascension in degrees.
Dec_sigma_deg : 1-sigma uncertainty for Declination in degrees.
observatory_code : MPC observatory code
orbit : `~numpy.ndarray` (6)
Orbit to propagate. If backend is 'THOR', then these orbits must be expressed
as heliocentric ecliptic cartesian elements. If backend is 'PYOORB' orbits may be
expressed in keplerian, cometary or cartesian elements.
cell_area : float, optional
Cell's area in units of square degrees.
[Default = 10]
backend : {'THOR', 'PYOORB'}, optional
Which backend to use.
backend_kwargs : dict, optional
Settings and additional parameters to pass to selected
backend.
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
Returns
-------
projected_observations : {`~pandas.DataFrame`, -1}
Observations dataframe (from cell.observations) with columns containing
projected coordinates.
"""
time_start = time.time()
logger.info("Running range and shift...")
logger.info("Assuming r = {} au".format(orbit.cartesian[0, :3]))
logger.info("Assuming v = {} au per day".format(orbit.cartesian[0, 3:]))
# Build observers dictionary: keys are observatory codes with exposure times (as astropy.time objects)
# as values
observers = {}
for code in observations["observatory_code"].unique():
observers[code] = Time(
observations[observations["observatory_code"].isin([code])]["mjd_utc"].unique(),
format="mjd",
scale="utc"
)
# Propagate test orbit to all times in observations
ephemeris = generateEphemeris(
orbit,
observers,
backend=backend,
backend_kwargs=backend_kwargs,
chunk_size=1,
num_jobs=1,
parallel_backend=parallel_backend
)
if backend == "FINDORB":
observer_states = []
for observatory_code, observation_times in observers.items():
observer_states.append(
getObserverState(
[observatory_code],
observation_times,
frame='ecliptic',
origin='heliocenter',
)
)
observer_states = pd.concat(observer_states)
observer_states.reset_index(
inplace=True,
drop=True
)
ephemeris = ephemeris.join(observer_states[["obs_x", "obs_y", "obs_z", "obs_vx", "obs_vy", "obs_vz"]])
velocity_cols = []
if backend != "PYOORB":
velocity_cols = ["obs_vx", "obs_vy", "obs_vz"]
observations = observations.merge(
ephemeris[["mjd_utc", "observatory_code", "obs_x", "obs_y", "obs_z"] + velocity_cols],
left_on=["mjd_utc", "observatory_code"],
right_on=["mjd_utc", "observatory_code"]
)
# Split the observations into a single dataframe per unique observatory code and observation time
# Basically split the observations into groups of unique exposures
observations_grouped = observations.groupby(by=["observatory_code", "mjd_utc"])
observations_split = [observations_grouped.get_group(g) for g in observations_grouped.groups]
# Do the same for the test orbit's ephemerides
ephemeris_grouped = ephemeris.groupby(by=["observatory_code", "mjd_utc"])
ephemeris_split = [ephemeris_grouped.get_group(g) for g in ephemeris_grouped.groups]
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel:
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
rangeAndShift_worker_ray = ray.remote(rangeAndShift_worker)
rangeAndShift_worker_ray = rangeAndShift_worker_ray.options(
num_returns=1,
num_cpus=1
)
p = []
for observations_i, ephemeris_i in zip(observations_split, ephemeris_split):
p.append(
rangeAndShift_worker_ray.remote(
observations_i,
ephemeris_i,
cell_area=cell_area
)
)
projected_dfs = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
projected_dfs = p.starmap(
partial(
rangeAndShift_worker,
cell_area=cell_area
),
zip(
observations_split,
ephemeris_split,
)
)
p.close()
else:
projected_dfs = []
for observations_i, ephemeris_i in zip(observations_split, ephemeris_split):
projected_df = rangeAndShift_worker(
observations_i,
ephemeris_i,
cell_area=cell_area
)
projected_dfs.append(projected_df)
projected_observations = pd.concat(projected_dfs)
if len(projected_observations) > 0:
projected_observations.sort_values(by=["mjd_utc", "observatory_code"], inplace=True)
projected_observations.reset_index(inplace=True, drop=True)
else:
projected_observations = pd.DataFrame(
columns=[
'obs_id', 'mjd_utc', 'RA_deg', 'Dec_deg', 'RA_sigma_deg',
'Dec_sigma_deg', 'observatory_code', 'obs_x', 'obs_y', 'obs_z', 'obj_x',
'obj_y', 'obj_z', 'theta_x_deg', 'theta_y_deg'
]
)
time_end = time.time()
logger.info("Found {} observations.".format(len(projected_observations)))
logger.info("Range and shift completed in {:.3f} seconds.".format(time_end - time_start))
return projected_observations
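# Illustrative usage sketch (not executed; "preprocessed_observations" and
# "test_orbit" are hypothetical inputs shaped as described in the docstring):
#
#   projected_observations = rangeAndShift(
#       preprocessed_observations,
#       test_orbit,
#       cell_area=10,
#       backend="PYOORB",
#       num_jobs=1,
#   )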
def clusterAndLink(
observations,
vx_range=[-0.1, 0.1],
vy_range=[-0.1, 0.1],
vx_bins=100,
vy_bins=100,
vx_values=None,
vy_values=None,
eps=0.005,
min_obs=5,
min_arc_length=1.0,
alg="dbscan",
num_jobs=1,
parallel_backend="mp"
):
"""
Cluster and link correctly projected (after ranging and shifting)
detections.
Parameters
----------
observations : `~pandas.DataFrame`
DataFrame containing post-range and shift observations.
vx_range : {None, list or `~numpy.ndarray` (2)}
Maximum and minimum velocity range in x.
Will not be used if vx_values are specified.
[Default = [-0.1, 0.1]]
vy_range : {None, list or `~numpy.ndarray` (2)}
Maximum and minimum velocity range in y.
Will not be used if vy_values are specified.
[Default = [-0.1, 0.1]]
vx_bins : int, optional
Length of x-velocity grid between vx_range[0]
and vx_range[-1]. Will not be used if vx_values are
specified.
[Default = 100]
vy_bins: int, optional
Length of y-velocity grid between vy_range[0]
and vy_range[-1]. Will not be used if vy_values are
specified.
[Default = 100]
vx_values : {None, `~numpy.ndarray`}, optional
Values of velocities in x at which to cluster
and link.
[Default = None]
vy_values : {None, `~numpy.ndarray`}, optional
Values of velocities in y at which to cluster
and link.
[Default = None]
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
See: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html
[Default = 0.005]
min_obs : int, optional
The number of samples (or total weight) in a neighborhood for a
point to be considered as a core point. This includes the point itself.
See: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html
[Default = 5]
alg: str
Algorithm to use. Can be "dbscan" or "hotspot_2d".
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
Returns
-------
clusters : `~pandas.DataFrame`
DataFrame with the cluster ID, the number of observations, and the x and y velocity.
cluster_members : `~pandas.DataFrame`
DataFrame containing the cluster ID and the observation IDs of its members.
Notes
-----
The algorithm chosen can have a big impact on performance and accuracy.
    alg="dbscan" uses the DBSCAN algorithm of Ester et al. It's relatively slow
but works with high accuracy; it is certain to find all clusters with at
least min_obs points that are separated by at most eps.
alg="hotspot_2d" is much faster (perhaps 10-20x faster) than dbscan, but it
may miss some clusters, particularly when points are spaced a distance of 'eps'
apart.
"""
time_start_cluster = time.time()
logger.info("Running velocity space clustering...")
# Extract useful quantities
obs_ids = observations["obs_id"].values
theta_x = observations["theta_x_deg"].values
theta_y = observations["theta_y_deg"].values
mjd = observations["mjd_utc"].values
# Select detections in first exposure
first = np.where(mjd == mjd.min())[0]
mjd0 = mjd[first][0]
dt = mjd - mjd0
if vx_values is None and vx_range is not None:
vx = np.linspace(*vx_range, num=vx_bins)
elif vx_values is None and vx_range is None:
raise ValueError("Both vx_values and vx_range cannot be None.")
else:
vx = vx_values
vx_range = [vx_values[0], vx_values[-1]]
vx_bins = len(vx)
if vy_values is None and vy_range is not None:
vy = np.linspace(*vy_range, num=vy_bins)
elif vy_values is None and vy_range is None:
raise ValueError("Both vy_values and vy_range cannot be None.")
else:
vy = vy_values
vy_range = [vy_values[0], vy_values[-1]]
vy_bins = len(vy)
if vx_values is None and vy_values is None:
vxx, vyy = np.meshgrid(vx, vy)
vxx = vxx.flatten()
vyy = vyy.flatten()
elif vx_values is not None and vy_values is not None:
vxx = vx
vyy = vy
else:
        raise ValueError("vx_values and vy_values must either both be None or both be defined.")
logger.debug("X velocity range: {}".format(vx_range))
if vx_values is not None:
logger.debug("X velocity values: {}".format(vx_bins))
else:
logger.debug("X velocity bins: {}".format(vx_bins))
logger.debug("Y velocity range: {}".format(vy_range))
if vy_values is not None:
logger.debug("Y velocity values: {}".format(vy_bins))
else:
logger.debug("Y velocity bins: {}".format(vy_bins))
if vx_values is not None:
logger.debug("User defined x velocity values: True")
else:
logger.debug("User defined x velocity values: False")
if vy_values is not None:
logger.debug("User defined y velocity values: True")
else:
logger.debug("User defined y velocity values: False")
if vx_values is None and vy_values is None:
logger.debug("Velocity grid size: {}".format(vx_bins * vy_bins))
else:
logger.debug("Velocity grid size: {}".format(vx_bins))
logger.info("Max sample distance: {}".format(eps))
logger.info("Minimum samples: {}".format(min_obs))
possible_clusters = []
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel:
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
clusterVelocity_worker_ray = ray.remote(clusterVelocity_worker)
clusterVelocity_worker_ray = clusterVelocity_worker_ray.options(
num_returns=1,
num_cpus=1
)
# Put all arrays (which can be large) in ray's
# local object store ahead of time
obs_ids_oid = ray.put(obs_ids)
theta_x_oid = ray.put(theta_x)
theta_y_oid = ray.put(theta_y)
dt_oid = ray.put(dt)
p = []
for vxi, vyi in zip(vxx, vyy):
p.append(
clusterVelocity_worker_ray.remote(
vxi,
vyi,
obs_ids=obs_ids_oid,
x=theta_x_oid,
y=theta_y_oid,
dt=dt_oid,
eps=eps,
min_obs=min_obs,
min_arc_length=min_arc_length,
alg=alg
)
)
possible_clusters = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker
)
possible_clusters = p.starmap(
partial(
clusterVelocity_worker,
obs_ids=obs_ids,
x=theta_x,
y=theta_y,
dt=dt,
eps=eps,
min_obs=min_obs,
min_arc_length=min_arc_length,
alg=alg
),
zip(vxx, vyy)
)
p.close()
else:
possible_clusters = []
for vxi, vyi in zip(vxx, vyy):
possible_clusters.append(
clusterVelocity(
obs_ids,
theta_x,
theta_y,
dt,
vxi,
vyi,
eps=eps,
min_obs=min_obs,
min_arc_length=min_arc_length,
alg=alg
)
)
time_end_cluster = time.time()
logger.info("Clustering completed in {:.3f} seconds.".format(time_end_cluster - time_start_cluster))
logger.info("Restructuring clusters...")
time_start_restr = time.time()
possible_clusters = pd.DataFrame({"clusters": possible_clusters})
# Remove empty clusters
possible_clusters = possible_clusters[~possible_clusters["clusters"].isna()]
if len(possible_clusters) != 0:
        ### The following code is a little messy, it's a lot of pandas dataframe manipulation.
        ### I have tried doing an overhaul wherein the clusters and cluster_members dataframes are created per
        ### velocity combination in the clusterVelocity function. However, this adds an overhead in that function
        ### of ~ 1ms. So clustering 90,000 velocities takes 90 seconds longer, which on small datasets is problematic.
### On large datasets, the effect is not as pronounced because the below code takes a while to run due to
### in-memory pandas dataframe restructuring.
# Make DataFrame with cluster velocities so we can figure out which
# velocities yielded clusters, add names to index so we can enable the join
cluster_velocities = pd.DataFrame({"vtheta_x": vxx, "vtheta_y": vyy})
cluster_velocities.index.set_names("velocity_id", inplace=True)
# Split lists of cluster ids into one column per cluster for each different velocity
# then stack the result
possible_clusters = pd.DataFrame(
possible_clusters["clusters"].values.tolist(),
index=possible_clusters.index
)
possible_clusters = pd.DataFrame(possible_clusters.stack())
possible_clusters.rename(
columns={0: "obs_ids"},
inplace=True
)
possible_clusters = pd.DataFrame(possible_clusters["obs_ids"].values.tolist(), index=possible_clusters.index)
# Drop duplicate clusters
possible_clusters.drop_duplicates(inplace=True)
# Set index names
possible_clusters.index.set_names(["velocity_id", "cluster_id"], inplace=True)
# Reset index
possible_clusters.reset_index(
"cluster_id",
drop=True,
inplace=True
)
possible_clusters["cluster_id"] = [str(uuid.uuid4().hex) for i in range(len(possible_clusters))]
# Make clusters DataFrame
clusters = possible_clusters.join(cluster_velocities)
clusters.reset_index(drop=True, inplace=True)
clusters = clusters[["cluster_id", "vtheta_x", "vtheta_y"]]
# Make cluster_members DataFrame
cluster_members = possible_clusters.reset_index(drop=True).copy()
cluster_members.index = cluster_members["cluster_id"]
cluster_members.drop("cluster_id", axis=1, inplace=True)
cluster_members = pd.DataFrame(cluster_members.stack())
cluster_members.rename(columns={0: "obs_id"}, inplace=True)
cluster_members.reset_index(inplace=True)
cluster_members.drop("level_1", axis=1, inplace=True)
# Calculate arc length and add it to the clusters dataframe
cluster_members_time = cluster_members.merge(
observations[["obs_id", "mjd_utc"]],
on="obs_id",
how="left"
)
clusters_time = cluster_members_time.groupby(
by=["cluster_id"])["mjd_utc"].apply(lambda x: x.max() - x.min()).to_frame()
clusters_time.reset_index(
inplace=True
)
clusters_time.rename(
columns={"mjd_utc" : "arc_length"},
inplace=True
)
clusters = clusters.merge(
clusters_time[["cluster_id", "arc_length"]],
on="cluster_id",
how="left",
)
else:
cluster_members = pd.DataFrame(columns=["cluster_id", "obs_id"])
clusters = pd.DataFrame(columns=["cluster_id", "vtheta_x", "vtheta_y", "arc_length"])
time_end_restr = time.time()
logger.info("Restructuring completed in {:.3f} seconds.".format(time_end_restr - time_start_restr))
logger.info("Found {} clusters.".format(len(clusters)))
logger.info("Clustering and restructuring completed in {:.3f} seconds.".format(time_end_restr - time_start_cluster))
return clusters, cluster_members
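# Illustrative usage sketch (not executed; "projected_observations" stands for
# the output of rangeAndShift above):
#
#   clusters, cluster_members = clusterAndLink(
#       projected_observations,
#       vx_range=[-0.1, 0.1], vy_range=[-0.1, 0.1],
#       vx_bins=100, vy_bins=100,
#       eps=0.005, min_obs=5, alg="dbscan",
#   )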
def runTHOROrbit(
preprocessed_observations,
orbit,
range_shift_config=Config.RANGE_SHIFT_CONFIG,
cluster_link_config=Config.CLUSTER_LINK_CONFIG,
iod_config=Config.IOD_CONFIG,
od_config=Config.OD_CONFIG,
odp_config=Config.ODP_CONFIG,
out_dir=None,
if_exists="continue",
logging_level=logging.INFO
):
logger = logging.getLogger("thor")
logger.setLevel(logging_level)
# Build the configuration class which stores the run parameters
config = Configuration(
range_shift_config=range_shift_config,
cluster_link_config=cluster_link_config,
iod_config=iod_config,
od_config=od_config,
odp_config=odp_config
)
status = {
"rangeAndShift" : False,
"clusterAndLink" : False,
"initialOrbitDetermination" : False,
"differentialCorrection" : False,
"mergeAndExtendOrbits" : False,
"complete" : False
}
continue_ = False
if out_dir is not None:
if not os.path.exists(out_dir):
os.mkdir(out_dir)
logger.debug("Created {} directory.".format(out_dir))
else:
if if_exists == "continue":
logger.warning("{} directory already exists, attempting to continue previous run.".format(out_dir))
continue_ = True
elif if_exists == "erase":
logger.warning("{} directory already exists, removing previous results.".format(out_dir))
shutil.rmtree(out_dir)
os.mkdir(out_dir)
logger.debug("Created {} directory.".format(out_dir))
else:
err = (
"if_exists should be one of {'continue', 'erase'}."
)
raise ValueError(err)
file_handler = logging.FileHandler(
os.path.join(out_dir, "thor.log"),
encoding="utf-8",
delay=False
)
file_handler.setLevel(logging.DEBUG)
file_format = logging.Formatter(
'%(asctime)s.%(msecs)03d [%(levelname)s] [%(thread)s] %(message)s (%(filename)s, %(funcName)s, %(lineno)d)',
datefmt='%Y-%m-%d %H:%M:%S'
)
file_handler.setFormatter(file_format)
logger.addHandler(file_handler)
# The primary files which will be used to determine if the run
# can be continued from a previous state and, if so, from where
# to continue the run
config_file = os.path.join(out_dir, "config.yml")
test_orbit_file = os.path.join(out_dir, "test_orbit.csv")
status_file = os.path.join(out_dir, "status.yml")
config_eq = False
test_orbit_eq = False
save_orbit = True
save_config = True
if continue_:
if not os.path.exists(config_file):
logger.warning("No previous configuration file found.")
save_config = True
else:
logger.info("Previous configuration file found. Comparing settings...")
config_prev = Configuration.fromYaml(config_file)
if config_prev != config:
logger.warning("Previous configuration does not match current configuration. Processing will not continue from previous state.")
else:
config_eq = True
save_config = False
logger.info("Previous configuration matches current configuration.")
if not os.path.exists(test_orbit_file):
logger.warning("No previous test orbit file found.")
save_orbit = True
else:
logger.info("Previous test orbit file found.")
test_orbit_prev = Orbits.from_csv(
test_orbit_file,
)
if test_orbit_prev != orbit:
logger.warning("Previous test orbit does not match current test orbit.")
else:
test_orbit_eq = True
save_orbit = False
logger.info("Previous test orbit matches current test orbit.")
if not os.path.exists(status_file):
logger.warning("No previous status file found.")
else:
if test_orbit_eq and config_eq:
with open(status_file, "r") as status_in:
status = yaml.load(status_in, Loader=yaml.FullLoader)
logger.info("Previous status file found.")
if save_config:
config.toYaml(config_file)
logger.debug("Saved config.yml.")
if save_orbit:
orbit.to_csv(
test_orbit_file
)
logger.debug("Saved test_orbit.csv.")
if status["complete"]:
logger.info("Orbit has already finished processing.")
if not status["complete"]:
if not status["rangeAndShift"]:
projected_observations = rangeAndShift(
preprocessed_observations,
orbit,
**range_shift_config
)
if out_dir is not None:
projected_observations.to_csv(
os.path.join(out_dir, "projected_observations.csv"),
index=False,
float_format="%.15e"
)
logger.debug("Saved projected_observations.csv.")
else:
logger.info("Range and shift completed previously.")
projected_observations = pd.read_csv(
os.path.join(out_dir, "projected_observations.csv"),
index_col=False,
dtype={"obs_id" : str},
float_precision="round_trip"
)
logger.debug("Read projected_observations.csv.")
status["rangeAndShift"] = True
if out_dir is not None:
with open(status_file, "w") as status_out:
yaml.safe_dump(status, status_out)
logger.debug("Updated status.yml.")
if not status["clusterAndLink"]:
clusters, cluster_members = clusterAndLink(
projected_observations,
**cluster_link_config
)
if out_dir is not None:
clusters.to_csv(
os.path.join(out_dir, "clusters.csv"),
index=False,
float_format="%.15e"
)
logger.debug("Saved clusters.csv.")
cluster_members.to_csv(
os.path.join(out_dir, "cluster_members.csv"),
index=False,
float_format="%.15e"
)
logger.debug("Saved cluster_members.csv.")
else:
logger.info("Clustering completed previously.")
clusters = pd.read_csv(
os.path.join(out_dir, "clusters.csv"),
index_col=False
)
logger.debug("Read clusters.csv.")
cluster_members = pd.read_csv(
os.path.join(out_dir, "cluster_members.csv"),
index_col=False,
dtype={"obs_id" : str},
float_precision="round_trip"
)
logger.debug("Read cluster_members.csv.")
status["clusterAndLink"] = True
if out_dir is not None:
with open(status_file, "w") as status_out:
yaml.safe_dump(status, status_out)
logger.debug("Updated status.yml.")
if not status["initialOrbitDetermination"]:
iod_orbits, iod_orbit_members = initialOrbitDetermination(
projected_observations,
cluster_members,
**iod_config
)
if out_dir is not None:
Orbits.from_df(iod_orbits).to_csv(
os.path.join(out_dir, "iod_orbits.csv")
)
logger.debug("Saved iod_orbits.csv.")
iod_orbit_members.to_csv(
os.path.join(out_dir, "iod_orbit_members.csv"),
index=False,
float_format="%.15e"
)
logger.debug("Saved iod_orbit_members.csv.")
else:
logger.info("Initial orbit determination completed previously.")
iod_orbits = Orbits.from_csv(
os.path.join(out_dir, "iod_orbits.csv"),
).to_df(include_units=False)
logger.debug("Read iod_orbits.csv.")
iod_orbit_members = pd.read_csv(
os.path.join(out_dir, "iod_orbit_members.csv"),
index_col=False,
dtype={"obs_id" : str},
float_precision="round_trip"
)
logger.debug("Read iod_orbit_members.csv.")
status["initialOrbitDetermination"] = True
if out_dir is not None:
with open(status_file, "w") as status_out:
yaml.safe_dump(status, status_out)
logger.debug("Updated status.yml.")
iod_orbits = iod_orbits[["orbit_id", "epoch", "x", "y", "z", "vx", "vy", "vz"]]
iod_orbit_members = iod_orbit_members[iod_orbit_members["outlier"] == 0][["orbit_id", "obs_id"]]
iod_orbits = iod_orbits[iod_orbits["orbit_id"].isin(iod_orbit_members["orbit_id"].unique())]
for df in [iod_orbits, iod_orbit_members]:
df.reset_index(
inplace=True,
drop=True
)
if not status["differentialCorrection"]:
od_orbits, od_orbit_members = differentialCorrection(
iod_orbits,
iod_orbit_members,
projected_observations,
**od_config
)
if out_dir is not None:
Orbits.from_df(od_orbits).to_csv(
os.path.join(out_dir, "od_orbits.csv")
)
logger.debug("Saved od_orbits.csv.")
od_orbit_members.to_csv(
os.path.join(out_dir, "od_orbit_members.csv"),
index=False,
float_format="%.15e"
)
logger.debug("Saved od_orbit_members.csv.")
else:
logger.info("Differential correction completed previously.")
od_orbits = Orbits.from_csv(
os.path.join(out_dir, "od_orbits.csv"),
).to_df(include_units=False)
logger.debug("Read od_orbits.csv.")
od_orbit_members = pd.read_csv(
os.path.join(out_dir, "od_orbit_members.csv"),
index_col=False,
dtype={"obs_id" : str},
float_precision="round_trip"
)
logger.debug("Read od_orbit_members.csv.")
status["differentialCorrection"] = True
if out_dir is not None:
with open(status_file, "w") as status_out:
yaml.safe_dump(status, status_out)
logger.debug("Updated status.yml.")
od_orbit_members = od_orbit_members[od_orbit_members["outlier"] == 0][["orbit_id", "obs_id"]]
od_orbits = od_orbits[od_orbits["orbit_id"].isin(od_orbit_members["orbit_id"].unique())]
for df in [od_orbits, od_orbit_members]:
df.reset_index(
inplace=True,
drop=True
)
if not status["mergeAndExtendOrbits"]:
recovered_orbits, recovered_orbit_members = mergeAndExtendOrbits(
od_orbits,
od_orbit_members,
projected_observations,
**odp_config
)
if out_dir is not None:
Orbits.from_df(recovered_orbits).to_csv(
os.path.join(out_dir, "recovered_orbits.csv")
)
logger.debug("Saved recovered_orbits.csv.")
recovered_orbit_members.to_csv(
os.path.join(out_dir, "recovered_orbit_members.csv"),
index=False,
float_format="%.15e"
)
logger.debug("Saved recovered_orbit_members.csv.")
else:
logger.info("Orbit extension and merging completed previously.")
recovered_orbits = Orbits.from_csv(
os.path.join(out_dir, "recovered_orbits.csv"),
).to_df(include_units=False)
logger.debug("Read recovered_orbits.csv.")
recovered_orbit_members = pd.read_csv(
os.path.join(out_dir, "recovered_orbit_members.csv"),
index_col=False,
dtype={"obs_id" : str},
float_precision="round_trip"
)
logger.debug("Read recovered_orbit_members.csv.")
status["mergeAndExtendOrbits"] = True
status["complete"] = True
if out_dir is not None:
with open(status_file, "w") as status_out:
yaml.safe_dump(status, status_out)
logger.debug("Updated status.yml.")
else:
logger.info("Orbit previously completed processing.")
recovered_orbits = Orbits.from_csv(
os.path.join(out_dir, "recovered_orbits.csv"),
).to_df(include_units=False)
logger.debug("Read recovered_orbits.csv.")
recovered_orbit_members = pd.read_csv(
os.path.join(out_dir, "recovered_orbit_members.csv"),
index_col=False,
dtype={"obs_id" : str},
float_precision="round_trip"
)
logger.debug("Read recovered_orbit_members.csv.")
logger.removeHandler(file_handler)
return recovered_orbits, recovered_orbit_members
def runTHOR(
preprocessed_observations,
test_orbits,
range_shift_config=Config.RANGE_SHIFT_CONFIG,
cluster_link_config=Config.CLUSTER_LINK_CONFIG,
iod_config=Config.IOD_CONFIG,
od_config=Config.OD_CONFIG,
odp_config=Config.ODP_CONFIG,
out_dir=None,
if_exists="continue",
    logging_level=logging.INFO
):
logger.setLevel(logging_level)
# Connect to ray cluster if enabled
enable_ray = False
configs = [
range_shift_config,
cluster_link_config,
iod_config,
od_config,
odp_config
]
for conf in configs:
if conf["parallel_backend"] == "ray":
enable_ray = True
if enable_ray:
import ray
if not ray.is_initialized():
ray.init(address="auto")
# Build the configuration class which stores the run parameters
config = Configuration(
range_shift_config=range_shift_config,
cluster_link_config=cluster_link_config,
iod_config=iod_config,
od_config=od_config,
odp_config=odp_config
)
orbits_completed = []
continue_ = False
if_exists_ = if_exists
if out_dir is not None:
if not os.path.exists(out_dir):
os.mkdir(out_dir)
logger.debug("Created {} directory.".format(out_dir))
else:
if if_exists == "continue":
logger.warning("{} directory already exists, attempting to continue previous run.".format(out_dir))
continue_ = True
elif if_exists == "erase":
logger.warning("{} directory already exists, removing previous results.".format(out_dir))
shutil.rmtree(out_dir)
os.mkdir(out_dir)
logger.debug("Created {} directory.".format(out_dir))
else:
err = (
"if_exists should be one of {'continue', 'erase'}."
)
raise ValueError(err)
# The primary files which will be used to determine if the run
# can be continued from a previous state and, if so, from where
# to continue the run
config_file = os.path.join(out_dir, "config.yml")
test_orbits_in_file = os.path.join(out_dir, "test_orbits_in.csv")
status_file = os.path.join(out_dir, "status.txt")
config_eq = False
test_orbits_eq = False
save_orbits = True
save_config = True
# Add summary file for test_orbits that tracks number of recovered orbits and number of observations
# linked in addition to the test_orbit_id used by THOR
test_orbits_out_file = os.path.join(out_dir, "test_orbits_out.csv")
if continue_:
if not os.path.exists(config_file):
logger.warning("No previous configuration file found.")
save_config = True
if_exists_ = "erase"
else:
logger.info("Previous configuration file found. Comparing settings...")
config_prev = Configuration.fromYaml(config_file)
if config_prev != config:
logger.warning("Previous configuration does not match current configuration. Processing will not continue from previous state.")
if_exists_ = "erase"
else:
config_eq = True
save_config = False
logger.info("Previous configuration matches current configuration.")
if not os.path.exists(test_orbits_in_file):
logger.warning("No previous test orbits file found.")
save_orbits = True
else:
logger.info("Previous test orbits file found.")
test_orbits_prev = Orbits.from_csv(
test_orbits_in_file,
)
if test_orbits_prev != test_orbits:
logger.warning("Previous test orbits do not match current test orbits.")
else:
test_orbits_eq = True
save_orbits = False
test_orbits_df = test_orbits_prev.to_df(include_units=False)
logger.info("Previous test orbits match current test orbits.")
if not os.path.exists(status_file):
logger.warning("No previous status file found.")
else:
if test_orbits_eq and config_eq:
orbits_completed = np.loadtxt(
os.path.join(out_dir, "status.txt"),
delimiter="\n",
dtype=str,
ndmin=1
)
logger.info("Previous status file found.")
if (not test_orbits_eq or not config_eq) and continue_:
if if_exists == "continue":
logger.critical("Previous run cannot continue from previous state.")
raise ValueError("Previous run cannot continue from previous state. Set if_exists to 'erase' or change/delete the output directory.")
elif if_exists == "erase":
shutil.rmtree(out_dir)
os.mkdir(out_dir)
logger.debug("Created {} directory.".format(out_dir))
else:
pass
if save_config:
config.toYaml(config_file)
logger.debug("Saved config.yml.")
if save_orbits:
test_orbits.to_csv(
test_orbits_in_file
)
logger.debug("Saved test_orbits_in.csv.")
preprocessed_observations.to_csv(
os.path.join(out_dir, "preprocessed_observations.csv"),
index=False,
float_format="%.15e"
)
logger.debug("Saved preprocessed_observations.csv.")
test_orbit_dfs = []
recovered_orbits_dfs = []
recovered_orbit_members_dfs = []
obs_ids_linked = []
num_orbits = len(test_orbits)
if num_orbits != len(orbits_completed):
test_orbits_split = test_orbits[len(orbits_completed):].split(1)
else:
test_orbits_split = []
# If orbits have previously completed, read the results and continue iterating
# through orbits not previously completed.
id_offset = 0
if len(orbits_completed) > 0:
logger.info("{}/{} orbits have previously finished processing.".format(len(orbits_completed), num_orbits))
test_orbits_df = Orbits.from_csv(
test_orbits_out_file,
).to_df(include_units=False)
logger.debug("Read previous test_orbits_out.csv.")
recovered_orbits = Orbits.from_csv(
os.path.join(out_dir, "recovered_orbits.csv"),
).to_df(include_units=False)
logger.debug("Read previous recovered_orbits.csv.")
recovered_orbit_members = pd.read_csv(
os.path.join(out_dir, "recovered_orbit_members.csv"),
index_col=False,
dtype={"obs_id" : str},
float_precision="round_trip"
)
logger.debug("Read previous recovered_orbit_members.csv.")
test_orbit_dfs = [test_orbits_df]
recovered_orbits_dfs = [recovered_orbits]
recovered_orbit_members_dfs = [recovered_orbit_members]
obs_ids_linked = recovered_orbit_members["obs_id"].values
id_offset = len(orbits_completed)
if len(test_orbits_split) != 0:
for i, orbit_i in enumerate(test_orbits_split):
time_start = time.time()
orbit_id = "{:08d}".format(i + id_offset)
logger.info("Processing orbit {} ({}/{})...".format(orbit_id, i + 1 + id_offset, num_orbits))
if out_dir is not None:
orbit_dir = os.path.join(out_dir, "orbit_{}".format(orbit_id))
else:
orbit_dir = None
linked_mask = (~preprocessed_observations["obs_id"].isin(obs_ids_linked))
recovered_orbits_i, recovered_orbit_members_i = runTHOROrbit(
preprocessed_observations[linked_mask],
orbit_i,
range_shift_config=range_shift_config,
cluster_link_config=cluster_link_config,
iod_config=iod_config,
od_config=od_config,
odp_config=odp_config,
out_dir=orbit_dir,
if_exists=if_exists_,
logging_level=logging_level
)
time_end = time.time()
if len(recovered_orbits_i) > 0:
recovered_orbits_i.insert(0, "test_orbit_id", orbit_id)
recovered_orbit_members_i.insert(0, "test_orbit_id", orbit_id)
obs_ids_linked_i = recovered_orbit_members_i["obs_id"].unique()
obs_ids_linked = np.concatenate([obs_ids_linked, obs_ids_linked_i])
orbits_recovered = len(recovered_orbits_i)
observations_linked = len(obs_ids_linked_i)
else:
orbits_recovered = 0
observations_linked = 0
test_orbit_i = orbit_i.to_df(include_units=False)
test_orbit_i["test_orbit_id"] = orbit_id
test_orbit_i["orbits_recovered"] = orbits_recovered
test_orbit_i["observations_linked"] = observations_linked
test_orbit_i["processing_time"] = time_end - time_start
test_orbit_dfs.append(test_orbit_i)
logger.info("Completed processing orbit {} in {:.3f} seconds.".format(orbit_id, time_end - time_start))
recovered_orbits_dfs.append(recovered_orbits_i)
recovered_orbit_members_dfs.append(recovered_orbit_members_i)
test_orbits_df = pd.concat(
test_orbit_dfs,
ignore_index=True
)
recovered_orbits = pd.concat(
recovered_orbits_dfs,
ignore_index=True
)
recovered_orbit_members = pd.concat(
recovered_orbit_members_dfs,
ignore_index=True
)
if out_dir is not None:
Orbits.from_df(test_orbits_df).to_csv(
test_orbits_out_file
)
logger.debug("Saved test_orbits_out.csv.")
Orbits.from_df(recovered_orbits).to_csv(
os.path.join(out_dir, "recovered_orbits.csv")
)
logger.debug("Saved recovered_orbits.csv.")
recovered_orbit_members.to_csv(
os.path.join(out_dir, "recovered_orbit_members.csv"),
index=False,
float_format="%.15e"
)
logger.debug("Saved recovered_orbit_members.csv.")
orbits_completed = np.concatenate([orbits_completed, np.array([orbit_id])])
if out_dir is not None:
with open(os.path.join(out_dir, "status.txt"), "w") as status_out:
np.savetxt(
status_out,
orbits_completed,
delimiter="\n",
fmt="%s"
)
logger.info("Saved status.txt.")
else:
logger.info("Run completed previously.")
test_orbits_df = Orbits.from_csv(
test_orbits_out_file,
).to_df(include_units=False)
logger.debug("Read test_orbits_out.csv.")
recovered_orbits = Orbits.from_csv(
os.path.join(out_dir, "recovered_orbits.csv"),
).to_df(include_units=False)
logger.debug("Read recovered_orbits.csv.")
recovered_orbit_members = pd.read_csv(
os.path.join(out_dir, "recovered_orbit_members.csv"),
index_col=False,
dtype={"obs_id" : str},
float_precision="round_trip"
)
logger.debug("Read recovered_orbit_members.csv.")
return test_orbits_df, recovered_orbits, recovered_orbit_members
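# A minimal usage sketch (illustrative only; assumes `preprocessed_observations`
# and `test_orbits` were produced by the preprocessing steps earlier in this module,
# and that the output directory is writable):
#
#   test_orbits_df, recovered_orbits, recovered_orbit_members = runTHOR(
#       preprocessed_observations,
#       test_orbits,
#       out_dir="thor_output",
#       if_exists="continue",
#   )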
|
[] |
[] |
[
"MKL_NUM_THREADS",
"OPENBLAS_NUM_THREADS"
] |
[]
|
["MKL_NUM_THREADS", "OPENBLAS_NUM_THREADS"]
|
python
| 2 | 0 | |
gitlabber/cli.py
|
import getpass
import os
import sys
import logging
import logging.handlers
import enum
from argparse import ArgumentParser, RawTextHelpFormatter, FileType, SUPPRESS
from .gitlab_tree import GitlabTree
from .format import PrintFormat
from .method import CloneMethod
from .naming import FolderNaming
from .archive import ArchivedResults
from . import __version__ as VERSION
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log = logging.getLogger(__name__)
def main():
args = parse_args(argv=None if sys.argv[1:] else ['--help'])
if args.version:
print(VERSION)
sys.exit(0)
if args.token is None:
args.token = getpass.getpass(prompt='Gitlab API token: ')
if not args.token:
print('Please specify a valid token with the -t flag or the \'GITLAB_TOKEN\' environment variable')
sys.exit(1)
if args.url is None:
print('Please specify a valid gitlab base url with the -u flag or the \'GITLAB_URL\' environment variable')
sys.exit(1)
elif args.dest is None and args.print is False:
print('Please specify a destination for the gitlab tree')
sys.exit(1)
config_logging(args)
includes=split(args.include)
excludes=split(args.exclude)
tree = GitlabTree(args.url, args.token, args.method, args.naming, args.archived.api_value, includes,
excludes, args.file, args.concurrency, args.recursive, args.verbose)
log.debug("Reading projects tree from gitlab at [%s]", args.url)
tree.load_tree()
if tree.is_empty():
log.fatal("The tree is empty, check your include/exclude patterns or run with more verbosity for debugging")
sys.exit(1)
if args.print:
tree.print_tree(args.print_format)
else:
tree.sync_tree(args.dest)
def split(arg):
return arg.split(",") if arg != "" else None
def config_logging(args):
if args.verbose:
handler = logging.StreamHandler(sys.stdout)
logging.root.handlers = []
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logging.root.addHandler(handler)
level = logging.ERROR if args.print else logging.DEBUG
logging.root.setLevel(level)
log.debug("verbose=[%s], print=[%s], log level set to [%s] level", args.verbose, args.print, level)
os.environ["GIT_PYTHON_TRACE"] = 'full'
def parse_args(argv=None):
example_text = '''examples:
clone an entire gitlab tree using a url and a token:
gitlabber -t <personal access token> -u <gitlab url>
only print the gitlab tree:
gitlabber -p .
clone only projects under subgroup 'MySubGroup' to location '~/GitlabRoot':
gitlabber -i '/MyGroup/MySubGroup**' ~/GitlabRoot
clone only projects under group 'MyGroup' excluding any projects under subgroup 'MySubGroup':
gitlabber -i '/MyGroup**' -x '/MyGroup/MySubGroup**' .
clone an entire gitlab tree except projects under groups named 'ArchiveGroup':
gitlabber -x '/ArchiveGroup**' .
clone projects that start with a case insensitive 'w' using a regular expression:
gitlabber -i '/{[w].*}' .
'''
parser = ArgumentParser(
description='Gitlabber - clones or pulls entire groups/projects tree from gitlab',
prog="gitlabber",
epilog=example_text,
formatter_class=RawTextHelpFormatter)
parser.add_argument(
'dest',
nargs='?',
type=validate_path,
        help='destination path for the cloned tree (created if it doesn\'t exist)')
parser.add_argument(
'-t',
'--token',
metavar=('token'),
default=os.environ.get('GITLAB_TOKEN'),
help='gitlab personal access token https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html')
parser.add_argument(
'-u',
'--url',
metavar=('url'),
default=os.environ.get('GITLAB_URL'),
help='base gitlab url (e.g.: \'http://gitlab.mycompany.com\')')
parser.add_argument(
'--verbose',
action='store_true',
help='print more verbose output')
parser.add_argument(
'-f',
'--file',
metavar=('file'),
help=SUPPRESS)
parser.add_argument(
'-c',
'--concurrency',
default=os.environ.get('GITLABBER_GIT_CONCURRENCY', 1),
type=int,
metavar=('concurrency'),
help=SUPPRESS)
parser.add_argument(
'-p',
'--print',
action='store_true',
help='print the tree without cloning')
parser.add_argument(
'--print-format',
type=PrintFormat.argparse,
default=PrintFormat.TREE,
choices=list(PrintFormat),
help='print format (default: \'tree\')')
parser.add_argument(
'-n',
'--naming',
type=FolderNaming.argparse,
choices=list(FolderNaming),
default=FolderNaming.argparse(os.environ.get('GITLABBER_FOLDER_NAMING', "name")),
help='the folder naming strategy for projects from the gitlab API attributes (default: "name")')
parser.add_argument(
'-m',
'--method',
type=CloneMethod.argparse,
choices=list(CloneMethod),
default=os.environ.get('GITLABBER_CLONE_METHOD', "ssh"),
help='the git transport method to use for cloning (default: "ssh")')
parser.add_argument(
'-a',
'--archived',
type=ArchivedResults.argparse,
choices=list(ArchivedResults),
default=ArchivedResults.INCLUDE,
help='include archived projects and groups in the results (default: "include")')
parser.add_argument(
'-i',
'--include',
metavar=('csv'),
default=os.environ.get('GITLABBER_INCLUDE', ""),
help='comma delimited list of glob patterns of paths to projects or groups to clone/pull')
parser.add_argument(
'-x',
'--exclude',
metavar=('csv'),
default=os.environ.get('GITLABBER_EXCLUDE', ""),
help='comma delimited list of glob patterns of paths to projects or groups to exclude from clone/pull')
parser.add_argument(
'-r',
'--recursive',
action='store_true',
default=False,
help='clone/pull git submodules recursively')
parser.add_argument(
'--version',
action='store_true',
help='print the version')
args = parser.parse_args(argv)
args_print = vars(args).copy()
args_print['token'] = 'xxxxx'
log.debug("running with args [%s]", args_print)
return args
def validate_path(value):
if value.endswith('/'):
return value[:-1]
return value
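# Illustrative behaviour of the helpers above (values are made up):
#
#   split("/MyGroup**,/OtherGroup**")  # -> ["/MyGroup**", "/OtherGroup**"]
#   split("")                          # -> None (no include/exclude filtering)
#   validate_path("~/GitlabRoot/")     # -> "~/GitlabRoot" (trailing slash stripped)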
|
[] |
[] |
[
"GITLABBER_EXCLUDE",
"GITLAB_URL",
"GITLABBER_CLONE_METHOD",
"GITLABBER_GIT_CONCURRENCY",
"GITLABBER_FOLDER_NAMING",
"GITLABBER_INCLUDE",
"GIT_PYTHON_TRACE",
"GITLAB_TOKEN"
] |
[]
|
["GITLABBER_EXCLUDE", "GITLAB_URL", "GITLABBER_CLONE_METHOD", "GITLABBER_GIT_CONCURRENCY", "GITLABBER_FOLDER_NAMING", "GITLABBER_INCLUDE", "GIT_PYTHON_TRACE", "GITLAB_TOKEN"]
|
python
| 8 | 0 | |
proxy/bootstrap/bootstrap_test.go
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package bootstrap
import (
"github.com/apache/servicecomb-mesher/proxy/cmd"
"github.com/apache/servicecomb-mesher/proxy/common"
_ "github.com/apache/servicecomb-mesher/proxy/pkg/egress/archaius"
"github.com/go-chassis/go-archaius"
"github.com/go-chassis/go-chassis/v2/core/config"
"github.com/go-chassis/go-chassis/v2/core/config/model"
"github.com/go-chassis/go-chassis/v2/core/lager"
"github.com/go-chassis/go-chassis/v2/pkg/util/fileutil"
"github.com/stretchr/testify/assert"
"io"
"os"
"path/filepath"
"sync"
"testing"
// rate limiter handler
_ "github.com/go-chassis/go-chassis/v2/middleware/ratelimiter"
)
var o sync.Once = sync.Once{}
var yamlContent = `---
egress:
infra: cse # pilot or cse
address: http://istio-pilot.istio-system:15010
egressRule:
google-ext:
- hosts:
- "www.google.com"
- "*.yahoo.com"
ports:
- port: 80
protocol: HTTP
facebook-ext:
- hosts:
- "www.facebook.com"
ports:
- port: 80
protocol: HTTP`
func TestBootstrap(t *testing.T) {
lager.Init(&lager.Options{LoggerLevel: "DEBUG"})
// init work dir
os.Setenv(fileutil.ChassisHome, filepath.Join("...", "..."))
os.Setenv(fileutil.ChassisConfDir, filepath.Join("...", "...", "conf"))
t.Log(os.Getenv("CHASSIS_HOME"))
// init archaius
archaius.Init(archaius.WithENVSource())
//ini config
config.Init()
protoMap := make(map[string]model.Protocol)
protoMap["http"] = model.Protocol{
Listen: "127.0.0.1:90909",
}
config.GlobalDefinition = &model.GlobalCfg{
ServiceComb: model.ServiceComb{
Protocols: protoMap,
},
}
configMesher := "../../conf/mesher.yaml"
os.Args = []string{"test", "--config", configMesher}
if err := cmd.Init(); err != nil {
panic(err)
}
if err := cmd.Configs.GeneratePortsMap(); err != nil {
panic(err)
}
// init egress.yaml file
d, _ := os.Getwd()
os.Mkdir(filepath.Join(d, "conf"), os.ModePerm)
filename := filepath.Join(d, "conf", "egress.yaml")
os.Remove(filename)
f1, err := os.Create(filename)
assert.NoError(t, err)
defer f1.Close()
_, err = io.WriteString(f1, yamlContent)
assert.NoError(t, err)
t.Run("Test RegisterFramework", func(t *testing.T) {
// case cmd.Configs.Role is empty
cmd.Configs.Role = ""
RegisterFramework()
// case cmd.Configs.Role == common.RoleSidecar
cmd.Configs.Role = common.RoleSidecar
RegisterFramework()
})
t.Run("Test Start", func(t *testing.T) {
// case Protocols is empty
config.GlobalDefinition.ServiceComb.Protocols = map[string]model.Protocol{}
err := Start()
assert.Error(t, err)
// cmd.Configs.LocalServicePorts = "http:9090"
cmd.Configs.LocalServicePorts = "http:9090"
err = Start()
cmd.Configs.LocalServicePorts = ""
RegisterFramework()
SetHandlers()
err = InitEgressChain()
assert.NoError(t, err)
err = Start()
assert.NoError(t, err)
})
}
|
[
"\"CHASSIS_HOME\""
] |
[] |
[
"CHASSIS_HOME"
] |
[]
|
["CHASSIS_HOME"]
|
go
| 1 | 0 | |
examples/reconstruct_simul_data_big.py
|
from smpr3d.util import *
from smpr3d.algorithm import *
from smpr3d.setup import *
import torch as th
import os
import numpy as np
# salloc -C gpu -N 2 -t 30 -c 10 --gres=gpu:8 -A m1759 --ntasks-per-node=8
# srun -N 2 python ./admm_smatrix_dist_pytorch.py
# module purge
# module load pytorch/v1.4.0-gpu
# module list
# Currently Loaded Modulefiles:
# 1) esslurm 2) modules/3.2.11.1 3) cuda/10.1.168 4) nccl/2.5.6
args = Param()
args.io = Param()
args.io.path = '/home/philipp/drop/Public/nesap_hackathon/'
# args.io.path = '../Inputs/'
if os.environ.get('SLURM_PROCID') is not None:
args.io.path = '/global/cscratch1/sd/pelzphil/'
args.io.summary_log_dir = args.io.path + 'log/'
args.io.logname = 'atoms_aberrations104'
args.io.filename_data = 'atoms_aberrations104.h5'
summary = setup_logging(args.io.path, args.io.logname)
args.dist_backend = 'nccl' # 'mpi'
args.dist_init_method = f'file://{args.io.path}sharedfile'
args.node_config = configure_node(args.dist_backend, args.dist_init_method)
args.beam_threshold_percent = 5e-3
args.max_phase_error = np.pi / 64
args.use_full_smatrix = True
args.uniform_initial_intensity = False
dC1 = 30
# %% load data
i = 0
args.io.filename_results = f'random4_dC{dC1}perc_res_{i + 5:03d}.h5'
world_size = args.node_config.world_size
rank = args.node_config.rank
device = args.node_config.device
lam, alpha_rad, C, dx, specimen_thickness_angstrom, vacuum_probe, D, K, K_rank, MY, MX, NY, NX, \
fy, fx, detector_shape, r, I_target, y_max, x_max, y_min, x_min, S_sol, Psi_sol, r_sol = load_smatrix_data_list2(
args.io.path + args.io.filename_data, device, rank, world_size, subset=[0, 1, 2, 3])
# dx = 1/2/dx
lam *= 1e10
ss = S_sol.shape
S_sol1 = th.zeros((ss[0],ss[1]+2,ss[2]+2,ss[3])).to(S_sol.device)
S_sol1[:,:-2,:-2] = S_sol
S_sol = S_sol1
# %% define data-dependent variables
# Fourier space grid on detector
qnp = fourier_coordinates_2D([MY, MX], dx.numpy(), centered=False)
q = th.as_tensor(qnp, device=device)
q2 = th.as_tensor(np.linalg.norm(qnp, axis=0) ** 2, device=device)
# initial aperture amplitude
A_init = initial_probe_amplitude(vacuum_probe, I_target, world_size, rank)
# mask which beams to include in the S-matrix input channels
take_beams = vacuum_probe > args.beam_threshold_percent
B, B_tile, tile_order, beam_numbers, tile_map = prepare_beam_parameters(take_beams, q2, specimen_thickness_angstrom,
alpha_rad * 1.1, lam, args.max_phase_error,
args.use_full_smatrix, device)
# shape of reconstruction variables
S_shape = (B_tile, NY, NX)
Psi_shape = (D, MY, MX)
z_shape = tuple(I_target.shape)
# map of convergence angles
alpha = q.norm(dim=0) * lam
beam_alphas = th.zeros_like(take_beams, dtype=th.float32, device=device) * -1
beam_alphas[take_beams] = alpha[take_beams]
alpha_map = beam_alphas[take_beams]
# %%
print(specimen_thickness_angstrom)
S0, depth_init = initial_smatrix(S_shape, beam_numbers, device, is_unitary=True, include_plane_waves=B == B_tile,
initial_depth=specimen_thickness_angstrom, lam=lam, q2=q2,
is_pinned=False)
tile_numbers = beam_numbers[beam_numbers >= 0]
beam_numbers = th.ones_like(take_beams).cpu().long() * -1
beam_numbers[take_beams] = th.arange(B)
# %% define S-matrix forward and adjoint operators
from smpr3d.operators import A as A1, AH_S as AH_S1
r_min = th.zeros(2, device=device)
def A(S, Psi, r):
return A1(S, Psi, r, r_min=r_min, out=None, Mx=MX, My=MY)
def AH_S(S, Psi, r):
return AH_S1(S, Psi, r, r_min=r_min, out=None, tau=th.tensor([1.0]).to(device), Ny=NY, Nx=NX)
AH_Psi = None
AH_r = None
a = th.sqrt(I_target)
report_smatrix_parameters(rank, world_size, a, S0, B, D, K, MY, MX, NY, NX, fy, fx, B_tile, K_rank,
specimen_thickness_angstrom, depth_init, y_max, x_max, y_min, x_min)
if world_size == 1:
plot(take_beams.cpu().float().numpy(), 'take_beams')
plot(np.fft.fftshift(beam_numbers.cpu().float().numpy()), 'aperture_tiling', cmap='gist_ncar')
# else:
# dist.barrier()
# %% define initial probes
C_target = C.to(device)
C_target[1] = 10
print('C_target:', C_target)
C_model = th.zeros(12, D).to(device)
C_model[:] = C_target
# define data-dependent variables
# Fourier space grid on detector
detector_shape = np.array([MY, MX])
qnp = fourier_coordinates_2D([MY, MX], dx.numpy(), centered=False)
q = th.as_tensor(qnp, device=device)
q2 = th.as_tensor(np.linalg.norm(qnp, axis=0) ** 2, device=device)
# initial aperture amplitude
Ap0 = vacuum_probe
# del I_target
# mask which beams to include in the S-matrix input channels
# take_beams = vacuum_probe > args.beam_threshold_percent / 100
Psi_gen = ZernikeProbe2(q, lam, fft_shifted=True)
Psi_target = Psi_gen(C_target, Ap0).detach()
Psi_model = Psi_gen(C_model, Ap0).detach()
psi_model = th.fft.ifft2(Psi_model, norm='ortho')
cb = fftshift_checkerboard(MY // 2, MX // 2)
fpr1 = Psi_target[0].cpu().numpy()
pr1 = np.fft.ifft2(fpr1, norm='ortho')
fpr2 = Psi_model[0].cpu().numpy()
pr2 = np.fft.ifft2(fpr2, norm='ortho')
from smpr3d.core import SMeta
s_meta = SMeta(take_beams, dx, S_shape, MY, MX, device)
print(s_meta.q_dft)
# report_initial_probes(summary, rank, world_size, Psi_model, psi_model, C_model, specimen_thickness_angstrom, q, lam,
# alpha_rad)
# %% perform reconstruction
# m = [MY, MX]
# plotAbsAngle(complex_numpy(S_sol[0, m[0]:-m[0], m[1]:-m[1]].cpu()), f'S_sol[{0}]')
args.reconstruction_opts = Param()
args.reconstruction_opts.max_iters = 100
args.reconstruction_opts.beta = 1.0
args.reconstruction_opts.tau_S = 1e-4
args.reconstruction_opts.tau_Psi = 1e6
args.reconstruction_opts.tau_r = 8e-3
args.reconstruction_opts.optimize_psi = lambda i: i > 1e3
args.reconstruction_opts.node_config = args.node_config
args.reconstruction_opts.verbose = 2
r0 = r
Psi0 = Psi_sol
(S_n, Psi_n, C_n, r_n), outs, opts = fasta2(s_meta, A, AH_S, AH_Psi, AH_r, prox_D_gaussian, Psi_gen, a, S0, Psi0,
C_model, Ap0, r0, args.reconstruction_opts, S_sol=S_sol, Psi_sol=Psi_sol,
r_sol=r_sol, summary=summary)
# save_results(rank, S_n, Psi_n, C_n, r_n, outs, S_sol, Psi_sol, r_sol, beam_numbers, tile_map, alpha_map, A.coords, A.inds,
# take_beams, lam, alpha_rad, dx, specimen_thickness_angstrom, args.io.path + args.io.filename_results)
# if world_size > 1:
# dist.barrier()
# dist.destroy_process_group()
# %%
# plotcx(S_n[2])
|
[] |
[] |
[
"SLURM_PROCID"
] |
[]
|
["SLURM_PROCID"]
|
python
| 1 | 0 | |
cli/read_test.go
|
package cli
import (
"log"
"os"
"syscall"
)
func ExampleReadSecure() {
// Interactively prompt the user to enter a value. The value provided won't be
// displayed on the screen.
password, err := ReadSecure("Enter your password: ")
if err != nil {
// Handle error
}
log.Printf("you entered: %s", password)
}
func ExampleReadPipedInput() {
// Read a maximum of 32 bytes from standard input
input, err := ReadPipedInput(32)
if len(input) > 0 && err != nil {
// Handle error
}
log.Printf("data received: %s", input)
}
func ExampleSignalsHandler() {
// Register the signals to look for and wait for one
s := <-SignalsHandler([]os.Signal{
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT,
})
log.Printf("signal received: %s", s)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
mantle/cmd/kola/options.go
|
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/coreos/stream-metadata-go/stream"
"github.com/pkg/errors"
"github.com/coreos/mantle/auth"
"github.com/coreos/mantle/fcos"
"github.com/coreos/mantle/kola"
"github.com/coreos/mantle/platform"
"github.com/coreos/mantle/rhcos"
"github.com/coreos/mantle/sdk"
"github.com/coreos/mantle/system"
)
var (
outputDir string
kolaPlatform string
kolaArchitectures = []string{"amd64"}
kolaPlatforms = []string{"aws", "azure", "do", "esx", "gce", "openstack", "packet", "qemu", "qemu-unpriv", "qemu-iso"}
kolaDistros = []string{"fcos", "rhcos"}
)
func init() {
sv := root.PersistentFlags().StringVar
bv := root.PersistentFlags().BoolVar
ss := root.PersistentFlags().StringSlice
ssv := root.PersistentFlags().StringSliceVar
// general options
sv(&outputDir, "output-dir", "", "Temporary output directory for test data and logs")
root.PersistentFlags().StringVarP(&kolaPlatform, "platform", "p", "", "VM platform: "+strings.Join(kolaPlatforms, ", "))
root.PersistentFlags().StringVarP(&kola.Options.Distribution, "distro", "b", "", "Distribution: "+strings.Join(kolaDistros, ", "))
root.PersistentFlags().IntVarP(&kola.TestParallelism, "parallel", "j", 1, "number of tests to run in parallel")
sv(&kola.TAPFile, "tapfile", "", "file to write TAP results to")
root.PersistentFlags().BoolVarP(&kola.Options.NoTestExitError, "no-test-exit-error", "T", false, "Don't exit with non-zero if tests fail")
sv(&kola.Options.BaseName, "basename", "kola", "Cluster name prefix")
ss("debug-systemd-unit", []string{}, "full-unit-name.service to enable SYSTEMD_LOG_LEVEL=debug on. Can be specified multiple times.")
ssv(&kola.DenylistedTests, "denylist-test", []string{}, "Test pattern to add to denylist. Can be specified multiple times.")
bv(&kola.NoNet, "no-net", false, "Don't run tests that require an Internet connection")
ssv(&kola.Tags, "tag", []string{}, "Test tag to run. Can be specified multiple times.")
bv(&kola.Options.SSHOnTestFailure, "ssh-on-test-failure", false, "SSH into a machine when tests fail")
sv(&kola.Options.Stream, "stream", "", "CoreOS stream ID (e.g. for Fedora CoreOS: stable, testing, next)")
sv(&kola.Options.CosaWorkdir, "workdir", "", "coreos-assembler working directory")
sv(&kola.Options.CosaBuildId, "build", "", "coreos-assembler build ID")
// rhcos-specific options
sv(&kola.Options.OSContainer, "oscontainer", "", "oscontainer image pullspec for pivot (RHCOS only)")
// aws-specific options
defaultRegion := os.Getenv("AWS_REGION")
if defaultRegion == "" {
// As everyone knows, this is the one, true region. Everything else is a mirage.
defaultRegion = "us-east-1"
}
sv(&kola.AWSOptions.CredentialsFile, "aws-credentials-file", "", "AWS credentials file (default \"~/.aws/credentials\")")
sv(&kola.AWSOptions.Region, "aws-region", defaultRegion, "AWS region")
sv(&kola.AWSOptions.Profile, "aws-profile", "default", "AWS profile name")
sv(&kola.AWSOptions.AMI, "aws-ami", "alpha", `AWS AMI ID, or (alpha|beta|stable) to use the latest image`)
// See https://github.com/openshift/installer/issues/2919 for example
sv(&kola.AWSOptions.InstanceType, "aws-type", "m5.large", "AWS instance type")
sv(&kola.AWSOptions.SecurityGroup, "aws-sg", "kola", "AWS security group name")
sv(&kola.AWSOptions.IAMInstanceProfile, "aws-iam-profile", "kola", "AWS IAM instance profile name")
// azure-specific options
sv(&kola.AzureOptions.AzureProfile, "azure-profile", "", "Azure profile (default \"~/"+auth.AzureProfilePath+"\")")
sv(&kola.AzureOptions.AzureAuthLocation, "azure-auth", "", "Azure auth location (default \"~/"+auth.AzureAuthPath+"\")")
sv(&kola.AzureOptions.DiskURI, "azure-disk-uri", "", "Azure disk uri (custom images)")
	sv(&kola.AzureOptions.Publisher, "azure-publisher", "CoreOS", "Azure image publisher (default \"CoreOS\")")
	sv(&kola.AzureOptions.Offer, "azure-offer", "CoreOS", "Azure image offer (default \"CoreOS\")")
	sv(&kola.AzureOptions.Sku, "azure-sku", "alpha", "Azure image sku/channel (default \"alpha\")")
	sv(&kola.AzureOptions.Version, "azure-version", "", "Azure image version")
	sv(&kola.AzureOptions.Location, "azure-location", "westus", "Azure location (default \"westus\")")
sv(&kola.AzureOptions.Size, "azure-size", "Standard_D2_v2", "Azure machine size (default \"Standard_D2_v2\")")
// do-specific options
sv(&kola.DOOptions.ConfigPath, "do-config-file", "", "DigitalOcean config file (default \"~/"+auth.DOConfigPath+"\")")
sv(&kola.DOOptions.Profile, "do-profile", "", "DigitalOcean profile (default \"default\")")
sv(&kola.DOOptions.AccessToken, "do-token", "", "DigitalOcean access token (overrides config file)")
sv(&kola.DOOptions.Region, "do-region", "sfo2", "DigitalOcean region slug")
sv(&kola.DOOptions.Size, "do-size", "1gb", "DigitalOcean size slug")
sv(&kola.DOOptions.Image, "do-image", "alpha", "DigitalOcean image ID, {alpha, beta, stable}, or user image name")
// esx-specific options
sv(&kola.ESXOptions.ConfigPath, "esx-config-file", "", "ESX config file (default \"~/"+auth.ESXConfigPath+"\")")
sv(&kola.ESXOptions.Server, "esx-server", "", "ESX server")
sv(&kola.ESXOptions.Profile, "esx-profile", "", "ESX profile (default \"default\")")
sv(&kola.ESXOptions.BaseVMName, "esx-base-vm", "", "ESX base VM name")
// gce-specific options
sv(&kola.GCEOptions.Image, "gce-image", "projects/fedora-coreos-cloud/global/images/family/fedora-coreos-testing", "GCE image, full api endpoints names are accepted if resource is in a different project")
sv(&kola.GCEOptions.Project, "gce-project", "coreos-gce-testing", "GCE project name")
sv(&kola.GCEOptions.Zone, "gce-zone", "us-central1-a", "GCE zone name")
sv(&kola.GCEOptions.MachineType, "gce-machinetype", "n1-standard-1", "GCE machine type")
sv(&kola.GCEOptions.DiskType, "gce-disktype", "pd-ssd", "GCE disk type")
sv(&kola.GCEOptions.Network, "gce-network", "default", "GCE network")
bv(&kola.GCEOptions.ServiceAuth, "gce-service-auth", false, "for non-interactive auth when running within GCE")
sv(&kola.GCEOptions.JSONKeyFile, "gce-json-key", "", "use a service account's JSON key for authentication")
// openstack-specific options
sv(&kola.OpenStackOptions.ConfigPath, "openstack-config-file", "", "Path to a clouds.yaml formatted OpenStack config file. The underlying library defaults to ./clouds.yaml")
sv(&kola.OpenStackOptions.Profile, "openstack-profile", "", "OpenStack profile within clouds.yaml (default \"openstack\")")
sv(&kola.OpenStackOptions.Region, "openstack-region", "", "OpenStack region")
sv(&kola.OpenStackOptions.Image, "openstack-image", "", "OpenStack image ref")
sv(&kola.OpenStackOptions.Flavor, "openstack-flavor", "1", "OpenStack flavor ref")
sv(&kola.OpenStackOptions.Network, "openstack-network", "", "OpenStack network")
sv(&kola.OpenStackOptions.Domain, "openstack-domain", "", "OpenStack domain ID")
sv(&kola.OpenStackOptions.FloatingIPNetwork, "openstack-floating-ip-network", "", "OpenStack network to use when creating a floating IP")
// packet-specific options
sv(&kola.PacketOptions.ConfigPath, "packet-config-file", "", "Packet config file (default \"~/"+auth.PacketConfigPath+"\")")
sv(&kola.PacketOptions.Profile, "packet-profile", "", "Packet profile (default \"default\")")
sv(&kola.PacketOptions.ApiKey, "packet-api-key", "", "Packet API key (overrides config file)")
sv(&kola.PacketOptions.Project, "packet-project", "", "Packet project UUID (overrides config file)")
sv(&kola.PacketOptions.Facility, "packet-facility", "sjc1", "Packet facility code")
sv(&kola.PacketOptions.Plan, "packet-plan", "", "Packet plan slug (default arch-dependent, e.g. \"t1.small.x86\")")
sv(&kola.PacketOptions.Architecture, "packet-architecture", "x86_64", "Packet CPU architecture")
sv(&kola.PacketOptions.IPXEURL, "packet-ipxe-url", "", "iPXE script URL (default arch-dependent, e.g. \"https://raw.githubusercontent.com/coreos/coreos-assembler/main/mantle/platform/api/packet/fcos-x86_64.ipxe\")")
sv(&kola.PacketOptions.ImageURL, "packet-image-url", "", "image URL (default arch-dependent, e.g. \"https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/31.20200223.3.0/x86_64/fedora-coreos-31.20200223.3.0-metal.x86_64.raw.xz\")")
// QEMU-specific options
sv(&kola.QEMUOptions.Firmware, "qemu-firmware", "", "Boot firmware: bios,uefi,uefi-secure (default bios)")
sv(&kola.QEMUOptions.DiskImage, "qemu-image", "", "path to CoreOS disk image")
sv(&kola.QEMUOptions.DiskSize, "qemu-size", "", "Resize target disk via qemu-img resize [+]SIZE")
sv(&kola.QEMUOptions.Memory, "qemu-memory", "", "Default memory size in MB")
bv(&kola.QEMUOptions.NbdDisk, "qemu-nbd-socket", false, "Present the disks over NBD socket to qemu")
bv(&kola.QEMUOptions.MultiPathDisk, "qemu-multipath", false, "Enable multiple paths for the main disk")
bv(&kola.QEMUOptions.Native4k, "qemu-native-4k", false, "Force 4k sectors for main disk")
bv(&kola.QEMUOptions.Nvme, "qemu-nvme", false, "Use NVMe for main disk")
bv(&kola.QEMUOptions.Swtpm, "qemu-swtpm", true, "Create temporary software TPM")
sv(&kola.QEMUIsoOptions.IsoPath, "qemu-iso", "", "path to CoreOS ISO image")
}
// Sync up the command line options if there is dependency
func syncOptionsImpl(useCosa bool) error {
validateOption := func(name, item string, valid []string) error {
for _, v := range valid {
if v == item {
return nil
}
}
return fmt.Errorf("unsupported %v %q", name, item)
}
// TODO: Could also auto-synchronize if e.g. --aws-ami is passed
if kolaPlatform == "" {
if kola.QEMUIsoOptions.IsoPath != "" {
kolaPlatform = "qemu-iso"
} else {
kolaPlatform = "qemu-unpriv"
}
}
// There used to be a "privileged" qemu path, it is no longer supported.
// Alias qemu to qemu-unpriv.
if kolaPlatform == "qemu" {
kolaPlatform = "qemu-unpriv"
}
// native 4k requires a UEFI bootloader
if kola.QEMUOptions.Native4k && kola.QEMUOptions.Firmware == "bios" {
return fmt.Errorf("native 4k requires uefi firmware")
}
// default to BIOS, UEFI for aarch64 and x86(only for 4k)
if kola.QEMUOptions.Firmware == "" {
if system.RpmArch() == "aarch64" {
kola.QEMUOptions.Firmware = "uefi"
} else if system.RpmArch() == "x86_64" && kola.QEMUOptions.Native4k {
kola.QEMUOptions.Firmware = "uefi"
} else {
kola.QEMUOptions.Firmware = "bios"
}
}
if err := validateOption("platform", kolaPlatform, kolaPlatforms); err != nil {
return err
}
// if no external dirs were given, automatically add the working directory;
// does nothing if ./tests/kola/ doesn't exist
if len(runExternals) == 0 {
runExternals = []string{"."}
}
foundCosa := false
if kola.Options.CosaBuildId != "" {
// specified --build? fetch that build. in this path we *require* a
// cosa workdir, either assumed as PWD or via --workdir.
if kola.Options.CosaWorkdir == "" {
kola.Options.CosaWorkdir = "."
}
localbuild, err := sdk.GetLocalBuild(kola.Options.CosaWorkdir, kola.Options.CosaBuildId)
if err != nil {
return err
}
kola.CosaBuild = localbuild
foundCosa = true
} else if kola.Options.Stream != "" {
if err := syncStreamOptions(); err != nil {
return err
}
} else {
if kola.Options.CosaWorkdir == "" {
// specified neither --build nor --workdir; only opportunistically
// try to use the PWD as the workdir, but don't error out if it's
// not
if isroot, err := sdk.IsCosaRoot("."); err != nil {
return err
} else if isroot {
kola.Options.CosaWorkdir = "."
}
}
if kola.Options.CosaWorkdir != "" && kola.Options.CosaWorkdir != "none" {
localbuild, err := sdk.GetLatestLocalBuild(kola.Options.CosaWorkdir)
if err != nil {
if !os.IsNotExist(errors.Cause(err)) {
return err
}
} else {
kola.Options.CosaBuildId = localbuild.Meta.BuildID
kola.CosaBuild = localbuild
foundCosa = true
}
} else if kola.QEMUOptions.DiskImage == "" {
localbuild, err := sdk.GetLocalFastBuildQemu()
if err != nil {
return err
}
if localbuild != "" {
kola.QEMUOptions.DiskImage = localbuild
}
}
}
if foundCosa && useCosa {
if err := syncCosaOptions(); err != nil {
return err
}
}
	units, _ := root.PersistentFlags().GetStringSlice("debug-systemd-unit")
for _, unit := range units {
kola.Options.SystemdDropins = append(kola.Options.SystemdDropins, platform.SystemdDropin{
Unit: unit,
Name: "10-debug.conf",
Contents: "[Service]\nEnvironment=SYSTEMD_LOG_LEVEL=debug",
})
}
if kola.Options.OSContainer != "" && kola.Options.Distribution != "rhcos" {
return fmt.Errorf("oscontainer is only supported on rhcos")
}
if kola.Options.Distribution == "" {
kola.Options.Distribution = kolaDistros[0]
} else if err := validateOption("distro", kola.Options.Distribution, kolaDistros); err != nil {
return err
}
return nil
}
// syncOptions updates default values of options based on provided ones
func syncOptions() error {
return syncOptionsImpl(true)
}
// syncCosaOptions sets unset platform-specific
// options that can be derived from the cosa build metadata
func syncCosaOptions() error {
switch kolaPlatform {
case "qemu-unpriv", "qemu":
if kola.QEMUOptions.DiskImage == "" && kola.CosaBuild.Meta.BuildArtifacts.Qemu != nil {
kola.QEMUOptions.DiskImage = filepath.Join(kola.CosaBuild.Dir, kola.CosaBuild.Meta.BuildArtifacts.Qemu.Path)
}
case "qemu-iso":
if kola.QEMUIsoOptions.IsoPath == "" && kola.CosaBuild.Meta.BuildArtifacts.LiveIso != nil {
kola.QEMUIsoOptions.IsoPath = filepath.Join(kola.CosaBuild.Dir, kola.CosaBuild.Meta.BuildArtifacts.LiveIso.Path)
}
}
if kola.Options.Distribution == "" {
distro, err := sdk.TargetDistro(kola.CosaBuild.Meta)
if err != nil {
return err
}
kola.Options.Distribution = distro
}
runExternals = append(runExternals, filepath.Join(kola.Options.CosaWorkdir, "src/config"))
return nil
}
// syncStreamOptions sets the underlying raw options based on a stream
// Currently this only handles AWS to demonstrate the idea; we'll
// add generic code to map between streams and cosa builds soon.
func syncStreamOptions() error {
if kola.Options.Stream == "" {
return nil
}
var err error
var artifacts *stream.Arch
switch kola.Options.Distribution {
case "":
return fmt.Errorf("Must specify -b/--distro with --stream")
case "fcos":
artifacts, err = fcos.FetchStreamThisArchitecture(kola.Options.Stream)
if err != nil {
return errors.Wrapf(err, "failed to fetch stream")
}
case "rhcos":
artifacts, err = rhcos.FetchStreamThisArchitecture(kola.Options.Stream)
if err != nil {
return errors.Wrapf(err, "failed to fetch stream")
}
break
default:
return fmt.Errorf("Unhandled stream for distribution %s", kola.Options.Distribution)
}
release := ""
extra := ""
switch kolaPlatform {
case "aws":
regionimg := artifacts.Images.Aws.Regions[kola.AWSOptions.Region]
release = regionimg.Release
kola.AWSOptions.AMI = regionimg.Image
extra = fmt.Sprintf("(region %s, %s)", kola.AWSOptions.Region, kola.AWSOptions.AMI)
default:
return fmt.Errorf("Unhandled platform %s for stream", kolaPlatform)
}
fmt.Printf("Resolved distro=%s stream=%s platform=%s to release=%s %s\n", kola.Options.Distribution, kola.Options.Stream, kolaPlatform, release, extra)
return nil
}
|
[
"\"AWS_REGION\""
] |
[] |
[
"AWS_REGION"
] |
[]
|
["AWS_REGION"]
|
go
| 1 | 0 | |
bank-of-anthos/src/frontend/frontend.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Web service for frontend
"""
import datetime
import json
import logging
import os
import socket
import sys
from decimal import Decimal
import requests
from requests.exceptions import HTTPError, RequestException
import jwt
from flask import Flask, abort, jsonify, make_response, redirect, \
render_template, request, url_for
from opentelemetry import trace
from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.propagators import set_global_textmap
from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
from opentelemetry.tools.cloud_trace_propagator import CloudTraceFormatPropagator
from opentelemetry.instrumentation.flask import FlaskInstrumentor
from opentelemetry.instrumentation.requests import RequestsInstrumentor
from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor
# pylint: disable-msg=too-many-locals
def create_app():
"""Flask application factory to create instances
of the Frontend Flask App
"""
app = Flask(__name__)
# Disabling unused-variable for lines with route decorated functions
# as pylint thinks they are unused
# pylint: disable=unused-variable
@app.route('/version', methods=['GET'])
def version():
"""
Service version endpoint
"""
return os.environ.get('VERSION'), 200
@app.route('/ready', methods=['GET'])
def readiness():
"""
Readiness probe
"""
return 'ok', 200
@app.route('/whereami', methods=['GET'])
def whereami():
"""
Returns the cluster name + zone name where this Pod is running.
"""
return "Cluster: " + cluster_name + ", Pod: " + pod_name + ", Zone: " + pod_zone, 200
@app.route("/")
def root():
"""
Renders home page or login page, depending on authentication status.
"""
token = request.cookies.get(app.config['TOKEN_NAME'])
if not verify_token(token):
return login_page()
return home()
@app.route("/home")
def home():
"""
Renders home page. Redirects to /login if token is not valid
"""
token = request.cookies.get(app.config['TOKEN_NAME'])
if not verify_token(token):
# user isn't authenticated
app.logger.debug('User isn\'t authenticated. Redirecting to login page.')
return redirect(url_for('login_page',
_external=True,
_scheme=app.config['SCHEME']))
token_data = jwt.decode(token, verify=False)
display_name = token_data['name']
username = token_data['user']
account_id = token_data['acct']
hed = {'Authorization': 'Bearer ' + token}
# get balance
balance = None
try:
url = '{}/{}'.format(app.config["BALANCES_URI"], account_id)
app.logger.debug('Getting account balance.')
response = requests.get(url=url, headers=hed, timeout=app.config['BACKEND_TIMEOUT'])
if response:
balance = response.json()
except (requests.exceptions.RequestException, ValueError) as err:
app.logger.error('Error getting account balance: %s', str(err))
# get history
transaction_list = None
try:
url = '{}/{}'.format(app.config["HISTORY_URI"], account_id)
app.logger.debug('Getting transaction history.')
response = requests.get(url=url, headers=hed, timeout=app.config['BACKEND_TIMEOUT'])
if response:
transaction_list = response.json()
except (requests.exceptions.RequestException, ValueError) as err:
app.logger.error('Error getting transaction history: %s', str(err))
# get contacts
contacts = []
try:
url = '{}/{}'.format(app.config["CONTACTS_URI"], username)
app.logger.debug('Getting contacts.')
response = requests.get(url=url, headers=hed, timeout=app.config['BACKEND_TIMEOUT'])
if response:
contacts = response.json()
except (requests.exceptions.RequestException, ValueError) as err:
app.logger.error('Error getting contacts: %s', str(err))
_populate_contact_labels(account_id, transaction_list, contacts)
return render_template('index.html',
cluster_name=cluster_name,
pod_name=pod_name,
pod_zone=pod_zone,
cymbal_logo=os.getenv('CYMBAL_LOGO', 'false'),
history=transaction_list,
balance=balance,
name=display_name,
account_id=account_id,
contacts=contacts,
message=request.args.get('msg', None),
bank_name=os.getenv('BANK_NAME', 'Bank of Anthos'))
def _populate_contact_labels(account_id, transactions, contacts):
"""
Populate contact labels for the passed transactions.
Side effect:
Take each transaction and set the 'accountLabel' field with the label of
the contact each transaction was associated with. If there was no
associated contact, set 'accountLabel' to None.
If any parameter is None, nothing happens.
Params: account_id - the account id for the user owning the transaction list
transactions - a list of transactions as key/value dicts
[{transaction1}, {transaction2}, ...]
contacts - a list of contacts as key/value dicts
[{contact1}, {contact2}, ...]
"""
app.logger.debug('Populating contact labels.')
if account_id is None or transactions is None or contacts is None:
return
# Map contact accounts to their labels. If no label found, default to None.
contact_map = {c['account_num']: c.get('label') for c in contacts}
# Populate the 'accountLabel' field. If no match found, default to None.
for trans in transactions:
if trans['toAccountNum'] == account_id:
trans['accountLabel'] = contact_map.get(trans['fromAccountNum'])
elif trans['fromAccountNum'] == account_id:
trans['accountLabel'] = contact_map.get(trans['toAccountNum'])
@app.route('/payment', methods=['POST'])
def payment():
"""
Submits payment request to ledgerwriter service
Fails if:
- token is not valid
- basic validation checks fail
- response code from ledgerwriter is not 201
"""
token = request.cookies.get(app.config['TOKEN_NAME'])
if not verify_token(token):
# user isn't authenticated
app.logger.error('Error submitting payment: user is not authenticated.')
return abort(401)
try:
account_id = jwt.decode(token, verify=False)['acct']
recipient = request.form['account_num']
if recipient == 'add':
recipient = request.form['contact_account_num']
label = request.form.get('contact_label', None)
if label:
# new contact. Add to contacts list
_add_contact(label,
recipient,
app.config['LOCAL_ROUTING'],
False)
transaction_data = {"fromAccountNum": account_id,
"fromRoutingNum": app.config['LOCAL_ROUTING'],
"toAccountNum": recipient,
"toRoutingNum": app.config['LOCAL_ROUTING'],
"amount": int(Decimal(request.form['amount']) * 100),
"uuid": request.form['uuid']}
_submit_transaction(transaction_data)
app.logger.info('Payment initiated successfully.')
return redirect(url_for('home',
msg='Payment successful',
_external=True,
_scheme=app.config['SCHEME']))
except requests.exceptions.RequestException as err:
app.logger.error('Error submitting payment: %s', str(err))
except UserWarning as warn:
app.logger.error('Error submitting payment: %s', str(warn))
msg = 'Payment failed: {}'.format(str(warn))
return redirect(url_for('home',
msg=msg,
_external=True,
_scheme=app.config['SCHEME']))
return redirect(url_for('home',
msg='Payment failed',
_external=True,
_scheme=app.config['SCHEME']))
@app.route('/deposit', methods=['POST'])
def deposit():
"""
Submits deposit request to ledgerwriter service
Fails if:
- token is not valid
- routing number == local routing number
- response code from ledgerwriter is not 201
"""
token = request.cookies.get(app.config['TOKEN_NAME'])
if not verify_token(token):
# user isn't authenticated
app.logger.error('Error submitting deposit: user is not authenticated.')
return abort(401)
try:
# get account id from token
account_id = jwt.decode(token, verify=False)['acct']
if request.form['account'] == 'add':
external_account_num = request.form['external_account_num']
external_routing_num = request.form['external_routing_num']
if external_routing_num == app.config['LOCAL_ROUTING']:
raise UserWarning("invalid routing number")
external_label = request.form.get('external_label', None)
if external_label:
# new contact. Add to contacts list
_add_contact(external_label,
external_account_num,
external_routing_num,
True)
else:
account_details = json.loads(request.form['account'])
external_account_num = account_details['account_num']
external_routing_num = account_details['routing_num']
transaction_data = {"fromAccountNum": external_account_num,
"fromRoutingNum": external_routing_num,
"toAccountNum": account_id,
"toRoutingNum": app.config['LOCAL_ROUTING'],
"amount": int(Decimal(request.form['amount']) * 100),
"uuid": request.form['uuid']}
_submit_transaction(transaction_data)
app.logger.info('Deposit submitted successfully.')
return redirect(url_for('home',
msg='Deposit successful',
_external=True,
_scheme=app.config['SCHEME']))
except requests.exceptions.RequestException as err:
app.logger.error('Error submitting deposit: %s', str(err))
except UserWarning as warn:
app.logger.error('Error submitting deposit: %s', str(warn))
msg = 'Deposit failed: {}'.format(str(warn))
return redirect(url_for('home',
msg=msg,
_external=True,
_scheme=app.config['SCHEME']))
return redirect(url_for('home',
msg='Deposit failed',
_external=True,
_scheme=app.config['SCHEME']))
def _submit_transaction(transaction_data):
app.logger.debug('Submitting transaction.')
token = request.cookies.get(app.config['TOKEN_NAME'])
hed = {'Authorization': 'Bearer ' + token,
'content-type': 'application/json'}
resp = requests.post(url=app.config["TRANSACTIONS_URI"],
data=jsonify(transaction_data).data,
headers=hed,
timeout=app.config['BACKEND_TIMEOUT'])
try:
resp.raise_for_status() # Raise on HTTP Status code 4XX or 5XX
except requests.exceptions.HTTPError as http_request_err:
raise UserWarning(resp.text) from http_request_err
def _add_contact(label, acct_num, routing_num, is_external_acct=False):
"""
Submits a new contact to the contact service.
Raise: UserWarning if the response status is 4xx or 5xx.
"""
app.logger.debug('Adding new contact.')
token = request.cookies.get(app.config['TOKEN_NAME'])
hed = {'Authorization': 'Bearer ' + token,
'content-type': 'application/json'}
contact_data = {
'label': label,
'account_num': acct_num,
'routing_num': routing_num,
'is_external': is_external_acct
}
token_data = jwt.decode(token, verify=False)
url = '{}/{}'.format(app.config["CONTACTS_URI"], token_data['user'])
resp = requests.post(url=url,
data=jsonify(contact_data).data,
headers=hed,
timeout=app.config['BACKEND_TIMEOUT'])
try:
resp.raise_for_status() # Raise on HTTP Status code 4XX or 5XX
except requests.exceptions.HTTPError as http_request_err:
raise UserWarning(resp.text) from http_request_err
@app.route("/login", methods=['GET'])
def login_page():
"""
Renders login page. Redirects to /home if user already has a valid token
"""
token = request.cookies.get(app.config['TOKEN_NAME'])
if verify_token(token):
# already authenticated
app.logger.debug('User already authenticated. Redirecting to /home')
return redirect(url_for('home',
_external=True,
_scheme=app.config['SCHEME']))
return render_template('login.html',
cymbal_logo=os.getenv('CYMBAL_LOGO', 'false'),
cluster_name=cluster_name,
pod_name=pod_name,
pod_zone=pod_zone,
message=request.args.get('msg', None),
default_user=os.getenv('DEFAULT_USERNAME', ''),
default_password=os.getenv('DEFAULT_PASSWORD', ''),
bank_name=os.getenv('BANK_NAME', 'Bank of Anthos'))
@app.route('/login', methods=['POST'])
def login():
"""
Submits login request to userservice and saves resulting token
Fails if userservice does not accept input username and password
"""
return _login_helper(request.form['username'],
request.form['password'])
def _login_helper(username, password):
try:
app.logger.debug('Logging in.')
req = requests.get(url=app.config["LOGIN_URI"],
params={'username': username, 'password': password})
req.raise_for_status() # Raise on HTTP Status code 4XX or 5XX
# login success
token = req.json()['token'].encode('utf-8')
claims = jwt.decode(token, verify=False)
max_age = claims['exp'] - claims['iat']
resp = make_response(redirect(url_for('home',
_external=True,
_scheme=app.config['SCHEME'])))
resp.set_cookie(app.config['TOKEN_NAME'], token, max_age=max_age)
app.logger.info('Successfully logged in.')
return resp
except (RequestException, HTTPError) as err:
app.logger.error('Error logging in: %s', str(err))
return redirect(url_for('login',
msg='Login Failed',
_external=True,
_scheme=app.config['SCHEME']))
@app.route("/signup", methods=['GET'])
def signup_page():
"""
Renders signup page. Redirects to /login if token is not valid
"""
token = request.cookies.get(app.config['TOKEN_NAME'])
if verify_token(token):
# already authenticated
app.logger.debug('User already authenticated. Redirecting to /home')
return redirect(url_for('home',
_external=True,
_scheme=app.config['SCHEME']))
return render_template('signup.html',
cymbal_logo=os.getenv('CYMBAL_LOGO', 'false'),
cluster_name=cluster_name,
pod_name=pod_name,
pod_zone=pod_zone,
bank_name=os.getenv('BANK_NAME', 'Bank of Anthos'))
@app.route("/signup", methods=['POST'])
def signup():
"""
Submits signup request to userservice
Fails if userservice does not accept input form data
"""
try:
# create user
app.logger.debug('Creating new user.')
resp = requests.post(url=app.config["USERSERVICE_URI"],
data=request.form,
timeout=app.config['BACKEND_TIMEOUT'])
if resp.status_code == 201:
# user created. Attempt login
app.logger.info('New user created.')
return _login_helper(request.form['username'],
request.form['password'])
except requests.exceptions.RequestException as err:
app.logger.error('Error creating new user: %s', str(err))
return redirect(url_for('login',
msg='Error: Account creation failed',
_external=True,
_scheme=app.config['SCHEME']))
@app.route('/logout', methods=['POST'])
def logout():
"""
Logs out user by deleting token cookie and redirecting to login page
"""
app.logger.info('Logging out.')
resp = make_response(redirect(url_for('login_page',
_external=True,
_scheme=app.config['SCHEME'])))
resp.delete_cookie(app.config['TOKEN_NAME'])
return resp
def verify_token(token):
"""
Validates token using userservice public key
"""
app.logger.debug('Verifying token.')
if token is None:
return False
try:
jwt.decode(token, key=app.config['PUBLIC_KEY'], algorithms='RS256', verify=True)
app.logger.debug('Token verified.')
return True
except jwt.exceptions.InvalidTokenError as err:
app.logger.error('Error validating token: %s', str(err))
return False
# register html template formatters
def format_timestamp_day(timestamp):
""" Format the input timestamp day in a human readable way """
# TODO: time zones?
date = datetime.datetime.strptime(timestamp, app.config['TIMESTAMP_FORMAT'])
return date.strftime('%d')
def format_timestamp_month(timestamp):
""" Format the input timestamp month in a human readable way """
# TODO: time zones?
date = datetime.datetime.strptime(timestamp, app.config['TIMESTAMP_FORMAT'])
return date.strftime('%b')
def format_currency(int_amount):
""" Format the input currency in a human readable way """
if int_amount is None:
return '$---'
amount_str = '${:0,.2f}'.format(abs(Decimal(int_amount)/100))
if int_amount < 0:
amount_str = '-' + amount_str
return amount_str
# set up global variables
app.config["TRANSACTIONS_URI"] = 'http://{}/transactions'.format(
os.environ.get('TRANSACTIONS_API_ADDR'))
app.config["USERSERVICE_URI"] = 'http://{}/users'.format(
os.environ.get('USERSERVICE_API_ADDR'))
app.config["BALANCES_URI"] = 'http://{}/balances'.format(
os.environ.get('BALANCES_API_ADDR'))
app.config["HISTORY_URI"] = 'http://{}/transactions'.format(
os.environ.get('HISTORY_API_ADDR'))
app.config["LOGIN_URI"] = 'http://{}/login'.format(
os.environ.get('USERSERVICE_API_ADDR'))
app.config["CONTACTS_URI"] = 'http://{}/contacts'.format(
os.environ.get('CONTACTS_API_ADDR'))
app.config['PUBLIC_KEY'] = open(os.environ.get('PUB_KEY_PATH'), 'r').read()
app.config['LOCAL_ROUTING'] = os.getenv('LOCAL_ROUTING_NUM')
app.config['BACKEND_TIMEOUT'] = 4 # timeout in seconds for calls to the backend
app.config['TOKEN_NAME'] = 'token'
app.config['TIMESTAMP_FORMAT'] = '%Y-%m-%dT%H:%M:%S.%f%z'
app.config['SCHEME'] = os.environ.get('SCHEME', 'http')
# where am I?
metadata_url = 'http://metadata.google.internal/computeMetadata/v1/'
metadata_headers = {'Metadata-Flavor': 'Google'}
# get GKE cluster name
cluster_name = "unknown"
try:
req = requests.get(metadata_url + 'instance/attributes/cluster-name',
headers=metadata_headers)
if req.ok:
cluster_name = str(req.text)
except (RequestException, HTTPError) as err:
app.logger.warning("Unable to capture GKE cluster name.")
# get GKE pod name
pod_name = "unknown"
pod_name = socket.gethostname()
# get GKE node zone
pod_zone = "unknown"
try:
req = requests.get(metadata_url + 'instance/zone',
headers=metadata_headers)
if req.ok:
pod_zone = str(req.text.split("/")[3])
except (RequestException, HTTPError) as err:
app.logger.warning("Unable to capture GKE node zone.")
# register formater functions
app.jinja_env.globals.update(format_currency=format_currency)
app.jinja_env.globals.update(format_timestamp_month=format_timestamp_month)
app.jinja_env.globals.update(format_timestamp_day=format_timestamp_day)
# set log formatting
date_format = "%Y-%m-%d %H:%M:%S"
message_format = '%(asctime)s | [%(levelname)s] | %(funcName)s | %(message)s'
logging.basicConfig(format= message_format, datefmt= date_format, stream=sys.stdout)
# set log level
log_levels = {
"DEBUG": logging.DEBUG,
"WARNING": logging.WARNING,
"INFO": logging.INFO,
"ERROR": logging.ERROR,
"CRITICAL": logging.CRITICAL
}
level = logging.INFO #default
user_log_level = os.environ.get("LOG_LEVEL")
if user_log_level is not None and user_log_level.upper() in log_levels:
level = log_levels.get(user_log_level.upper())
app.logger.setLevel(level)
app.logger.info("Starting frontend.")
# Set up tracing and export spans to Cloud Trace.
if os.environ['ENABLE_TRACING'] == "true":
app.logger.info("✅ Tracing enabled.")
trace.set_tracer_provider(TracerProvider())
cloud_trace_exporter = CloudTraceSpanExporter()
trace.get_tracer_provider().add_span_processor(
BatchExportSpanProcessor(cloud_trace_exporter)
)
set_global_textmap(CloudTraceFormatPropagator())
# Add tracing auto-instrumentation for Flask, jinja and requests
FlaskInstrumentor().instrument_app(app)
RequestsInstrumentor().instrument()
Jinja2Instrumentor().instrument()
else:
app.logger.info("🚫 Tracing disabled.")
return app
if __name__ == "__main__":
# Create an instance of flask server when called directly
FRONTEND = create_app()
FRONTEND.run()
|
[] |
[] |
[
"DEFAULT_USERNAME",
"CONTACTS_API_ADDR",
"CYMBAL_LOGO",
"HISTORY_API_ADDR",
"LOG_LEVEL",
"VERSION",
"BALANCES_API_ADDR",
"TRANSACTIONS_API_ADDR",
"PUB_KEY_PATH",
"ENABLE_TRACING",
"BANK_NAME",
"USERSERVICE_API_ADDR",
"LOCAL_ROUTING_NUM",
"SCHEME",
"DEFAULT_PASSWORD"
] |
[]
|
["DEFAULT_USERNAME", "CONTACTS_API_ADDR", "CYMBAL_LOGO", "HISTORY_API_ADDR", "LOG_LEVEL", "VERSION", "BALANCES_API_ADDR", "TRANSACTIONS_API_ADDR", "PUB_KEY_PATH", "ENABLE_TRACING", "BANK_NAME", "USERSERVICE_API_ADDR", "LOCAL_ROUTING_NUM", "SCHEME", "DEFAULT_PASSWORD"]
|
python
| 15 | 0 | |
lei/asgi.py
|
"""
ASGI config for lei project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lei.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pymatgen/io/vasp/tests/test_inputs.py
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import pickle
import os
import numpy as np
import warnings
import scipy.constants as const
from pathlib import Path
from monty.tempfile import ScratchDir
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.vasp.inputs import Incar, Poscar, Kpoints, Potcar, \
PotcarSingle, VaspInput, BadIncarWarning
from pymatgen import Composition, Structure
from pymatgen.electronic_structure.core import Magmom
from monty.io import zopen
class PoscarTest(PymatgenTest):
def test_init(self):
filepath = self.TEST_FILES_DIR / 'POSCAR'
poscar = Poscar.from_file(filepath, check_for_POTCAR=False)
comp = poscar.structure.composition
self.assertEqual(comp, Composition("Fe4P4O16"))
# Vasp 4 type with symbols at the end.
poscar_string = """Test1
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 F
"""
poscar = Poscar.from_string(poscar_string)
self.assertEqual(poscar.structure.composition, Composition("SiF"))
poscar_string = ""
self.assertRaises(ValueError, Poscar.from_string, poscar_string)
# Vasp 4 style file with default names, i.e. no element symbol found.
poscar_string = """Test2
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
direct
0.000000 0.000000 0.000000
0.750000 0.500000 0.750000
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
poscar = Poscar.from_string(poscar_string)
self.assertEqual(poscar.structure.composition, Composition("HHe"))
# Vasp 4 style file with default names, i.e. no element symbol found.
poscar_string = """Test3
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
Selective dynamics
direct
0.000000 0.000000 0.000000 T T T Si
0.750000 0.500000 0.750000 F F F O
"""
poscar = Poscar.from_string(poscar_string)
self.assertEqual(poscar.selective_dynamics, [[True, True, True],
[False, False, False]])
self.selective_poscar = poscar
def test_from_file(self):
filepath = self.TEST_FILES_DIR / 'POSCAR.symbols_natoms_multilines'
poscar = Poscar.from_file(filepath, check_for_POTCAR=False,
read_velocities=False)
ordered_expected_elements = ['Fe', 'Cr', 'Fe', 'Fe', 'Cr', 'Cr', 'Cr',
'Cr',
'Fe', 'Fe', 'Cr', 'Fe', 'Cr', 'Fe', 'Fe',
'Cr',
'Fe', 'Cr', 'Fe', 'Fe', 'Fe', 'Fe', 'Cr',
'Fe',
'Ni', 'Fe', 'Fe', 'Fe', 'Fe', 'Fe', 'Cr',
'Cr',
'Cr', 'Fe', 'Fe', 'Fe', 'Fe', 'Fe', 'Fe',
'Cr',
'Fe', 'Fe', 'Ni', 'Fe', 'Fe', 'Fe', 'Cr',
'Cr',
'Fe', 'Fe', 'Fe', 'Fe', 'Fe']
self.assertEqual([site.specie.symbol for site in poscar.structure],
ordered_expected_elements)
def test_to_from_dict(self):
poscar_string = """Test3
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
Selective dynamics
direct
0.000000 0.000000 0.000000 T T T Si
0.750000 0.500000 0.750000 F F F O
"""
poscar = Poscar.from_string(poscar_string)
d = poscar.as_dict()
poscar2 = Poscar.from_dict(d)
self.assertEqual(poscar2.comment, "Test3")
self.assertTrue(all(poscar2.selective_dynamics[0]))
self.assertFalse(all(poscar2.selective_dynamics[1]))
def test_cart_scale(self):
poscar_string = """Test1
1.1
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
Si F
1 1
cart
0.000000 0.00000000 0.00000000
3.840198 1.50000000 2.35163175
"""
p = Poscar.from_string(poscar_string)
site = p.structure[1]
self.assertArrayAlmostEqual(site.coords,
np.array([3.840198, 1.5, 2.35163175]) * 1.1)
def test_significant_figures(self):
si = 14
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
# Silicon structure for testing.
latt = [[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]
struct = Structure(latt, [si, si], coords)
poscar = Poscar(struct)
expected_str = '''Si2
1.0
3.84 0.00 0.00
1.92 3.33 0.00
0.00 -2.22 3.14
Si
2
direct
0.00 0.00 0.00 Si
0.75 0.50 0.75 Si
'''
actual_str = poscar.get_string(significant_figures=2)
self.assertEqual(actual_str, expected_str, "Wrong POSCAR output!")
def test_str(self):
si = 14
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
# Silicon structure for testing.
latt = [[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]
struct = Structure(latt, [si, si], coords)
poscar = Poscar(struct)
expected_str = '''Si2
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
Si
2
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 Si
'''
self.assertEqual(str(poscar), expected_str, "Wrong POSCAR output!")
# Vasp 4 type with symbols at the end.
poscar_string = """Test1
1.0
-3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 F
"""
expected = """Test1
1.0
3.840198 -0.000000 -0.000000
-1.920099 -3.325710 -0.000000
-0.000000 2.217138 -3.135509
Si F
1 1
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 F
"""
poscar = Poscar.from_string(poscar_string)
self.assertEqual(str(poscar), expected)
def test_from_md_run(self):
# Parsing from an MD type run with velocities and predictor corrector data
p = Poscar.from_file(self.TEST_FILES_DIR / "CONTCAR.MD", check_for_POTCAR=False)
self.assertAlmostEqual(np.sum(np.array(p.velocities)), 0.0065417961324)
self.assertEqual(p.predictor_corrector[0][0][0], 0.33387820E+00)
self.assertEqual(p.predictor_corrector[0][1][1], -0.10583589E-02)
def test_write_MD_poscar(self):
# Parsing from an MD type run with velocities and predictor corrector data
# And writing a new POSCAR from the new structure
p = Poscar.from_file(self.TEST_FILES_DIR / "CONTCAR.MD", check_for_POTCAR=False)
tempfname = Path("POSCAR.testing.md")
p.write_file(tempfname)
p3 = Poscar.from_file(tempfname)
self.assertArrayAlmostEqual(p.structure.lattice.abc,
p3.structure.lattice.abc, 5)
self.assertArrayAlmostEqual(p.velocities,
p3.velocities, 5)
self.assertArrayAlmostEqual(p.predictor_corrector,
p3.predictor_corrector, 5)
self.assertEqual(p.predictor_corrector_preamble,
p3.predictor_corrector_preamble)
tempfname.unlink()
def test_setattr(self):
filepath = self.TEST_FILES_DIR / 'POSCAR'
poscar = Poscar.from_file(filepath, check_for_POTCAR=False)
self.assertRaises(ValueError, setattr, poscar, 'velocities',
[[0, 0, 0]])
poscar.selective_dynamics = np.array([[True, False, False]] * 24)
ans = """
LiFePO4
1.0
10.411767 0.000000 0.000000
0.000000 6.067172 0.000000
0.000000 0.000000 4.759490
Fe P O
4 4 16
Selective dynamics
direct
0.218728 0.750000 0.474867 T F F Fe
0.281272 0.250000 0.974867 T F F Fe
0.718728 0.750000 0.025133 T F F Fe
0.781272 0.250000 0.525133 T F F Fe
0.094613 0.250000 0.418243 T F F P
0.405387 0.750000 0.918243 T F F P
0.594613 0.250000 0.081757 T F F P
0.905387 0.750000 0.581757 T F F P
0.043372 0.750000 0.707138 T F F O
0.096642 0.250000 0.741320 T F F O
0.165710 0.046072 0.285384 T F F O
0.165710 0.453928 0.285384 T F F O
0.334290 0.546072 0.785384 T F F O
0.334290 0.953928 0.785384 T F F O
0.403358 0.750000 0.241320 T F F O
0.456628 0.250000 0.207138 T F F O
0.543372 0.750000 0.792862 T F F O
0.596642 0.250000 0.758680 T F F O
0.665710 0.046072 0.214616 T F F O
0.665710 0.453928 0.214616 T F F O
0.834290 0.546072 0.714616 T F F O
0.834290 0.953928 0.714616 T F F O
0.903358 0.750000 0.258680 T F F O
0.956628 0.250000 0.292862 T F F O"""
self.assertEqual(str(poscar).strip(), ans.strip())
def test_velocities(self):
si = 14
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
# Silicon structure for testing.
latt = [[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]
struct = Structure(latt, [si, si], coords)
poscar = Poscar(struct)
poscar.set_temperature(900)
v = np.array(poscar.velocities)
for x in np.sum(v, axis=0):
self.assertAlmostEqual(x, 0, 7)
temperature = struct[0].specie.atomic_mass.to("kg") * np.sum(v ** 2) / (3 * const.k) * 1e10
self.assertAlmostEqual(temperature, 900, 4,
'Temperature instantiated incorrectly')
poscar.set_temperature(700)
v = np.array(poscar.velocities)
for x in np.sum(v, axis=0):
self.assertAlmostEqual(
x, 0, 7, 'Velocities initialized with a net momentum')
temperature = struct[0].specie.atomic_mass.to("kg") * np.sum(v ** 2) / (3 * const.k) * 1e10
self.assertAlmostEqual(temperature, 700, 4,
'Temperature instantiated incorrectly')
def test_write(self):
filepath = self.TEST_FILES_DIR / 'POSCAR'
poscar = Poscar.from_file(filepath)
tempfname = Path("POSCAR.testing")
poscar.write_file(tempfname)
p = Poscar.from_file(tempfname)
self.assertArrayAlmostEqual(poscar.structure.lattice.abc,
p.structure.lattice.abc, 5)
tempfname.unlink()
class IncarTest(PymatgenTest):
def setUp(self):
file_name = self.TEST_FILES_DIR / 'INCAR'
self.incar = Incar.from_file(file_name)
def test_init(self):
incar = self.incar
incar["LDAU"] = "T"
self.assertEqual(incar["ALGO"], "Damped", "Wrong Algo")
self.assertEqual(float(incar["EDIFF"]), 1e-4, "Wrong EDIFF")
self.assertEqual(type(incar["LORBIT"]), int)
def test_diff(self):
incar = self.incar
filepath1 = self.TEST_FILES_DIR / 'INCAR'
incar1 = Incar.from_file(filepath1)
filepath2 = self.TEST_FILES_DIR / 'INCAR.2'
incar2 = Incar.from_file(filepath2)
filepath3 = self.TEST_FILES_DIR / 'INCAR.3'
incar3 = Incar.from_file(filepath2)
self.assertEqual(
incar1.diff(incar2),
{'Different': {
'NELM': {'INCAR1': None, 'INCAR2': 100},
'ISPIND': {'INCAR1': 2, 'INCAR2': None},
'LWAVE': {'INCAR1': True, 'INCAR2': False},
'LDAUPRINT': {'INCAR1': None, 'INCAR2': 1},
'MAGMOM': {'INCAR1': [6, -6, -6, 6, 0.6, 0.6, 0.6,
0.6, 0.6, 0.6, 0.6, 0.6,
0.6, 0.6, 0.6, 0.6, 0.6,
0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6],
'INCAR2': None},
'NELMIN': {'INCAR1': None, 'INCAR2': 3},
'ENCUTFOCK': {'INCAR1': 0.0, 'INCAR2': None},
'HFSCREEN': {'INCAR1': 0.207, 'INCAR2': None},
'LSCALU': {'INCAR1': False, 'INCAR2': None},
'ENCUT': {'INCAR1': 500, 'INCAR2': None},
'NSIM': {'INCAR1': 1, 'INCAR2': None},
'ICHARG': {'INCAR1': None, 'INCAR2': 1},
'NSW': {'INCAR1': 99, 'INCAR2': 51},
'NKRED': {'INCAR1': 2, 'INCAR2': None},
'NUPDOWN': {'INCAR1': 0, 'INCAR2': None},
'LCHARG': {'INCAR1': True, 'INCAR2': None},
'LPLANE': {'INCAR1': True, 'INCAR2': None},
'ISMEAR': {'INCAR1': 0, 'INCAR2': -5},
'NPAR': {'INCAR1': 8, 'INCAR2': 1},
'SYSTEM': {
'INCAR1': 'Id=[0] dblock_code=[97763-icsd] formula=[li mn (p o4)] sg_name=[p n m a]',
'INCAR2': 'Id=[91090] dblock_code=[20070929235612linio-59.53134651-vasp] formula=[li3 ni3 o6] '
'sg_name=[r-3m]'},
'ALGO': {'INCAR1': 'Damped', 'INCAR2': 'Fast'},
'LHFCALC': {'INCAR1': True, 'INCAR2': None},
'TIME': {'INCAR1': 0.4, 'INCAR2': None}},
'Same': {'IBRION': 2, 'PREC': 'Accurate', 'ISIF': 3,
'LMAXMIX': 4,
'LREAL': 'Auto', 'ISPIN': 2, 'EDIFF': 0.0001,
'LORBIT': 11, 'SIGMA': 0.05}})
self.assertEqual(
incar1.diff(incar3),
{'Different': {
'NELM': {'INCAR1': None, 'INCAR2': 100},
'ISPIND': {'INCAR1': 2, 'INCAR2': None},
'LWAVE': {'INCAR1': True, 'INCAR2': False},
'LDAUPRINT': {'INCAR1': None, 'INCAR2': 1},
'MAGMOM': {'INCAR1': [6, -6, -6, 6, 0.6, 0.6, 0.6,
0.6, 0.6, 0.6, 0.6, 0.6,
0.6, 0.6, 0.6, 0.6, 0.6,
0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6],
'INCAR2': None},
'NELMIN': {'INCAR1': None, 'INCAR2': 3},
'ENCUTFOCK': {'INCAR1': 0.0, 'INCAR2': None},
'HFSCREEN': {'INCAR1': 0.207, 'INCAR2': None},
'LSCALU': {'INCAR1': False, 'INCAR2': None},
'ENCUT': {'INCAR1': 500, 'INCAR2': None},
'NSIM': {'INCAR1': 1, 'INCAR2': None},
'ICHARG': {'INCAR1': None, 'INCAR2': 1},
'NSW': {'INCAR1': 99, 'INCAR2': 51},
'NKRED': {'INCAR1': 2, 'INCAR2': None},
'NUPDOWN': {'INCAR1': 0, 'INCAR2': None},
'LCHARG': {'INCAR1': True, 'INCAR2': None},
'LPLANE': {'INCAR1': True, 'INCAR2': None},
'ISMEAR': {'INCAR1': 0, 'INCAR2': -5},
'NPAR': {'INCAR1': 8, 'INCAR2': 1},
'SYSTEM': {
'INCAR1': 'Id=[0] dblock_code=[97763-icsd] formula=[li mn (p o4)] sg_name=[p n m a]',
'INCAR2': 'Id=[91090] dblock_code=[20070929235612linio-59.53134651-vasp] formula=[li3 ni3 o6] '
'sg_name=[r-3m]'},
'ALGO': {'INCAR1': 'Damped', 'INCAR2': 'Fast'},
'LHFCALC': {'INCAR1': True, 'INCAR2': None},
'TIME': {'INCAR1': 0.4, 'INCAR2': None}},
'Same': {'IBRION': 2, 'PREC': 'Accurate', 'ISIF': 3,
'LMAXMIX': 4,
'LREAL': 'Auto', 'ISPIN': 2, 'EDIFF': 0.0001,
'LORBIT': 11, 'SIGMA': 0.05}})
def test_as_dict_and_from_dict(self):
d = self.incar.as_dict()
incar2 = Incar.from_dict(d)
self.assertEqual(self.incar, incar2)
d["MAGMOM"] = [Magmom([1, 2, 3]).as_dict()]
incar3 = Incar.from_dict(d)
self.assertEqual(incar3["MAGMOM"], [Magmom([1, 2, 3])])
def test_write(self):
tempfname = Path("INCAR.testing")
self.incar.write_file(tempfname)
i = Incar.from_file(tempfname)
self.assertEqual(i, self.incar)
tempfname.unlink()
def test_get_string(self):
s = self.incar.get_string(pretty=True, sort_keys=True)
ans = """ALGO = Damped
EDIFF = 0.0001
ENCUT = 500
ENCUTFOCK = 0.0
HFSCREEN = 0.207
IBRION = 2
ISIF = 3
ISMEAR = 0
ISPIN = 2
ISPIND = 2
LCHARG = True
LHFCALC = True
LMAXMIX = 4
LORBIT = 11
LPLANE = True
LREAL = Auto
LSCALU = False
LWAVE = True
MAGMOM = 1*6.0 2*-6.0 1*6.0 20*0.6
NKRED = 2
NPAR = 8
NSIM = 1
NSW = 99
NUPDOWN = 0
PREC = Accurate
SIGMA = 0.05
SYSTEM = Id=[0] dblock_code=[97763-icsd] formula=[li mn (p o4)] sg_name=[p n m a]
TIME = 0.4"""
self.assertEqual(s, ans)
def test_lsorbit_magmom(self):
magmom1 = [[0.0, 0.0, 3.0], [0, 1, 0], [2, 1, 2]]
magmom2 = [-1, -1, -1, 0, 0, 0, 0, 0]
magmom4 = [Magmom([1.0, 2.0, 2.0])]
ans_string1 = "LANGEVIN_GAMMA = 10 10 10\nLSORBIT = True\n" \
"MAGMOM = 0.0 0.0 3.0 0 1 0 2 1 2\n"
ans_string2 = "LANGEVIN_GAMMA = 10\nLSORBIT = True\n" \
"MAGMOM = 3*3*-1 3*5*0\n"
ans_string3 = "LSORBIT = False\nMAGMOM = 2*-1 2*9\n"
ans_string4_nolsorbit = "LANGEVIN_GAMMA = 10\nLSORBIT = False\nMAGMOM = 1*3.0\n"
ans_string4_lsorbit = "LANGEVIN_GAMMA = 10\nLSORBIT = True\nMAGMOM = 1.0 2.0 2.0\n"
incar = Incar({})
incar["MAGMOM"] = magmom1
incar["LSORBIT"] = "T"
incar["LANGEVIN_GAMMA"] = [10, 10, 10]
self.assertEqual(ans_string1, str(incar))
incar["MAGMOM"] = magmom2
incar["LSORBIT"] = "T"
incar["LANGEVIN_GAMMA"] = 10
self.assertEqual(ans_string2, str(incar))
incar["MAGMOM"] = magmom4
incar["LSORBIT"] = "F"
self.assertEqual(ans_string4_nolsorbit, str(incar))
incar["LSORBIT"] = "T"
self.assertEqual(ans_string4_lsorbit, str(incar))
incar = Incar.from_string(ans_string1)
self.assertEqual(incar["MAGMOM"],
[[0.0, 0.0, 3.0], [0, 1, 0], [2, 1, 2]])
self.assertEqual(incar["LANGEVIN_GAMMA"], [10, 10, 10])
incar = Incar.from_string(ans_string2)
self.assertEqual(incar["MAGMOM"], [[-1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [0, 0, 0],
[0, 0, 0], [0, 0, 0],
[0, 0, 0], [0, 0, 0]])
self.assertEqual(incar["LANGEVIN_GAMMA"], [10])
incar = Incar.from_string(ans_string3)
self.assertFalse(incar["LSORBIT"])
self.assertEqual(incar["MAGMOM"], [-1, -1, 9, 9])
def test_quad_efg(self):
incar1 = Incar({})
incar1["LEFG"] = True
incar1["QUAD_EFG"] = [0.0, 146.6, -25.58]
ans_string1 = "LEFG = True\nQUAD_EFG = 0.0 146.6 -25.58\n"
self.assertEqual(ans_string1, str(incar1))
incar2 = Incar.from_string(ans_string1)
self.assertEqual(ans_string1, str(incar2))
def test_types(self):
incar_str = """ALGO = Fast
ECUT = 510
EDIFF = 1e-07
EINT = -0.85 0.85
IBRION = -1
ICHARG = 11
ISIF = 3
ISMEAR = 1
ISPIN = 1
LPARD = True
NBMOD = -3
PREC = Accurate
SIGMA = 0.1"""
i = Incar.from_string(incar_str)
self.assertIsInstance(i["EINT"], list)
self.assertEqual(i["EINT"][0], -0.85)
incar_str += "\nLHFCALC = .TRUE. ; HFSCREEN = 0.2"
incar_str += "\nALGO = All;"
i = Incar.from_string(incar_str)
self.assertTrue(i["LHFCALC"])
self.assertEqual(i["HFSCREEN"], 0.2)
self.assertEqual(i["ALGO"], "All")
def test_proc_types(self):
self.assertEqual(Incar.proc_val("HELLO", "-0.85 0.85"), "-0.85 0.85")
def test_check_params(self):
# Triggers warnings when running into nonsensical parameters
with self.assertWarns(BadIncarWarning) as cm:
incar = Incar({
'ADDGRID': True,
'ALGO': 'Normal',
'AMIN': 0.01,
'AMIX': 0.2,
'BMIX': 0.001,
'EDIFF': 5 + 1j, # EDIFF needs to be real
'EDIFFG': -0.01,
'ENCUT': 520,
'IBRION': 2,
'ICHARG': 1,
'ISIF': 3,
'ISMEAR': 1,
'ISPIN': 2,
'LASPH': 5, # Should be a bool
'LORBIT': 11,
'LREAL': 'Auto',
'LWAVE': False,
'MAGMOM': [1, 2, 4, 5],
'METAGGA': 'SCAM', # spelling mistake
'NELM': 200,
'NPAR': 4,
'NSW': 99,
'PREC': 'Accurate',
'SIGMA': 0.2,
'NBAND': 250, # spelling mistake
'PHON_TLIST': 'is_a_str', # this parameter should be a list
'LATTICE_CONSTRAINTS': [True, False, 'f'], # Should be a list of bools
'M_CONSTR': [True, 1, 'string'] # Should be a list of real numbers
})
incar.check_params()
class KpointsTest(PymatgenTest):
def test_init(self):
filepath = self.TEST_FILES_DIR / 'KPOINTS.auto'
kpoints = Kpoints.from_file(filepath)
self.assertEqual(kpoints.kpts, [[10]], "Wrong kpoint lattice read")
filepath = self.TEST_FILES_DIR / 'KPOINTS.cartesian'
kpoints = Kpoints.from_file(filepath)
self.assertEqual(kpoints.kpts,
[[0.25, 0, 0], [0, 0.25, 0], [0, 0, 0.25]],
"Wrong kpoint lattice read")
self.assertEqual(kpoints.kpts_shift, [0.5, 0.5, 0.5],
"Wrong kpoint shift read")
filepath = self.TEST_FILES_DIR / 'KPOINTS'
kpoints = Kpoints.from_file(filepath)
self.kpoints = kpoints
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
filepath = self.TEST_FILES_DIR / 'KPOINTS.band'
kpoints = Kpoints.from_file(filepath)
self.assertIsNotNone(kpoints.labels)
self.assertEqual(kpoints.style, Kpoints.supported_modes.Line_mode)
kpoints_str = str(kpoints)
self.assertEqual(kpoints_str.split("\n")[3], "Reciprocal")
filepath = self.TEST_FILES_DIR / 'KPOINTS.explicit'
kpoints = Kpoints.from_file(filepath)
self.assertIsNotNone(kpoints.kpts_weights)
self.assertEqual(str(kpoints).strip(), """Example file
4
Cartesian
0.0 0.0 0.0 1 None
0.0 0.0 0.5 1 None
0.0 0.5 0.5 2 None
0.5 0.5 0.5 4 None""")
filepath = self.TEST_FILES_DIR / 'KPOINTS.explicit_tet'
kpoints = Kpoints.from_file(filepath)
self.assertEqual(kpoints.tet_connections, [(6, [1, 2, 3, 4])])
def test_style_setter(self):
filepath = self.TEST_FILES_DIR / 'KPOINTS'
kpoints = Kpoints.from_file(filepath)
self.assertEqual(kpoints.style, Kpoints.supported_modes.Monkhorst)
kpoints.style = "G"
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
def test_static_constructors(self):
kpoints = Kpoints.gamma_automatic([3, 3, 3], [0, 0, 0])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
self.assertEqual(kpoints.kpts, [[3, 3, 3]])
kpoints = Kpoints.monkhorst_automatic([2, 2, 2], [0, 0, 0])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Monkhorst)
self.assertEqual(kpoints.kpts, [[2, 2, 2]])
kpoints = Kpoints.automatic(100)
self.assertEqual(kpoints.style, Kpoints.supported_modes.Automatic)
self.assertEqual(kpoints.kpts, [[100]])
filepath = self.TEST_FILES_DIR / 'POSCAR'
poscar = Poscar.from_file(filepath)
kpoints = Kpoints.automatic_density(poscar.structure, 500)
self.assertEqual(kpoints.kpts, [[1, 3, 3]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
kpoints = Kpoints.automatic_density(poscar.structure, 500, True)
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
kpoints = Kpoints.automatic_density_by_vol(poscar.structure, 1000)
self.assertEqual(kpoints.kpts, [[6, 10, 13]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
s = poscar.structure
s.make_supercell(3)
kpoints = Kpoints.automatic_density(s, 500)
self.assertEqual(kpoints.kpts, [[1, 1, 1]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
kpoints = Kpoints.from_string("""k-point mesh
0
G
10 10 10
0.5 0.5 0.5
""")
self.assertArrayAlmostEqual(kpoints.kpts_shift, [0.5, 0.5, 0.5])
def test_as_dict_from_dict(self):
k = Kpoints.monkhorst_automatic([2, 2, 2], [0, 0, 0])
d = k.as_dict()
k2 = Kpoints.from_dict(d)
self.assertEqual(k.kpts, k2.kpts)
self.assertEqual(k.style, k2.style)
self.assertEqual(k.kpts_shift, k2.kpts_shift)
def test_kpt_bands_as_dict_from_dict(self):
file_name = self.TEST_FILES_DIR / 'KPOINTS.band'
k = Kpoints.from_file(file_name)
d = k.as_dict()
import json
json.dumps(d)
# This doesn't work
k2 = Kpoints.from_dict(d)
self.assertEqual(k.kpts, k2.kpts)
self.assertEqual(k.style, k2.style)
self.assertEqual(k.kpts_shift, k2.kpts_shift)
self.assertEqual(k.num_kpts, k2.num_kpts)
def test_pickle(self):
k = Kpoints.gamma_automatic()
pickle.dumps(k)
def test_automatic_kpoint(self):
# s = PymatgenTest.get_structure("Li2O")
p = Poscar.from_string("""Al1
1.0
2.473329 0.000000 1.427977
0.824443 2.331877 1.427977
0.000000 0.000000 2.855955
Al
1
direct
0.000000 0.000000 0.000000 Al""")
kpoints = Kpoints.automatic_density(p.structure, 1000)
self.assertArrayAlmostEqual(kpoints.kpts[0], [10, 10, 10])
class PotcarSingleTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
self.psingle = PotcarSingle.from_file(
self.TEST_FILES_DIR / "POT_GGA_PAW_PBE" / "POTCAR.Mn_pv.gz")
def test_keywords(self):
data = {'VRHFIN': 'Mn: 3p4s3d', 'LPAW': True, 'DEXC': -.003,
'STEP': [20.000, 1.050],
'RPACOR': 2.080, 'LEXCH': 'PE',
'ENMAX': 269.865, 'QCUT': -4.454,
'TITEL': 'PAW_PBE Mn_pv 07Sep2000',
'LCOR': True, 'EAUG': 569.085,
'RMAX': 2.807,
'ZVAL': 13.000,
'EATOM': 2024.8347, 'NDATA': 100,
'LULTRA': False,
'QGAM': 8.907,
'ENMIN': 202.399,
'RCLOC': 1.725,
'RCORE': 2.300,
'RDEP': 2.338,
'IUNSCR': 1,
'RAUG': 1.300,
'POMASS': 54.938,
'RWIGS': 1.323}
self.assertEqual(self.psingle.keywords, data)
def test_nelectrons(self):
self.assertEqual(self.psingle.nelectrons, 13)
def test_electron_config(self):
config = self.psingle.electron_configuration
self.assertEqual(config[-1], (3, "p", 6))
def test_attributes(self):
for k in ['DEXC', 'RPACOR', 'ENMAX', 'QCUT', 'EAUG', 'RMAX',
'ZVAL', 'EATOM', 'NDATA', 'QGAM', 'ENMIN', 'RCLOC',
'RCORE', 'RDEP', 'RAUG', 'POMASS', 'RWIGS']:
self.assertIsNotNone(getattr(self.psingle, k))
def test_found_unknown_key(self):
with self.assertRaises(KeyError):
PotcarSingle.parse_functions['BAD_KEY']
def test_bad_value(self):
self.assertRaises(ValueError, PotcarSingle.parse_functions['ENMAX'],
"ThisShouldBeAFloat")
def test_hash(self):
self.assertEqual(self.psingle.get_potcar_hash(),
"fa52f891f234d49bb4cb5ea96aae8f98")
def test_functional_types(self):
self.assertEqual(self.psingle.functional, 'PBE')
self.assertEqual(self.psingle.functional_class, 'GGA')
self.assertEqual(self.psingle.potential_type, 'PAW')
psingle = PotcarSingle.from_file(self.TEST_FILES_DIR / "POT_LDA_PAW" / "POTCAR.Fe.gz")
self.assertEqual(psingle.functional, 'Perdew-Zunger81')
self.assertEqual(psingle.functional_class, 'LDA')
self.assertEqual(psingle.potential_type, 'PAW')
# def test_default_functional(self):
# p = PotcarSingle.from_symbol_and_functional("Fe")
# self.assertEqual(p.functional_class, 'GGA')
# SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "LDA"
# p = PotcarSingle.from_symbol_and_functional("Fe")
# self.assertEqual(p.functional_class, 'LDA')
# SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "PBE"
class PotcarTest(PymatgenTest):
def setUp(self):
if "PMG_VASP_PSP_DIR" not in os.environ:
os.environ["PMG_VASP_PSP_DIR"] = str(self.TEST_FILES_DIR)
filepath = self.TEST_FILES_DIR / 'POTCAR'
self.potcar = Potcar.from_file(filepath)
def test_init(self):
self.assertEqual(self.potcar.symbols, ["Fe", "P", "O"],
"Wrong symbols read in for POTCAR")
potcar = Potcar(["Fe_pv", "O"])
self.assertEqual(potcar[0].enmax, 293.238)
def test_potcar_map(self):
fe_potcar = zopen(self.TEST_FILES_DIR / "POT_GGA_PAW_PBE" / "POTCAR.Fe_pv.gz").read().decode(
"utf-8")
# specify V instead of Fe - this makes sure the test won't pass if the
# code just grabs the POTCAR from the config file (the config file would
# grab the V POTCAR)
potcar = Potcar(["V"], sym_potcar_map={"V": fe_potcar})
self.assertEqual(potcar.symbols, ["Fe_pv"], "Wrong symbols read in "
"for POTCAR")
def test_to_from_dict(self):
d = self.potcar.as_dict()
potcar = Potcar.from_dict(d)
self.assertEqual(potcar.symbols, ["Fe", "P", "O"])
def test_write(self):
tempfname = Path("POTCAR.testing")
self.potcar.write_file(tempfname)
p = Potcar.from_file(tempfname)
self.assertEqual(p.symbols, self.potcar.symbols)
tempfname.unlink()
def test_set_symbol(self):
self.assertEqual(self.potcar.symbols, ["Fe", "P", "O"])
self.assertEqual(self.potcar[0].nelectrons, 8)
self.potcar.symbols = ["Fe_pv", "O"]
self.assertEqual(self.potcar.symbols, ["Fe_pv", "O"])
self.assertEqual(self.potcar[0].nelectrons, 14)
# def test_default_functional(self):
# p = Potcar(["Fe", "P"])
# self.assertEqual(p[0].functional_class, 'GGA')
# self.assertEqual(p[1].functional_class, 'GGA')
# SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "LDA"
# p = Potcar(["Fe", "P"])
# self.assertEqual(p[0].functional_class, 'LDA')
# self.assertEqual(p[1].functional_class, 'LDA')
def test_pickle(self):
pickle.dumps(self.potcar)
# def tearDown(self):
# SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "PBE"
class VaspInputTest(PymatgenTest):
def setUp(self):
filepath = self.TEST_FILES_DIR / 'INCAR'
incar = Incar.from_file(filepath)
filepath = self.TEST_FILES_DIR / 'POSCAR'
poscar = Poscar.from_file(filepath, check_for_POTCAR=False)
if "PMG_VASP_PSP_DIR" not in os.environ:
os.environ["PMG_VASP_PSP_DIR"] = str(self.TEST_FILES_DIR)
filepath = self.TEST_FILES_DIR / 'POTCAR'
potcar = Potcar.from_file(filepath)
filepath = self.TEST_FILES_DIR / 'KPOINTS.auto'
kpoints = Kpoints.from_file(filepath)
self.vinput = VaspInput(incar, kpoints, poscar, potcar)
def test_to_from_dict(self):
d = self.vinput.as_dict()
vinput = VaspInput.from_dict(d)
comp = vinput["POSCAR"].structure.composition
self.assertEqual(comp, Composition("Fe4P4O16"))
def test_write(self):
tmp_dir = Path("VaspInput.testing")
self.vinput.write_input(tmp_dir)
filepath = tmp_dir / "INCAR"
incar = Incar.from_file(filepath)
self.assertEqual(incar["NSW"], 99)
for name in ("INCAR", "POSCAR", "POTCAR", "KPOINTS"):
(tmp_dir / name).unlink()
tmp_dir.rmdir()
def test_run_vasp(self):
# To add some test.
with ScratchDir(".") as d:
self.vinput.run_vasp(d, vasp_cmd=["cat", "INCAR"])
with open(os.path.join(d, "vasp.out"), "r") as f:
output = f.read()
self.assertEqual(output.split("\n")[0], "ALGO = Damped")
def test_from_directory(self):
vi = VaspInput.from_directory(self.TEST_FILES_DIR,
optional_files={"CONTCAR.Li2O": Poscar})
self.assertEqual(vi["INCAR"]["ALGO"], "Damped")
self.assertIn("CONTCAR.Li2O", vi)
d = vi.as_dict()
vinput = VaspInput.from_dict(d)
self.assertIn("CONTCAR.Li2O", vinput)
if __name__ == "__main__":
unittest.main()
|
[] |
[] |
[
"PMG_VASP_PSP_DIR"
] |
[]
|
["PMG_VASP_PSP_DIR"]
|
python
| 1 | 0 | |
cloud-warden/config.py
|
"""Configuration - Static vars"""
import boto3
import os
ASG_CONFIG = boto3.client('autoscaling', region_name='eu-west-1')
EC2_CONFIG = boto3.client('ec2', region_name='eu-west-1')
WEBHOOK_URL = os.environ.get('WEBHOOK_URL')
SAWMILL_DEVELOPER_LOGS = os.environ.get('SAWMILL_DEVELOPER_LOGS')
SAWMILL_PB_MODE = os.environ.get('SAWMILL_PB_MODE')
AWS_REGIONS = [
'us-east-2', 'us-east-1', 'us-west-1', 'us-west-2', 'ap-east-1', 'ap-south-1', 'ap-northeast-1',
'ap-northeast-2', 'ap-northeast-3', 'ap-southeast-1', 'ap-southeast-2', 'ca-central-1', 'cn-north-1',
'cn-northwest-1', 'eu-central-1', 'eu-west-1', 'eu-west-2', 'eu-west-3', 'eu-north-1', 'me-south-1',
'sa-east-1', 'us-gov-east-1', 'us-gov-west-1'
]
WARDEN_SCHEDULES = ['OfficeHours', 'ExtendedHours', 'DailyOnDemand', 'WeeklyOnDemand', 'terraform']
OFF_STATE = ['shutting-down', 'terminated', 'stopping', 'stopped']
ON_STATE = ['pending', 'running']
|
[] |
[] |
[
"SAWMILL_DEVELOPER_LOGS",
"WEBHOOK_URL",
"SAWMILL_PB_MODE"
] |
[]
|
["SAWMILL_DEVELOPER_LOGS", "WEBHOOK_URL", "SAWMILL_PB_MODE"]
|
python
| 3 | 0 | |
src/lib/kombi/Crawler/Fs/Video/VideoCrawler.py
|
import subprocess
import os
import json
from ..FileCrawler import FileCrawler
class VideoCrawler(FileCrawler):
"""
Abstracted video crawler.
"""
__ffprobeExecutable = os.environ.get('KOMBI_FFPROBE_EXECUTABLE', 'ffprobe')
def __init__(self, *args, **kwargs):
"""
Create a video crawler.
"""
super(VideoCrawler, self).__init__(*args, **kwargs)
self.setVar('category', 'video')
# setting a video tag
self.setTag(
'video',
self.pathHolder().baseName()
)
def var(self, name):
"""
Return var value using lazy loading implementation for width and height.
"""
if self.__ffprobeExecutable and name in ('width', 'height') and name not in self.varNames():
self.__computeWidthHeight()
return super(VideoCrawler, self).var(name)
def __computeWidthHeight(self):
"""
Query width and height using ffprobe and set them as crawler variables.
"""
# Get width and height from movie using ffprobe
cmd = '{} -v quiet -print_format json -show_entries stream=height,width "{}"'.format(
self.__ffprobeExecutable,
self.var('filePath')
)
# calling ffprobe
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=os.environ,
shell=True
)
# capturing the output
output, error = process.communicate()
result = json.loads(output.decode("utf-8"))
if "streams" in result:
self.setVar('width', result['streams'][0]['width'])
self.setVar('height', result['streams'][0]['height'])
|
[] |
[] |
[
"KOMBI_FFPROBE_EXECUTABLE"
] |
[]
|
["KOMBI_FFPROBE_EXECUTABLE"]
|
python
| 1 | 0 | |
sklearn/_config.py
|
"""Global configuration state and functions for management
"""
import os
from contextlib import contextmanager as contextmanager
_global_config = {
'assume_finite': bool(os.environ.get('SKLEARN_ASSUME_FINITE', False)),
'working_memory': int(os.environ.get('SKLEARN_WORKING_MEMORY', 1024)),
'print_changed_only': True,
'display': 'text',
}
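# Defaults above can be overridden at import time via the SKLEARN_ASSUME_FINITE
# and SKLEARN_WORKING_MEMORY environment variables.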
def get_config():
"""Retrieve current values for configuration set by :func:`set_config`
Returns
-------
config : dict
Keys are parameter names that can be passed to :func:`set_config`.
See Also
--------
config_context: Context manager for global scikit-learn configuration
set_config: Set global scikit-learn configuration
"""
return _global_config.copy()
def set_config(assume_finite=None, working_memory=None,
print_changed_only=None, display=None):
"""Set global scikit-learn configuration
.. versionadded:: 0.19
Parameters
----------
assume_finite : bool, default=None
If True, validation for finiteness will be skipped,
saving time, but leading to potential crashes. If
False, validation for finiteness will be performed,
avoiding error. Global default: False.
.. versionadded:: 0.19
working_memory : int, default=None
If set, scikit-learn will attempt to limit the size of temporary arrays
to this number of MiB (per job when parallelised), often saving both
computation time and memory on expensive operations that can be
performed in chunks. Global default: 1024.
.. versionadded:: 0.20
print_changed_only : bool, default=None
If True, only the parameters that were set to non-default
values will be printed when printing an estimator. For example,
``print(SVC())`` while True will only print 'SVC()' while the default
behaviour would be to print 'SVC(C=1.0, cache_size=200, ...)' with
all the non-changed parameters.
.. versionadded:: 0.21
display : {'text', 'diagram'}, default=None
If 'diagram', estimators will be displayed as a diagram in a Jupyter
lab or notebook context. If 'text', estimators will be displayed as
text. Default is 'text'.
.. versionadded:: 0.23
See Also
--------
config_context: Context manager for global scikit-learn configuration
get_config: Retrieve current values of the global configuration
"""
if assume_finite is not None:
_global_config['assume_finite'] = assume_finite
if working_memory is not None:
_global_config['working_memory'] = working_memory
if print_changed_only is not None:
_global_config['print_changed_only'] = print_changed_only
if display is not None:
_global_config['display'] = display
@contextmanager
def config_context(**new_config):
"""Context manager for global scikit-learn configuration
Parameters
----------
assume_finite : bool, default=False
If True, validation for finiteness will be skipped,
saving time, but leading to potential crashes. If
False, validation for finiteness will be performed,
avoiding error. Global default: False.
working_memory : int, default=1024
If set, scikit-learn will attempt to limit the size of temporary arrays
to this number of MiB (per job when parallelised), often saving both
computation time and memory on expensive operations that can be
performed in chunks. Global default: 1024.
print_changed_only : bool, default=True
If True, only the parameters that were set to non-default
values will be printed when printing an estimator. For example,
``print(SVC())`` while True will only print 'SVC()', but would print
'SVC(C=1.0, cache_size=200, ...)' with all the non-changed parameters
when False. Default is True.
.. versionchanged:: 0.23
Default changed from False to True.
display : {'text', 'diagram'}, default='text'
If 'diagram', estimators will be displayed as a diagram in a Jupyter
lab or notebook context. If 'text', estimators will be displayed as
text. Default is 'text'.
.. versionadded:: 0.23
Notes
-----
All settings, not just those presently modified, will be returned to
their previous values when the context manager is exited. This is not
thread-safe.
Examples
--------
>>> import sklearn
>>> from sklearn.utils.validation import assert_all_finite
>>> with sklearn.config_context(assume_finite=True):
... assert_all_finite([float('nan')])
>>> with sklearn.config_context(assume_finite=True):
... with sklearn.config_context(assume_finite=False):
... assert_all_finite([float('nan')])
Traceback (most recent call last):
...
ValueError: Input contains NaN, ...
See Also
--------
set_config: Set global scikit-learn configuration
get_config: Retrieve current values of the global configuration
"""
old_config = get_config().copy()
set_config(**new_config)
try:
yield
finally:
set_config(**old_config)
|
[] |
[] |
[
"SKLEARN_WORKING_MEMORY",
"SKLEARN_ASSUME_FINITE"
] |
[]
|
["SKLEARN_WORKING_MEMORY", "SKLEARN_ASSUME_FINITE"]
|
python
| 2 | 0 | |
vendor/github.com/palantir/godel/apps/okgo/generated_src/internal/github.com/palantir/checks/extimport/extimport.go
|
// Copyright 2016 Palantir Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package amalgomated
import (
"fmt"
"go/build"
"go/token"
"io"
"os"
"path"
"path/filepath"
"sort"
"strings"
"github.com/nmiyake/pkg/dirs"
"github.com/nmiyake/pkg/errorstringer"
"github.com/palantir/pkg/cli"
"github.com/palantir/pkg/cli/flag"
"github.com/palantir/pkg/pkgpath"
"github.com/pkg/errors"
)
const (
pkgsFlagName = "pkgs"
listFlagName = "list"
allFlagName = "all"
)
var (
pkgsFlag = flag.StringSlice{
Name: pkgsFlagName,
Usage: "paths to the packages to check",
}
listFlag = flag.BoolFlag{
Name: listFlagName,
Alias: "l",
Usage: "print external dependencies one per line",
}
allFlag = flag.BoolFlag{
Name: allFlagName,
Alias: "a",
Usage: "list all external dependencies, including those multiple levels deep",
}
)
func AmalgomatedMain() {
app := cli.NewApp(cli.DebugHandler(errorstringer.SingleStack))
app.Flags = append(app.Flags,
listFlag,
allFlag,
pkgsFlag,
)
app.Action = func(ctx cli.Context) error {
wd, err := dirs.GetwdEvalSymLinks()
if err != nil {
return errors.Wrapf(err, "Failed to get working directory")
}
return doExtimport(wd, ctx.Slice(pkgsFlagName), ctx.Bool(listFlagName), ctx.Bool(allFlagName), ctx.App.Stdout)
}
os.Exit(app.Run(os.Args))
}
func doExtimport(projectDir string, pkgPaths []string, list, all bool, w io.Writer) error {
if !path.IsAbs(projectDir) {
return errors.Errorf("projectDir %s must be an absolute path", projectDir)
}
gopath := os.Getenv("GOPATH")
if gopath == "" {
return errors.Errorf("GOPATH environment variable must be set")
}
if relPath, err := filepath.Rel(path.Join(gopath, "src"), projectDir); err != nil || strings.HasPrefix(relPath, "../") {
return errors.Wrapf(err, "Project directory %s must be a subdirectory of $GOPATH/src (%s)", projectDir, path.Join(gopath, "src"))
}
if len(pkgPaths) == 0 {
pkgs, err := pkgpath.PackagesInDir(projectDir, pkgpath.DefaultGoPkgExcludeMatcher())
if err != nil {
return errors.Wrapf(err, "Failed to list packages")
}
pkgPaths, err = pkgs.Paths(pkgpath.Relative)
if err != nil {
return errors.Wrapf(err, "Failed to convert package paths")
}
}
internalPkgs := make(map[string]bool)
externalPkgs := make(map[string][]string)
printedPkgs := make(map[string]bool)
type pkgWithSrc struct {
pkg string
src string
}
externalImportsExist := false
pkgsToProcess := make([]pkgWithSrc, len(pkgPaths))
for i, pkgPath := range pkgPaths {
pkgsToProcess[i] = pkgWithSrc{
pkg: "./.",
src: path.Join(projectDir, pkgPath),
}
}
processedPkgs := make(map[pkgWithSrc]bool)
for len(pkgsToProcess) > 0 {
currPkg := pkgsToProcess[0]
pkgsToProcess = pkgsToProcess[1:]
if processedPkgs[currPkg] {
continue
}
processedPkgs[currPkg] = true
externalPkgs, err := checkImports(currPkg.pkg, currPkg.src, projectDir, internalPkgs, externalPkgs, w, list, printedPkgs)
if err != nil {
return errors.Wrapf(err, "Failed to check imports for %v", currPkg)
} else if len(externalPkgs) == 0 {
continue
}
externalImportsExist = true
if list && all {
// when run in "list all" mode, process all external packages as well so that all
// external dependencies (even those multiple levels deep) are listed
for _, currExternalPkg := range externalPkgs {
externalPkgWithSrc := pkgWithSrc{
pkg: currExternalPkg,
src: currPkg.src,
}
if !processedPkgs[externalPkgWithSrc] {
pkgsToProcess = append(pkgsToProcess, externalPkgWithSrc)
}
}
}
}
if externalImportsExist {
return fmt.Errorf("")
}
return nil
}
// checkImports returns any external imports for the package "pkg". Does so by getting the "import" statements in all of
// the .go files (including tests) in the directory and then resolving the imports using standard Go rules assuming that
// the resolution occurs in "srcDir" (this is done so that special directories like "vendor" and "internal" are handled
// correctly). An import is considered external if its resolved location is outside of the directory tree of
// "projectRootDir".
func checkImports(pkgPath, srcDir, projectRootDir string, internalPkgs map[string]bool, externalPkgs map[string][]string, w io.Writer, list bool, printedPkgs map[string]bool) ([]string, error) {
// get all imports in package
pkg, err := build.Import(pkgPath, srcDir, build.ImportComment)
if err != nil {
return nil, errors.Wrapf(err, "Failed to import package %s using srcDir %s", pkgPath, srcDir)
}
importsToCheck := make(map[string][]token.Position)
addImportPosToMap(importsToCheck, pkg.ImportPos)
addImportPosToMap(importsToCheck, pkg.TestImportPos)
addImportPosToMap(importsToCheck, pkg.XTestImportPos)
var externalPkgsFound []string
// check imports for each file in the package
sortedFiles, fileToImports := fileToImportsMap(importsToCheck)
for _, currFile := range sortedFiles {
// check each import in the file
for _, currImportLine := range fileToImports[currFile] {
chain, err := getExternalImport(currImportLine.name, srcDir, projectRootDir, internalPkgs, externalPkgs)
if err != nil {
return nil, errors.Wrapf(err, "isExternalImport failed for %s", currImportLine)
}
if len(chain) > 0 {
externalPkg := chain[len(chain)-1]
externalPkgsFound = append(externalPkgsFound, externalPkg)
if list {
if _, ok := printedPkgs[externalPkg]; !ok {
fmt.Fprintln(w, externalPkg)
}
printedPkgs[externalPkg] = true
} else {
msg := fmt.Sprintf("%v:%v:%v: imports external package %v", currFile, currImportLine.pos.Line, currImportLine.pos.Column, externalPkg)
if len(chain) > 1 {
msg += fmt.Sprintf(" transitively via %v", strings.Join(chain[:len(chain)-1], " -> "))
}
fmt.Fprintln(w, msg)
}
}
}
}
return externalPkgsFound, nil
}
// getExternalImport takes an import and returns the chain to the external import if the import is external and nil
// otherwise. Assumes that the import occurs in a package in "srcDir". The import is considered external if its resolved
// path is not a subdirectory of the project root.
func getExternalImport(importPkgPath, srcDir, projectRoot string, internalPkgs map[string]bool, externalPkgs map[string][]string) ([]string, error) {
if !strings.Contains(importPkgPath, ".") || internalPkgs[importPkgPath] {
// if package is a standard package or known to be internal, return empty
return nil, nil
} else if chain, ok := externalPkgs[importPkgPath]; ok {
// if package is external and result is cached, return directly
return chain, nil
}
pkg, err := build.Import(importPkgPath, srcDir, build.ImportComment)
if err != nil {
return nil, errors.Wrapf(err, "Failed to import package %s", importPkgPath)
}
if rel, err := filepath.Rel(projectRoot, pkg.Dir); err != nil || strings.HasPrefix(rel, "../") {
currChain := []string{importPkgPath}
externalPkgs[importPkgPath] = currChain
return currChain, nil
}
// current import is internal, but check if any of its imports are external. Resolve the imports for this
// package relative to its own directory (so that special directories like "vendor" and "internal" are
// handled correctly).
sort.Strings(pkg.Imports)
for _, currImport := range pkg.Imports {
chain, err := getExternalImport(currImport, pkg.Dir, projectRoot, internalPkgs, externalPkgs)
if err != nil {
return nil, errors.Wrapf(err, "isExternalImport failed for %v", currImport)
}
// if any import is external, this import is external
if len(chain) > 0 {
currChain := append([]string{importPkgPath}, chain...)
externalPkgs[importPkgPath] = currChain
return currChain, nil
}
}
// if all checks pass, mark this package as internal and return an empty chain
internalPkgs[importPkgPath] = true
return nil, nil
}
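// addImportPosToMap copies every entry of src into dst.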
func addImportPosToMap(dst, src map[string][]token.Position) {
for k, v := range src {
dst[k] = v
}
}
type importLine struct {
name string
pos token.Position
}
type byLineNum []importLine
func (a byLineNum) Len() int { return len(a) }
func (a byLineNum) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byLineNum) Less(i, j int) bool {
if a[i].pos.Line == a[j].pos.Line {
// if line numbers are the same, do secondary sort by column position
return a[i].pos.Column < a[j].pos.Column
}
return a[i].pos.Line < a[j].pos.Line
}
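// fileToImportsMap groups the recorded import positions by file name, returning the sorted list of
// file names together with each file's imports sorted by line number.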
func fileToImportsMap(importPos map[string][]token.Position) ([]string, map[string][]importLine) {
output := make(map[string][]importLine)
for k, v := range importPos {
for _, currPos := range v {
output[currPos.Filename] = append(output[currPos.Filename], importLine{
name: k,
pos: currPos,
})
}
}
var sortedKeys []string
for k, v := range output {
sortedKeys = append(sortedKeys, k)
sort.Sort(byLineNum(v))
}
sort.Strings(sortedKeys)
return sortedKeys, output
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
cmd/kube-vip.go
|
package cmd
import (
"context"
"fmt"
"net/http"
"os"
"time"
"github.com/kube-vip/kube-vip/pkg/kubevip"
"github.com/kube-vip/kube-vip/pkg/manager"
"github.com/kube-vip/kube-vip/pkg/packet"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// Path to the configuration file
var configPath string
// Namespace that kube-vip will operate within
var namespace string
// Disable the Virtual IP (bind to the existing network stack)
var disableVIP bool
// Enable HA for the control plane (hybrid mode)
var controlPlane bool
// Run as a load balancer service (within a pod / kubernetes)
var serviceArp bool
// ConfigMap name within a Kubernetes cluster
var configMap string
// Configure the level of logging
var logLevel uint32
// Provider Config
var providerConfig string
// Release - this struct contains the release information populated when building kube-vip
var Release struct {
Version string
Build string
}
// Structs used via the various subcommands
var initConfig kubevip.Config
var initLoadBalancer kubevip.LoadBalancer
// Points to a kubernetes configuration file
var kubeConfigPath string
var kubeVipCmd = &cobra.Command{
Use: "kube-vip",
Short: "This is a server for providing a Virtual IP and load-balancer for the Kubernetes control-plane",
}
func init() {
localpeer, err := autoGenLocalPeer()
if err != nil {
log.Fatalln(err)
}
initConfig.LocalPeer = *localpeer
//initConfig.Peers = append(initConfig.Peers, *localpeer)
kubeVipCmd.PersistentFlags().StringVar(&initConfig.Interface, "interface", "", "Name of the interface to bind to")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.VIP, "vip", "", "The Virtual IP address")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.Address, "address", "", "an address (IP or DNS name) to use as a VIP")
kubeVipCmd.PersistentFlags().IntVar(&initConfig.Port, "port", 6443, "listen port for the VIP")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.VIPCIDR, "cidr", "32", "The CIDR range for the virtual IP address")
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableARP, "arp", false, "Enable Arp for Vip changes")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.Annotations, "annotations", "", "Set Node annotations prefix for parsing")
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.DDNS, "ddns", false, "use Dynamic DNS + DHCP to allocate VIP for address")
// Clustering type (leaderElection)
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableLeaderElection, "leaderElection", false, "Use the Kubernetes leader election mechanism for clustering")
kubeVipCmd.PersistentFlags().IntVar(&initConfig.LeaseDuration, "leaseDuration", 5, "Length of time a Kubernetes leader lease can be held for")
kubeVipCmd.PersistentFlags().IntVar(&initConfig.RenewDeadline, "leaseRenewDuration", 3, "Length of time a Kubernetes leader can attempt to renew its lease")
kubeVipCmd.PersistentFlags().IntVar(&initConfig.RetryPeriod, "leaseRetry", 1, "Number of times the host will retry to hold a lease")
// Clustering type (raft)
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.StartAsLeader, "startAsLeader", false, "Start this instance as the cluster leader")
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.AddPeersAsBackends, "addPeersToLB", true, "Add raft peers to the load-balancer")
// Packet flags
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableMetal, "metal", false, "This will use the Equinix Metal API (requires the token ENV) to update the EIP <-> VIP")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.MetalAPIKey, "metalKey", "", "The API token for authenticating with the Equinix Metal API")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.MetalProject, "metalProject", "", "The name of project already created within Equinix Metal")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.MetalProjectID, "metalProjectID", "", "The ID of project already created within Equinix Metal")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.ProviderConfig, "provider-config", "", "The path to a provider configuration")
// Load Balancer flags
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableLoadBalancer, "lbEnable", false, "Enable a load-balancer on the VIP")
kubeVipCmd.PersistentFlags().BoolVar(&initLoadBalancer.BindToVip, "lbBindToVip", true, "Bind example load balancer to VIP")
kubeVipCmd.PersistentFlags().StringVar(&initLoadBalancer.Type, "lbType", "tcp", "Type of load balancer instance (TCP/HTTP)")
kubeVipCmd.PersistentFlags().StringVar(&initLoadBalancer.Name, "lbName", "Kubeadm Load Balancer", "The name of a load balancer instance")
kubeVipCmd.PersistentFlags().IntVar(&initLoadBalancer.Port, "lbPort", 6443, "Port that load balancer will expose on")
kubeVipCmd.PersistentFlags().IntVar(&initLoadBalancer.BackendPort, "lbBackEndPort", 6444, "A port that all backends may be using (optional)")
// BGP flags
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableBGP, "bgp", false, "This will enable BGP support within kube-vip")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPConfig.RouterID, "bgpRouterID", "", "The routerID for the bgp server")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPConfig.SourceIF, "sourceIF", "", "The source interface for bgp peering (not to be used with sourceIP)")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPConfig.SourceIP, "sourceIP", "", "The source address for bgp peering (not to be used with sourceIF)")
kubeVipCmd.PersistentFlags().Uint32Var(&initConfig.BGPConfig.AS, "localAS", 65000, "The local AS number for the bgp server")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPPeerConfig.Address, "peerAddress", "", "The address of a BGP peer")
kubeVipCmd.PersistentFlags().Uint32Var(&initConfig.BGPPeerConfig.AS, "peerAS", 65000, "The AS number for a BGP peer")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPPeerConfig.Password, "peerPass", "", "The md5 password for a BGP peer")
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.BGPPeerConfig.MultiHop, "multihop", false, "This will enable BGP multihop support")
kubeVipCmd.PersistentFlags().StringSliceVar(&initConfig.BGPPeers, "bgppeers", []string{}, "Comma separated BGP Peer, format: address:as:password:multihop")
// Control plane specific flags
kubeVipCmd.PersistentFlags().StringVarP(&initConfig.Namespace, "namespace", "n", "kube-system", "The namespace that the configuration map is defined within")
// Manage logging
kubeVipCmd.PersistentFlags().Uint32Var(&logLevel, "log", 4, "Set the level of logging")
// Service flags
kubeVipService.Flags().StringVarP(&configMap, "configMap", "c", "plndr", "The configuration map defined within the cluster")
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableControlPane, "controlplane", false, "Enable HA for control plane, hybrid mode")
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableServices, "services", false, "Enable Kubernetes services, hybrid mode")
// Prometheus HTTP Server
kubeVipCmd.PersistentFlags().StringVar(&initConfig.PrometheusHTTPServer, "promethuesHTTPServer", ":2112", "Host and port used to expose Prometheus metrics via an HTTP server")
kubeVipCmd.AddCommand(kubeKubeadm)
kubeVipCmd.AddCommand(kubeManifest)
kubeVipCmd.AddCommand(kubeVipManager)
kubeVipCmd.AddCommand(kubeVipSample)
kubeVipCmd.AddCommand(kubeVipService)
kubeVipCmd.AddCommand(kubeVipStart)
kubeVipCmd.AddCommand(kubeVipVersion)
// Sample commands
kubeVipSample.AddCommand(kubeVipSampleConfig)
kubeVipSample.AddCommand(kubeVipSampleManifest)
}
// Execute - starts the command parsing process
func Execute() {
if err := kubeVipCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
var kubeVipVersion = &cobra.Command{
Use: "version",
Short: "Version and Release information about the Kubernetes Virtual IP Server",
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("Kube-VIP Release Information\n")
fmt.Printf("Version: %s\n", Release.Version)
fmt.Printf("Build: %s\n", Release.Build)
},
}
var kubeVipSample = &cobra.Command{
Use: "sample",
Short: "Generate a Sample configuration",
Run: func(cmd *cobra.Command, args []string) {
cmd.Help()
},
}
var kubeVipService = &cobra.Command{
Use: "service",
Short: "Start the Virtual IP / Load balancer as a service within a Kubernetes cluster",
Run: func(cmd *cobra.Command, args []string) {
// Set the logging level for all subsequent functions
log.SetLevel(log.Level(logLevel))
// parse environment variables, these will overwrite anything loaded or flags
err := kubevip.ParseEnvironment(&initConfig)
if err != nil {
log.Fatalln(err)
}
// Use environment variables as an option to make the manifest clearer
envConfigMap := os.Getenv("vip_configmap")
if envConfigMap != "" {
configMap = envConfigMap
}
// Define the new service manager
mgr, err := manager.New(configMap, &initConfig)
if err != nil {
log.Fatalf("%v", err)
}
// Start the service manager, this will watch the config Map and construct kube-vip services for it
err = mgr.Start()
if err != nil {
log.Fatalf("%v", err)
}
},
}
var kubeVipManager = &cobra.Command{
Use: "manager",
Short: "Start the kube-vip manager",
Run: func(cmd *cobra.Command, args []string) {
// Set the logging level for all subsequent functions
log.SetLevel(log.Level(logLevel))
go servePrometheusHTTPServer(cmd.Context(), PrometheusHTTPServerConfig{
Addr: initConfig.PrometheusHTTPServer,
})
// parse environment variables, these will overwrite anything loaded or flags
err := kubevip.ParseEnvironment(&initConfig)
if err != nil {
log.Fatalln(err)
}
// Use environment variables as an option to make the manifest clearer
envConfigMap := os.Getenv("vip_configmap")
if envConfigMap != "" {
configMap = envConfigMap
}
// If Packet is enabled and there is a provider configuration passed
if initConfig.EnableMetal {
if providerConfig != "" {
providerAPI, providerProject, err := packet.GetPacketConfig(providerConfig)
if err != nil {
log.Fatalf("%v", err)
}
initConfig.MetalAPIKey = providerAPI
initConfig.MetalProject = providerProject
}
}
// Define the new service manager
mgr, err := manager.New(configMap, &initConfig)
if err != nil {
log.Fatalf("%v", err)
}
prometheus.MustRegister(mgr.PrometheusCollector()...)
// Start the service manager, this will watch the config Map and construct kube-vip services for it
err = mgr.Start()
if err != nil {
log.Fatalf("%v", err)
}
},
}
type PrometheusHTTPServerConfig struct {
// Addr sets the http server address used to expose the metric endpoint
Addr string
}
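// servePrometheusHTTPServer exposes the Prometheus metrics endpoint on config.Addr and shuts the
// server down gracefully once the supplied context is cancelled.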
func servePrometheusHTTPServer(ctx context.Context, config PrometheusHTTPServerConfig) {
var err error
mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.Handler())
srv := &http.Server{
Addr: config.Addr,
Handler: mux,
}
go func() {
if err = srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
log.Fatalf("listen:%+s\n", err)
}
}()
log.Printf("server started")
<-ctx.Done()
log.Printf("server stopped")
ctxShutDown, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer func() {
cancel()
}()
if err = srv.Shutdown(ctxShutDown); err != nil {
log.Fatalf("server Shutdown Failed:%+s", err)
}
if err == http.ErrServerClosed {
err = nil
}
}
|
[
"\"vip_configmap\"",
"\"vip_configmap\""
] |
[] |
[
"vip_configmap"
] |
[]
|
["vip_configmap"]
|
go
| 1 | 0 | |
teuthology/worker.py
|
import logging
import os
import subprocess
import sys
import tempfile
import time
import yaml
from datetime import datetime
from teuthology import setup_log_file, install_except_hook
from . import beanstalk
from . import report
from . import safepath
from .config import config as teuth_config
from .config import set_config_attr
from .exceptions import BranchNotFoundError, SkipJob, MaxWhileTries
from .kill import kill_job
from .repo_utils import fetch_qa_suite, fetch_teuthology
log = logging.getLogger(__name__)
start_time = datetime.utcnow()
restart_file_path = '/tmp/teuthology-restart-workers'
stop_file_path = '/tmp/teuthology-stop-workers'
def sentinel(path):
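"""Return True if the sentinel file at `path` was modified after this worker started."""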
if not os.path.exists(path):
return False
file_mtime = datetime.utcfromtimestamp(os.path.getmtime(path))
if file_mtime > start_time:
return True
else:
return False
def restart():
log.info('Restarting...')
args = sys.argv[:]
args.insert(0, sys.executable)
os.execv(sys.executable, args)
def stop():
log.info('Stopping...')
sys.exit(0)
def load_config(ctx=None):
teuth_config.load()
if ctx is not None:
if not os.path.isdir(ctx.archive_dir):
sys.exit("{prog}: archive directory must exist: {path}".format(
prog=os.path.basename(sys.argv[0]),
path=ctx.archive_dir,
))
else:
teuth_config.archive_base = ctx.archive_dir
def main(ctx):
loglevel = logging.INFO
if ctx.verbose:
loglevel = logging.DEBUG
log.setLevel(loglevel)
log_file_path = os.path.join(ctx.log_dir, 'worker.{tube}.{pid}'.format(
pid=os.getpid(), tube=ctx.tube,))
setup_log_file(log_file_path)
install_except_hook()
load_config(ctx=ctx)
set_config_attr(ctx)
connection = beanstalk.connect()
beanstalk.watch_tube(connection, ctx.tube)
result_proc = None
if teuth_config.teuthology_path is None:
fetch_teuthology('master')
fetch_qa_suite('master')
keep_running = True
while keep_running:
# Check to see if we have a teuthology-results process hanging around
# and if so, read its return code so that it can exit.
if result_proc is not None and result_proc.poll() is not None:
log.debug("teuthology-results exited with code: %s",
result_proc.returncode)
result_proc = None
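        # Sentinel files touched after this worker started trigger a restart or
        # a clean shutdown on the next loop iteration.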
if sentinel(restart_file_path):
restart()
elif sentinel(stop_file_path):
stop()
load_config()
job = connection.reserve(timeout=60)
if job is None:
continue
# bury the job so it won't be re-run if it fails
job.bury()
job_id = job.jid
log.info('Reserved job %d', job_id)
log.info('Config is: %s', job.body)
job_config = yaml.safe_load(job.body)
job_config['job_id'] = str(job_id)
if job_config.get('stop_worker'):
keep_running = False
try:
job_config, teuth_bin_path = prep_job(
job_config,
log_file_path,
ctx.archive_dir,
)
run_job(
job_config,
teuth_bin_path,
ctx.archive_dir,
ctx.verbose,
)
except SkipJob:
continue
# This try/except block is to keep the worker from dying when
# beanstalkc throws a SocketError
try:
job.delete()
except Exception:
log.exception("Saw exception while trying to delete job")
def prep_job(job_config, log_file_path, archive_dir):
job_id = job_config['job_id']
safe_archive = safepath.munge(job_config['name'])
job_config['worker_log'] = log_file_path
archive_path_full = os.path.join(
archive_dir, safe_archive, str(job_id))
job_config['archive_path'] = archive_path_full
# If the teuthology branch was not specified, default to master and
# store that value.
teuthology_branch = job_config.get('teuthology_branch', 'master')
job_config['teuthology_branch'] = teuthology_branch
try:
if teuth_config.teuthology_path is not None:
teuth_path = teuth_config.teuthology_path
else:
teuth_path = fetch_teuthology(branch=teuthology_branch)
# For the teuthology tasks, we look for suite_branch, and if we
# don't get that, we look for branch, and fall back to 'master'.
# last-in-suite jobs don't have suite_branch or branch set.
ceph_branch = job_config.get('branch', 'master')
suite_branch = job_config.get('suite_branch', ceph_branch)
suite_repo = job_config.get('suite_repo')
if suite_repo:
teuth_config.ceph_qa_suite_git_url = suite_repo
job_config['suite_path'] = os.path.normpath(os.path.join(
fetch_qa_suite(suite_branch),
job_config.get('suite_relpath', ''),
))
except BranchNotFoundError as exc:
log.exception("Branch not found; marking job as dead")
report.try_push_job_info(
job_config,
dict(status='dead', failure_reason=str(exc))
)
raise SkipJob()
except MaxWhileTries as exc:
log.exception("Failed to fetch or bootstrap; marking job as dead")
report.try_push_job_info(
job_config,
dict(status='dead', failure_reason=str(exc))
)
raise SkipJob()
teuth_bin_path = os.path.join(teuth_path, 'virtualenv', 'bin')
if not os.path.isdir(teuth_bin_path):
raise RuntimeError("teuthology branch %s at %s not bootstrapped!" %
(teuthology_branch, teuth_bin_path))
return job_config, teuth_bin_path
def run_job(job_config, teuth_bin_path, archive_dir, verbose):
safe_archive = safepath.munge(job_config['name'])
if job_config.get('last_in_suite'):
if teuth_config.results_server:
report.try_delete_jobs(job_config['name'], job_config['job_id'])
log.info('Generating results email for %s', job_config['name'])
args = [
os.path.join(teuth_bin_path, 'teuthology-results'),
'--timeout',
str(job_config.get('results_timeout',
teuth_config.results_timeout)),
'--email',
job_config['email'],
'--archive-dir',
os.path.join(archive_dir, safe_archive),
'--name',
job_config['name'],
]
# Execute teuthology-results, passing 'preexec_fn=os.setpgrp' to
# make sure that it will continue to run if this worker process
# dies (e.g. because of a restart)
result_proc = subprocess.Popen(args=args, preexec_fn=os.setpgrp)
log.info("teuthology-results PID: %s", result_proc.pid)
return
log.info('Creating archive dir %s', job_config['archive_path'])
safepath.makedirs('/', job_config['archive_path'])
log.info('Running job %s', job_config['job_id'])
suite_path = job_config['suite_path']
arg = [
os.path.join(teuth_bin_path, 'teuthology'),
]
# The following is for compatibility with older schedulers, from before we
# started merging the contents of job_config['config'] into job_config
# itself.
if 'config' in job_config:
inner_config = job_config.pop('config')
if not isinstance(inner_config, dict):
log.warn("run_job: job_config['config'] isn't a dict, it's a %s",
str(type(inner_config)))
else:
job_config.update(inner_config)
if verbose or job_config['verbose']:
arg.append('-v')
arg.extend([
'--lock',
'--block',
'--owner', job_config['owner'],
'--archive', job_config['archive_path'],
'--name', job_config['name'],
])
if job_config['description'] is not None:
arg.extend(['--description', job_config['description']])
arg.append('--')
with tempfile.NamedTemporaryFile(prefix='teuthology-worker.',
suffix='.tmp',) as tmp:
yaml.safe_dump(data=job_config, stream=tmp)
tmp.flush()
arg.append(tmp.name)
env = os.environ.copy()
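        # Prepend the fetched suite repo to PYTHONPATH so the child teuthology
        # process can import the suite's task modules.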
python_path = env.get('PYTHONPATH', '')
python_path = ':'.join([suite_path, python_path]).strip(':')
env['PYTHONPATH'] = python_path
log.debug("Running: %s" % ' '.join(arg))
p = subprocess.Popen(args=arg, env=env)
log.info("Job archive: %s", job_config['archive_path'])
log.info("Job PID: %s", str(p.pid))
if teuth_config.results_server:
log.info("Running with watchdog")
try:
run_with_watchdog(p, job_config)
except Exception:
log.exception("run_with_watchdog had an unhandled exception")
raise
else:
log.info("Running without watchdog")
# This sleep() is to give the child time to start up and create the
# archive dir.
time.sleep(5)
symlink_worker_log(job_config['worker_log'],
job_config['archive_path'])
p.wait()
if p.returncode != 0:
log.error('Child exited with code %d', p.returncode)
else:
log.info('Success!')
def run_with_watchdog(process, job_config):
job_start_time = datetime.utcnow()
# Only push the information that's relevant to the watchdog, to save db
# load
job_info = dict(
name=job_config['name'],
job_id=job_config['job_id'],
)
# Sleep once outside of the loop to avoid double-posting jobs
time.sleep(teuth_config.watchdog_interval)
symlink_worker_log(job_config['worker_log'], job_config['archive_path'])
while process.poll() is None:
# Kill jobs that have been running longer than the global max
run_time = datetime.utcnow() - job_start_time
total_seconds = run_time.days * 60 * 60 * 24 + run_time.seconds
if total_seconds > teuth_config.max_job_time:
log.warning("Job ran longer than {max}s. Killing...".format(
max=teuth_config.max_job_time))
kill_job(job_info['name'], job_info['job_id'],
teuth_config.archive_base)
        # calling this without a status just updates the job's updated time
report.try_push_job_info(job_info)
time.sleep(teuth_config.watchdog_interval)
# The job finished. Let's make sure paddles knows.
branches_sans_reporting = ('argonaut', 'bobtail', 'cuttlefish', 'dumpling')
if job_config.get('teuthology_branch') in branches_sans_reporting:
# The job ran with a teuthology branch that may not have the reporting
# feature. Let's call teuthology-report (which will be from the master
# branch) to report the job manually.
cmd = "teuthology-report -v -D -r {run_name} -j {job_id}".format(
run_name=job_info['name'],
job_id=job_info['job_id'])
try:
log.info("Executing %s" % cmd)
report_proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
while report_proc.poll() is None:
for line in report_proc.stdout.readlines():
log.info(line.strip())
time.sleep(1)
log.info("Reported results via the teuthology-report command")
except Exception:
log.exception("teuthology-report failed")
else:
# Let's make sure that paddles knows the job is finished. We don't know
# the status, but if it was a pass or fail it will have already been
# reported to paddles. In that case paddles ignores the 'dead' status.
# If the job was killed, paddles will use the 'dead' status.
report.try_push_job_info(job_info, dict(status='dead'))
def symlink_worker_log(worker_log_path, archive_dir):
try:
log.debug("Worker log: %s", worker_log_path)
os.symlink(worker_log_path, os.path.join(archive_dir, 'worker.log'))
except Exception:
log.exception("Failed to symlink worker log")
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
ncappzoo/apps/security-cam/security-picam.py
|
#!/usr/bin/python3
# ****************************************************************************
# Copyright(c) 2017 Intel Corporation.
# License: MIT See LICENSE file in root directory.
# ****************************************************************************
# DIY smart security camera PoC using Raspberry Pi Camera and
# Intel® Movidius™ Neural Compute Stick (NCS)
import os
import sys
import numpy
import select
import ntpath
import argparse
import picamera
import picamera.array
import mvnc.mvncapi as mvnc
from PIL import Image
from time import localtime, strftime
from utils import visualize_output
from utils import deserialize_output
# "Class of interest" - Display detections only if they match this class ID
CLASS_PERSON = 15
# Detection threshold: Minimum confidence to tag as valid detection
CONFIDANCE_THRESHOLD = 0.60 # 60% confident
# Variable to store commandline arguments
ARGS = None
# ---- Step 1: Open the enumerated device and get a handle to it -------------
def open_ncs_device():
# Look for enumerated NCS device(s); quit program if none found.
devices = mvnc.EnumerateDevices()
if len( devices ) == 0:
print( "No devices found" )
quit()
# Get a handle to the first enumerated device and open it
device = mvnc.Device( devices[0] )
device.OpenDevice()
return device
# ---- Step 2: Load a graph file onto the NCS device -------------------------
def load_graph( device ):
# Read the graph file into a buffer
with open( ARGS.graph, mode='rb' ) as f:
blob = f.read()
# Load the graph buffer into the NCS
graph = device.AllocateGraph( blob )
return graph
# ---- Step 3: Pre-process the images ----------------------------------------
def pre_process_image( frame ):
# Read & resize image
    # [Image size is defined by the chosen network, during training]
img = Image.fromarray( frame )
img = img.resize( ARGS.dim )
img = numpy.array( img )
# Mean subtraction & scaling [A common technique used to center the data]
img = img.astype( numpy.float16 )
img = ( img - numpy.float16( ARGS.mean ) ) * ARGS.scale
return img
# ---- Step 4: Read & print inference results from the NCS -------------------
def infer_image( graph, img, frame ):
# Load the image as a half-precision floating point array
graph.LoadTensor( img, 'user object' )
# Get the results from NCS
output, userobj = graph.GetResult()
# Get execution time
inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )
# Deserialize the output into a python dictionary
output_dict = deserialize_output.ssd(
output,
CONFIDANCE_THRESHOLD,
frame.shape )
# Print the results (each image/frame may have multiple objects)
for i in range( 0, output_dict['num_detections'] ):
# Filter a specific class/category
if( output_dict.get( 'detection_classes_' + str(i) ) == CLASS_PERSON ):
cur_time = strftime( "%Y_%m_%d_%H_%M_%S", localtime() )
print( "Person detected on " + cur_time )
# Extract top-left & bottom-right coordinates of detected objects
(y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
(y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]
# Prep string to overlay on the image
display_str = (
labels[output_dict.get('detection_classes_' + str(i))]
+ ": "
+ str( output_dict.get('detection_scores_' + str(i) ) )
+ "%" )
# Overlay bounding boxes, detection class and scores
frame = visualize_output.draw_bounding_box(
y1, x1, y2, x2,
frame,
thickness=4,
color=(255, 255, 0),
display_str=display_str )
# Capture snapshots
img = Image.fromarray( frame )
photo = ( os.path.dirname(os.path.realpath(__file__))
+ "/captures/photo_"
+ cur_time + ".jpg" )
img.save( photo )
# If a display is available, show the image on which inference was performed
if 'DISPLAY' in os.environ:
img.show()
# ---- Step 5: Unload the graph and close the device -------------------------
def close_ncs_device( device, graph ):
graph.DeallocateGraph()
device.CloseDevice()
# ---- Main function (entry point for this script ) --------------------------
def main():
device = open_ncs_device()
graph = load_graph( device )
# Main loop: Capture live stream & send frames to NCS
with picamera.PiCamera() as camera:
with picamera.array.PiRGBArray( camera ) as frame:
while( True ):
camera.resolution = ( 640, 480 )
camera.capture( frame, ARGS.colormode, use_video_port=True )
img = pre_process_image( frame.array )
infer_image( graph, img, frame.array )
# Clear PiRGBArray, so you can re-use it for next capture
frame.seek( 0 )
frame.truncate()
# Run the program until <ENTER> is pressed
i, o, e = select.select( [sys.stdin], [], [], 0.1 )
if( i ):
break
close_ncs_device( device, graph )
# ---- Define 'main' function as the entry point for this script -------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="DIY smart security camera using \
                         Raspberry Pi Camera and Intel® Movidius™ Neural Compute Stick. \
\n Hit <ENTER> to terminate program." )
parser.add_argument( '-g', '--graph', type=str,
default='../../caffe/SSD_MobileNet/graph',
help="Absolute path to the neural network graph file." )
parser.add_argument( '-l', '--labels', type=str,
default='../../caffe/SSD_MobileNet/labels.txt',
help="Absolute path to labels file." )
parser.add_argument( '-M', '--mean', type=float,
nargs='+',
default=[127.5, 127.5, 127.5],
help="',' delimited floating point values for image mean." )
parser.add_argument( '-S', '--scale', type=float,
default=0.00789,
help="Absolute path to labels file." )
parser.add_argument( '-D', '--dim', type=int,
nargs='+',
default=[300, 300],
help="Image dimensions. ex. -D 224 224" )
parser.add_argument( '-c', '--colormode', type=str,
default="bgr",
help="RGB vs BGR color sequence. This is network dependent." )
ARGS = parser.parse_args()
# Load the labels file
labels =[ line.rstrip('\n') for line in
open( ARGS.labels ) if line != 'classes\n']
main()
# ==== End of file ===========================================================
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/jnvportal/wsgi.py
|
"""
WSGI config for jnvportal project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jnvportal.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/main/java/com/github/frankfarrell/blastradius/GitRepository.java
|
package com.github.frankfarrell.blastradius;
import com.github.zafarkhaja.semver.Version;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.api.errors.GitAPIException;
import org.eclipse.jgit.diff.DiffEntry;
import org.eclipse.jgit.errors.IncorrectObjectTypeException;
import org.eclipse.jgit.lib.*;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
import org.eclipse.jgit.treewalk.CanonicalTreeParser;
import org.gradle.api.InvalidUserDataException;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
/**
* Created by frankfarrell on 13/10/2017
*
*/
public class GitRepository {
private static final Logger logger = Logging.getLogger(GitRepository.class);
private final Repository repository;
//Used for lazy evaluation
protected Optional<Version> headVersion;
public GitRepository() throws IOException {
final FileRepositoryBuilder repositoryBuilder = new FileRepositoryBuilder();
repositoryBuilder.findGitDir();
this.repository = repositoryBuilder.build();
}
public GitRepository(final Repository repository){
this.repository = repository;
}
/*
An Optional.empty() result means that changes cannot be determined.
    The client can decide what to do, but it's recommended to deploy everything.
*/
public Optional<List<String>> getPathsThatHaveChanged(final DiffStrategy diffStrategy) throws IOException, GitAPIException {
return getPathsThatHaveChanged(diffStrategy, Optional.empty());
}
public Optional<List<String>> getPathsThatHaveChanged(final DiffStrategy diffStrategy, final Optional<String> previousCommit) throws IOException, GitAPIException {
logger.info("Currently on branch {}", repository.getBranch());
final Optional<CommitIds> commitIds;
//TODO Consider if we want fallbacks here or not -ffarrell
switch (diffStrategy){
case JENKINS_LAST_COMMIT:
commitIds = getCommitIdsFromJenkinsEnvVar();
break;
case PREVIOUS_TAG:
commitIds = getCommitIdsFromPreviousTag();
break;
case PREVIOUS_COMMIT:
commitIds = getCommitIdsFromPreviousCommit();
break;
case SPECIFIC_COMMIT:
if(previousCommit.isPresent()){
commitIds = Optional.of(new CommitIds(repository.resolve(previousCommit.get()) , repository.resolve(Constants.HEAD)));
}
else{
throw new InvalidUserDataException("previousCommit hash must be specified if the SPECIFIC_COMMIT diff strategy is used");
}
break;
default:
throw new RuntimeException("This is impossible, but it makes the compiler happy");
}
if(commitIds.isPresent()){
logger.info("Prev commit id: {}", commitIds.get().previousCommit);
logger.info("Current commit id: {}", commitIds.get().currentCommit);
try{
final List<String> pathsWithDiff = getPathsWithDiff(repository,
commitIds.get().previousCommit,
commitIds.get().currentCommit);
return Optional.of(pathsWithDiff);
}
//If something goes wrong here it probably means that the git ObjectIds are messed up. We'll just deploy everything
catch (Exception e){
return Optional.empty();
}
}
else {
return Optional.empty();
}
}
    //It's just a normal commit. Compare HEAD with HEAD~1
private Optional<CommitIds> getCommitIdsFromPreviousCommit() throws IOException {
logger.info("Comparing to previous head");
final String previousCommitPointer = "HEAD~1";
        //Need to confirm that it returns null if it doesn't exist
final Optional<ObjectId> previousCommitId = Optional.ofNullable(repository.resolve(previousCommitPointer));
if(previousCommitId.isPresent()){
return Optional.of(new CommitIds(previousCommitId.get(), repository.resolve(Constants.HEAD)));
}
else{
//First ever commit
return Optional.empty();
}
}
private Optional<CommitIds> getCommitIdsFromPreviousTag() throws IOException {
final List<Version> versions = getAllVersionsInRepository();
final Map<String, Ref> allTagsOnRepository = repository.getTags();
if(getHeadVersion().isPresent()){
System.out.println("Using head version");
final Version headVer = getHeadVersion().get();
logger.debug("Current version: {}", headVer);
final int indexOfHead = versions.indexOf(headVer);
if (indexOfHead > 0) {
final Version currVersion = versions.get(indexOfHead);
final Version prevVersion = versions.get(indexOfHead - 1);
logger.info("Prev version: {}", prevVersion);
return Optional.of(new CommitIds(allTagsOnRepository.get(prevVersion.toString()).getObjectId(), allTagsOnRepository.get(currVersion.toString()).getObjectId()));
}
//First ever tagged commit, must deploy
else{
return Optional.empty();
}
}
else {
//HEAD is not a tagged commit
if(versions.size()>0){
final Version prevVersion = versions.get(versions.size() - 1);
logger.info("Prev version: {}", prevVersion);
return Optional.of(new CommitIds(allTagsOnRepository.get(prevVersion.toString()).getObjectId(), repository.resolve(Constants.HEAD)));
}
else{
return Optional.empty();
}
}
}
//Running on jenkins with a previous commit
//Using https://wiki.jenkins.io/display/JENKINS/Git+Plugin
private Optional<CommitIds> getCommitIdsFromJenkinsEnvVar() throws IOException {
final Optional<String> jenkinsPreviousSuccessfulCommit = Optional.ofNullable(System.getenv("GIT_PREVIOUS_SUCCESSFUL_COMMIT"));
if(jenkinsPreviousSuccessfulCommit.isPresent()){
final Optional<ObjectId> previousCommitId = Optional.ofNullable(repository.resolve(jenkinsPreviousSuccessfulCommit.get()));
if(previousCommitId.isPresent()){
return Optional.of(new CommitIds(previousCommitId.get(), repository.resolve(Constants.HEAD)));
}
else{
                //This could happen if the previous ref in jenkinsPreviousSuccessfulCommit doesn't exist, e.g. after a rebase
return Optional.empty();
}
}
else{
return Optional.empty();
}
}
protected List<Version> getAllVersionsInRepository() {
return repository
.getTags()
.keySet()
.stream()
.map(key -> {
try {
return Optional.of(Version.valueOf(key));
} catch (Exception e) {
return Optional.<Version>empty();
}
})
.filter(Optional::isPresent)
.map(Optional::get)
.sorted()
.collect(Collectors.toList());
}
//Lazy evaluation
protected synchronized Optional<Version> getHeadVersion() throws IOException {
if(headVersion != null){
return headVersion;
}
else {
final List<String> tagsOnHead = getTagsOnHead();
logger.debug("Tags on head {}", tagsOnHead.size());
tagsOnHead.forEach(tag -> logger.debug("Tag on head {}", tag));
this.headVersion =
tagsOnHead.stream().filter(tag -> {
try {
logger.debug("Trying to turn tag into version for {}", tag);
Version.valueOf(tag);
return true;
} catch (Exception e) {
logger.error("Failed to turn tag into version for {}", tag ,e);
return false;
}
})
.map(Version::valueOf)
.findFirst();
return headVersion;
}
}
protected List<String> getTagsOnHead() throws IOException {
final ObjectId head = repository.resolve(Constants.HEAD);
logger.info("Head is {}", head.toString());
return repository.getTags().entrySet().stream()
.filter(entry -> {
final Boolean value = entry.getValue().getPeeledObjectId() != null &&
entry.getValue().getPeeledObjectId().compareTo(head) == 0;
logger.debug("Comparing {} to {} : result {}", entry.getValue().getPeeledObjectId(), head, value);
return value;
})
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
protected static List<String> getPathsWithDiff(Repository repository, ObjectId previousCommit, ObjectId currentCommit) throws GitAPIException, IOException {
Git git = Git.wrap(repository);
try (ObjectReader reader = repository.newObjectReader()) {
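            // Build tree parsers for the previous and current commits so JGit can diff the two trees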
final RevWalk walk = new RevWalk(repository);
final RevCommit prevRevCommit = walk.parseCommit(previousCommit);
final ObjectId prevTreeId = prevRevCommit.getTree().getId();
final CanonicalTreeParser previousVersionTreeIter = new CanonicalTreeParser(null, reader, prevTreeId);
final RevCommit currRevCommit = walk.parseCommit(currentCommit);
final ObjectId currTreeId = currRevCommit.getTree().getId();
final CanonicalTreeParser currVersionTreeIter = new CanonicalTreeParser(null, reader, currTreeId);
logger.info("Trying git diff {} with {}", prevRevCommit.toString(), currRevCommit.toString()) ;
final List<DiffEntry> diff = git.diff()
.setNewTree(currVersionTreeIter)
.setOldTree(previousVersionTreeIter)
.setShowNameAndStatusOnly(true)
.call();
return diff.stream()
/*
Would be ideal if we could do this, but rename modifications could also include changes
.filter(diffe -> diffe.getChangeType().equals(DiffEntry.ChangeType.MODIFY))
*/
.map(x -> "/" + x.getNewPath())
.peek(diffe -> logger.debug("Diff {}", diffe))
.collect(Collectors.toList());
} catch (IncorrectObjectTypeException e) {
//TODO What does this actually mean? -ffarrell 2017-09-11
throw e;
}
}
private class CommitIds {
final ObjectId previousCommit;
final ObjectId currentCommit;
private CommitIds(final ObjectId previousCommit, final ObjectId currentCommit) {
this.previousCommit = previousCommit;
this.currentCommit = currentCommit;
}
}
}
|
[
"\"GIT_PREVIOUS_SUCCESSFUL_COMMIT\""
] |
[] |
[
"GIT_PREVIOUS_SUCCESSFUL_COMMIT"
] |
[]
|
["GIT_PREVIOUS_SUCCESSFUL_COMMIT"]
|
java
| 1 | 0 | |
pypy/translator/goal/test2/test_app_main.py
|
"""
Tests for the entry point of pypy-c, app_main.py.
"""
import py
import sys, os, re
import autopath
from pypy.tool.udir import udir
banner = sys.version.splitlines()[0]
def relpath(path):
# force 'path' to be a relative path, for testing purposes
curdir = py.path.local()
p = py.path.local(path)
result = []
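    # Climb towards the filesystem root, prefixing '..' for every level, until
    # 'path' can be expressed relative to the directory we have reached.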
while not p.relto(curdir):
result.append(os.pardir)
if curdir == curdir.dirpath():
return str(path) # no relative path found, give up
curdir = curdir.dirpath()
result.append(p.relto(curdir))
return os.path.join(*result)
app_main = os.path.join(autopath.this_dir, os.pardir, 'app_main.py')
app_main = os.path.abspath(app_main)
_counter = 0
def getscript(source):
global _counter
p = udir.join('demo_test_app_main_%d.py' % (_counter,))
_counter += 1
p.write(str(py.code.Source(source)))
return relpath(p)
demo_script = getscript("""
print 'hello'
print 'Name:', __name__
print 'File:', __file__
import sys
print 'Exec:', sys.executable
print 'Argv:', sys.argv
print 'goodbye'
myvalue = 6*7
""")
crashing_demo_script = getscript("""
print 'Hello2'
myvalue2 = 11
ooups
myvalue2 = 22
print 'Goodbye2' # should not be reached
""")
class TestInteraction:
"""
These tests require pexpect (UNIX-only).
http://pexpect.sourceforge.net/
"""
def _spawn(self, *args, **kwds):
try:
import pexpect
except ImportError, e:
py.test.skip(str(e))
else:
# Version is of the style "0.999" or "2.1". Older versions of
# pexpect try to get the fileno of stdin, which generally won't
# work with py.test (due to sys.stdin being a DontReadFromInput
# instance).
version = map(int, pexpect.__version__.split('.'))
# I only tested 0.999 and 2.1. The former does not work, the
# latter does. Feel free to refine this measurement.
# -exarkun, 17/12/2007
if version < [2, 1]:
py.test.skip(
"pexpect version too old, requires 2.1 or newer: %r" % (
pexpect.__version__,))
kwds.setdefault('timeout', 10)
print 'SPAWN:', args, kwds
child = pexpect.spawn(*args, **kwds)
child.logfile = sys.stdout
return child
def spawn(self, argv):
return self._spawn(sys.executable, [app_main] + argv)
def test_interactive(self):
child = self.spawn([])
child.expect('Python ') # banner
child.expect('>>> ') # prompt
child.sendline('[6*7]')
child.expect(re.escape('[42]'))
child.sendline('def f(x):')
child.expect(re.escape('... '))
child.sendline(' return x + 100')
child.expect(re.escape('... '))
child.sendline('')
child.expect('>>> ')
child.sendline('f(98)')
child.expect('198')
child.expect('>>> ')
child.sendline('__name__')
child.expect("'__main__'")
def test_run_script(self):
child = self.spawn([demo_script])
idx = child.expect(['hello', 'Python ', '>>> '])
assert idx == 0 # no banner or prompt
child.expect(re.escape("Name: __main__"))
child.expect(re.escape('File: ' + demo_script))
child.expect(re.escape('Exec: ' + app_main))
child.expect(re.escape('Argv: ' + repr([demo_script])))
child.expect('goodbye')
def test_run_script_with_args(self):
argv = [demo_script, 'hello', 'world']
child = self.spawn(argv)
child.expect(re.escape('Argv: ' + repr(argv)))
child.expect('goodbye')
def test_no_such_script(self):
import errno
msg = os.strerror(errno.ENOENT) # 'No such file or directory'
child = self.spawn(['xxx-no-such-file-xxx'])
child.expect(re.escape(msg))
def test_option_i(self):
argv = [demo_script, 'foo', 'bar']
child = self.spawn(['-i'] + argv)
idx = child.expect(['hello', re.escape(banner)])
assert idx == 0 # no banner
child.expect(re.escape('File: ' + demo_script))
child.expect(re.escape('Argv: ' + repr(argv)))
child.expect('goodbye')
idx = child.expect(['>>> ', re.escape(banner)])
assert idx == 0 # prompt, but still no banner
child.sendline('myvalue * 102')
child.expect('4284')
child.sendline('__name__')
child.expect('__main__')
def test_option_i_crashing(self):
argv = [crashing_demo_script, 'foo', 'bar']
child = self.spawn(['-i'] + argv)
idx = child.expect(['Hello2', re.escape(banner)])
assert idx == 0 # no banner
child.expect('NameError')
child.sendline('myvalue2 * 1001')
child.expect('11011')
child.sendline('import sys; sys.argv')
child.expect(re.escape(repr(argv)))
child.sendline('sys.last_type.__name__')
child.expect(re.escape(repr('NameError')))
def test_options_i_c(self):
child = self.spawn(['-i', '-c', 'x=555'])
idx = child.expect(['>>> ', re.escape(banner)])
assert idx == 0 # prompt, but no banner
child.sendline('x')
child.expect('555')
child.sendline('__name__')
child.expect('__main__')
child.sendline('import sys; sys.argv')
child.expect(re.escape("['-c']"))
def test_options_i_c_crashing(self):
child = self.spawn(['-i', '-c', 'x=666;foobar'])
child.expect('NameError')
idx = child.expect(['>>> ', re.escape(banner)])
assert idx == 0 # prompt, but no banner
child.sendline('x')
child.expect('666')
child.sendline('__name__')
child.expect('__main__')
child.sendline('import sys; sys.argv')
child.expect(re.escape("['-c']"))
child.sendline('sys.last_type.__name__')
child.expect(re.escape(repr('NameError')))
def test_atexit(self):
child = self.spawn([])
child.expect('>>> ')
child.sendline('def f(): print "foobye"')
child.sendline('')
child.sendline('import atexit; atexit.register(f)')
child.sendline('6*7')
child.expect('42')
# pexpect's sendeof() is confused by py.test capturing, though
# I think that it is a bug of sendeof()
old = sys.stdin
try:
sys.stdin = child
child.sendeof()
finally:
sys.stdin = old
child.expect('foobye')
def test_pythonstartup(self):
old = os.environ.get('PYTHONSTARTUP', '')
try:
os.environ['PYTHONSTARTUP'] = crashing_demo_script
child = self.spawn([])
child.expect(re.escape(banner))
child.expect('Traceback')
child.expect('NameError')
child.expect('>>> ')
child.sendline('[myvalue2]')
child.expect(re.escape('[11]'))
child.expect('>>> ')
child = self.spawn(['-i', demo_script])
for line in ['hello', 'goodbye', '>>> ']:
idx = child.expect([line, 'Hello2'])
assert idx == 0 # no PYTHONSTARTUP run here
child.sendline('myvalue2')
child.expect('Traceback')
child.expect('NameError')
finally:
os.environ['PYTHONSTARTUP'] = old
def test_unbuffered(self):
line = 'import os,sys;sys.stdout.write(str(789));os.read(0,1)'
child = self.spawn(['-u', '-c', line])
child.expect('789') # expect to see it before the timeout hits
child.sendline('X')
def test_options_i_m(self):
if sys.platform == "win32":
skip("close_fds is not supported on Windows platforms")
p = os.path.join(autopath.this_dir, 'mymodule.py')
p = os.path.abspath(p)
child = self.spawn(['-i',
'-m', 'pypy.translator.goal.test2.mymodule',
'extra'])
child.expect('mymodule running')
child.expect('Name: __main__')
child.expect(re.escape('File: ' + p))
child.expect(re.escape('Argv: ' + repr([p, 'extra'])))
child.expect('>>> ')
#XXX the following doesn't work on CPython 2.5 either
#child.sendline('somevalue')
#child.expect(re.escape(repr("foobar")))
#child.expect('>>> ')
child.sendline('import sys')
child.sendline('"pypy.translator.goal.test2" in sys.modules')
child.expect('True')
child.sendline('"pypy.translator.goal.test2.mymodule" in sys.modules')
child.expect('False')
def test_options_u_i(self):
if sys.platform == "win32":
skip("close_fds is not supported on Windows platforms")
import subprocess, select, os
python = sys.executable
pipe = subprocess.Popen([python, app_main, "-u", "-i"],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=0, close_fds=True)
iwtd, owtd, ewtd = select.select([pipe.stdout], [], [], 5)
assert iwtd # else we timed out
data = os.read(pipe.stdout.fileno(), 1024)
assert data.startswith('Python')
def test_paste_several_lines_doesnt_mess_prompt(self):
py.test.skip("this can only work if readline is enabled")
child = self.spawn([])
child.expect('>>> ')
child.sendline('if 1:\n print 42\n')
child.expect('... print 42')
child.expect('... ')
child.expect('42')
child.expect('>>> ')
def test_pythoninspect(self):
old = os.environ.get('PYTHONINSPECT', '')
try:
os.environ['PYTHONINSPECT'] = '1'
path = getscript("""
print 6*7
""")
child = self.spawn([path])
child.expect('42')
child.expect('>>> ')
finally:
os.environ['PYTHONINSPECT'] = old
def test_set_pythoninspect(self):
path = getscript("""
import os
os.environ['PYTHONINSPECT'] = '1'
print 6*7
""")
child = self.spawn([path])
child.expect('42')
child.expect('>>> ')
def test_clear_pythoninspect(self):
py.test.skip("obscure difference with CPython -- do we care?")
old = os.environ.get('PYTHONINSPECT', '')
try:
path = getscript("""
import os
del os.environ['PYTHONINSPECT']
""")
child = self.spawn([path])
xxx # do we expect a prompt or not? CPython gives one
finally:
os.environ['PYTHONINSPECT'] = old
def test_stdout_flushes_before_stdin_blocks(self):
# This doesn't really test app_main.py, but a behavior that
# can only be checked on top of py.py with pexpect.
path = getscript("""
import sys
sys.stdout.write('Are you suggesting coconuts migrate? ')
line = sys.stdin.readline()
assert line.rstrip() == 'Not at all. They could be carried.'
print 'A five ounce bird could not carry a one pound coconut.'
""")
py_py = os.path.join(autopath.pypydir, 'bin', 'py.py')
child = self._spawn(sys.executable, [py_py, path])
child.expect('Are you suggesting coconuts migrate?', timeout=120)
child.sendline('Not at all. They could be carried.')
child.expect('A five ounce bird could not carry a one pound coconut.')
def test_no_space_before_argument(self):
child = self.spawn(['-cprint "hel" + "lo"'])
child.expect('hello')
child = self.spawn(['-mpypy.translator.goal.test2.mymodule'])
child.expect('mymodule running')
class TestNonInteractive:
def run(self, cmdline, senddata='', expect_prompt=False,
expect_banner=False):
cmdline = '%s "%s" %s' % (sys.executable, app_main, cmdline)
print 'POPEN:', cmdline
child_in, child_out_err = os.popen4(cmdline)
child_in.write(senddata)
child_in.close()
data = child_out_err.read()
child_out_err.close()
assert (banner in data) == expect_banner # no banner unless expected
assert ('>>> ' in data) == expect_prompt # no prompt unless expected
return data
def test_script_on_stdin(self):
for extraargs, expected_argv in [
('', ['']),
('-', ['-']),
('- hello world', ['-', 'hello', 'world']),
]:
data = self.run('%s < "%s"' % (extraargs, demo_script))
assert "hello" in data
assert "Name: __main__" in data
assert "File: <stdin>" in data
assert ("Exec: " + app_main) in data
assert ("Argv: " + repr(expected_argv)) in data
assert "goodbye" in data
def test_run_crashing_script(self):
data = self.run('"%s"' % (crashing_demo_script,))
assert 'Hello2' in data
assert 'NameError' in data
assert 'Goodbye2' not in data
def test_crashing_script_on_stdin(self):
data = self.run(' < "%s"' % (crashing_demo_script,))
assert 'Hello2' in data
assert 'NameError' in data
assert 'Goodbye2' not in data
def test_option_W(self):
data = self.run('-W d -c "print 42"')
assert '42' in data
data = self.run('-Wd -c "print 42"')
assert '42' in data
def test_option_W_crashing(self):
data = self.run('-W')
assert 'Argument expected for the -W option' in data
def test_option_W_arg_ignored(self):
data = self.run('-Wc')
assert "Invalid -W option ignored: invalid action: 'c'" in data
def test_option_W_arg_ignored2(self):
data = self.run('-W-W')
assert "Invalid -W option ignored: invalid action:" in data
def test_option_c(self):
data = self.run('-c "print 6**5"')
assert '7776' in data
def test_no_pythonstartup(self):
old = os.environ.get('PYTHONSTARTUP', '')
try:
os.environ['PYTHONSTARTUP'] = crashing_demo_script
data = self.run('"%s"' % (demo_script,))
assert 'Hello2' not in data
data = self.run('-c pass')
assert 'Hello2' not in data
finally:
os.environ['PYTHONSTARTUP'] = old
def test_option_m(self):
p = os.path.join(autopath.this_dir, 'mymodule.py')
p = os.path.abspath(p)
data = self.run('-m pypy.translator.goal.test2.mymodule extra')
assert 'mymodule running' in data
assert 'Name: __main__' in data
        # ignoring case for windows. abspath behaves differently from autopath
# concerning drive letters right now.
assert ('File: ' + p) in data
assert ('Argv: ' + repr([p, 'extra'])) in data
def test_pythoninspect_doesnt_override_isatty(self):
old = os.environ.get('PYTHONINSPECT', '')
try:
os.environ['PYTHONINSPECT'] = '1'
data = self.run('', senddata='6*7\nprint 2+3\n')
assert data == '5\n'
finally:
os.environ['PYTHONINSPECT'] = old
def test_i_flag_overrides_isatty(self):
data = self.run('-i', senddata='6*7\nraise SystemExit\n',
expect_prompt=True, expect_banner=True)
assert '42\n' in data
# if a file name is passed, the banner is never printed but
# we get a prompt anyway
cmdline = '-i %s' % getscript("""
print 'hello world'
""")
data = self.run(cmdline, senddata='6*7\nraise SystemExit\n',
expect_prompt=True, expect_banner=False)
assert 'hello world\n' in data
assert '42\n' in data
def test_non_interactive_stdout_fully_buffered(self):
path = getscript(r"""
import sys, time
sys.stdout.write('\x00(STDOUT)\n\x00') # stays in buffers
time.sleep(1)
sys.stderr.write('\x00[STDERR]\n\x00')
time.sleep(1)
# stdout flushed automatically here
""")
cmdline = '%s -u "%s" %s' % (sys.executable, app_main, path)
print 'POPEN:', cmdline
child_in, child_out_err = os.popen4(cmdline)
data = child_out_err.read(11)
assert data == '\x00[STDERR]\n\x00' # from stderr
child_in.close()
data = child_out_err.read(11)
assert data == '\x00(STDOUT)\n\x00' # from stdout
child_out_err.close()
|
[] |
[] |
[
"PYTHONSTARTUP",
"PYTHONINSPECT"
] |
[]
|
["PYTHONSTARTUP", "PYTHONINSPECT"]
|
python
| 2 | 0 | |
neural-nets/run.py
|
import argparse
import os
import sys
import time
from importlib import import_module
import numpy as np
import pandas as pd
import pickle5 as pickle
import torch
from train_eval import training
from utils import (
build_dataset,
get_time_dif,
plot_acc_graph,
plot_CE_graph,
plot_f1score_graph,
plot_logloss_graph,
)
parser = argparse.ArgumentParser(description="Mulitclass Text Classification")
parser.add_argument(
"--model",
type=str,
required=True,
help="choose a model: CNN, BiLSTM", # RCNN, RNN_Attention, BERT is coming soon
)
parser.add_argument(
"--cuda", type=int, default=0, help="choose a cuda from: [0,1,2,3,4,5,6,7]"
)
parser.add_argument("--device", type=str, default="cpu", help="choose cuda or cpu")
parser.add_argument("--visdom", type=bool, default=False, help="use visdom or not")
args = parser.parse_args()
if __name__ == "__main__":
# new parameters
dataset = "./data/result_non_split_strict.pkl"
embedding_path = "./data/pretrained_pubmed400D_for_TEXTCNN.pkl"
with (open(embedding_path, "rb")) as openfile:
embedding = pickle.load(openfile)
model_name = args.model
cuda_num = args.cuda
device = args.device
visdom = args.visdom
# pass to config
x = import_module("models." + model_name)
config = x.Config(dataset, embedding)
config.visdom = visdom
config.device = torch.device(device)
config.cuda_num = cuda_num
if config.device == "cuda":
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
torch.cuda.set_device(config.cuda_num)
# set seeds so the results are the same
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
torch.backends.cudnn.deterministic = True
# build data loaders
print("Loading data...")
start_time = time.time()
class_wts, train_loader, valid_loader = build_dataset(config)
config.class_wts = class_wts
time_dif = get_time_dif(start_time)
print("Time usage for loading data:", time_dif)
# import models
model = x.Model(config).to(config.device)
print(model.parameters)
    if config.visdom:
import visdom
from visualize import Visualizer
tfmt = "%m%d_%H%M%S"
vis = Visualizer(time.strftime(tfmt))
# training
train_loss, valid_loss, val_accs, val_loglosses, val_f1scores = training(
config, model, train_loader, valid_loader
)
# plots
plot_logloss_graph(model, val_loglosses)
plot_acc_graph(model, val_accs)
plot_f1score_graph(model, val_f1scores)
plot_CE_graph(model, train_loss, valid_loss)
|
[] |
[] |
[
"CUDA_DEVICE_ORDER"
] |
[]
|
["CUDA_DEVICE_ORDER"]
|
python
| 1 | 0 | |
worklog.py
|
#!/usr/bin/env python3
import os
import hashlib
import json
import time
import calendar
from datetime import datetime
from flask import Flask, abort, jsonify, request, session
from flask_sqlalchemy import SQLAlchemy
with open(os.environ.get('WORKLOG_ENV') or '.env') as f:
env = json.load(f)
app = Flask(__name__)
app.config.update(env)
db = SQLAlchemy(app)
# TZ
# Helpers
def to_ts(dt):
return calendar.timegm(dt.timetuple())
# Models
class Record(db.Model):
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.Text, nullable=False)
tags = db.Column(db.String(127), default='')
created_at = db.Column(db.DateTime(),
nullable=False, default=datetime.utcnow)
def serialize(self):
return {'id': self.id, 'content': self.content,
'tags': self.tags.split('|'), 'created_at': to_ts(self.created_at)}
# Views
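# Auth: clients must send "Authorization: Bearer <md5(account.password)>" on every request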
def _gen_token():
return hashlib.md5(env['account.password'].encode('utf8')).hexdigest()
def _valid_auth():
return request.headers.get('Authorization', '') == 'Bearer ' + _gen_token()
def _validate_login(username, password):
return env['account.username'] == username and \
env['account.password'] == password
def _search_records(keywords, tags, limit, offset):
query = Record.query.filter(Record.content.like('%%%s%%' % keywords),
Record.tags.like('%%%s%%' % tags))
return query.count(), query.limit(limit).offset(offset).all()
def _get_records(tags, offset, limit):
query = Record.query
if tags:
query = query.filter(Record.tags.like('%%%s%%' % tags))
return query.count(), query.limit(limit).offset(offset).all()
def _add_record(content, tags, **kwargs):
record = Record(content=content, tags='|'.join(tags))
db.session.add(record)
db.session.commit()
return record
def _update_record(id, **kwargs):
record = Record.query.get(id)
for k, v in kwargs.items():
setattr(record, k, v)
db.session.add(record)
db.session.commit()
return record
def _delete_record(id):
record = Record.query.get(id)
if record:
db.session.delete(record)
db.session.commit()
@app.before_request
def require_login():
if not _valid_auth():
abort(401)
@app.route('/')
def index():
return 'Hello world'
@app.route('/api/1/records')
def get_records():
keyword, tags = request.args.get('keyword'), request.args.get('tags')
offset = request.args.get('offset', type=int, default=0)
limit = request.args.get('limit', type=int, default=20)
if keyword:
        n, records = _search_records(keyword, tags, limit, offset)
else:
n, records = _get_records(tags, offset, limit)
return jsonify(msg="OK", data={
'records': [r.serialize() for r in records],
'count': n})
@app.route('/api/1/records', methods=['POST'])
def add_record():
record = _add_record(**request.get_json())
return jsonify(msg="OK", data={'record': record.serialize()})
@app.route('/api/1/records/<int:id>', methods=['PUT'])
def update_record(id):
record = _update_record(id, **request.get_json())
return jsonify(msg="OK", data={'record': record.serialize()})
@app.route('/api/1/records/<int:id>', methods=['DELETE'])
def delete_record(id):
_delete_record(id)
return jsonify(msg="OK")
# RUN
db.create_all()
|
[] |
[] |
[
"WORKLOG_ENV"
] |
[]
|
["WORKLOG_ENV"]
|
python
| 1 | 0 | |
env/environment.go
|
// SPDX-FileCopyrightText: 2020 SAP SE or an SAP affiliate company and Cloud Security Client Go contributors
//
// SPDX-License-Identifier: Apache-2.0
package env
import (
"io/ioutil"
"os"
"strings"
)
// Platform holds the type string of the platform the application runs on
type Platform string
const (
cloudFoundry Platform = "CLOUD_FOUNDRY"
kubernetes Platform = "KUBERNETES"
unknown Platform = "UNKNOWN"
)
func getPlatform() Platform {
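	// Cloud Foundry is detected via the VCAP_SERVICES variable, Kubernetes via the mounted /etc/ias/url file.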
if strings.TrimSpace(os.Getenv("VCAP_SERVICES")) != "" {
return cloudFoundry
}
_, err := ioutil.ReadFile("/etc/ias/url")
if err == nil {
return kubernetes
}
return unknown
}
|
[
"\"VCAP_SERVICES\""
] |
[] |
[
"VCAP_SERVICES"
] |
[]
|
["VCAP_SERVICES"]
|
go
| 1 | 0 | |
service/config_test.go
|
/*
Copyright 2021 Citrix Systems, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"bytes"
"fmt"
"log"
"math/rand"
"testing"
"time"
"os"
"github.com/citrix/adc-nitro-go/resource/config"
)
var client *NitroClient
//Used to generate random config object names
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func randomString(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
func randomIP() string {
return fmt.Sprintf("%d.%d.%d.%d", rand.Intn(125)+1, rand.Intn(252)+1, rand.Intn(252)+1, rand.Intn(252)+1)
}
//init random and client
func init() {
rand.Seed(time.Now().UnixNano())
var err error
client, err = NewNitroClientFromEnv()
if err != nil {
log.Fatal("Could not create a client: ", err)
}
_, ok := os.LookupEnv("NS_LOG") //if NS_LOG has been set then let the client get it from the environment
if !ok {
client.SetLogLevel("INFO")
}
}
func TestMain(m *testing.M) {
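	// Run the test suite, then clear the ADC configuration before exiting with the result code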
r := m.Run()
client.ClearConfig()
os.Exit(r)
}
// Functional tests
func TestClearConfig(t *testing.T) {
err := client.ClearConfig()
if err != nil {
t.Error("Could not clear config: ", err)
}
}
func TestAdd(t *testing.T) {
rndIP := randomIP()
lbName := "test_lb_" + randomString(5)
lb1 := config.Lbvserver{
Name: lbName,
Ipv46: rndIP,
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
_, err := client.AddResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
t.Error("Could not add Lbvserver: ", err)
t.Log("Not continuing test")
return
}
rsrc, err := client.FindResource(Lbvserver.Type(), lbName)
if err != nil {
t.Error("Did not find resource of type ", err, Lbvserver.Type(), ":", lbName)
}
val, ok := rsrc["ipv46"]
if ok {
if val != rndIP {
t.Error("Wrong ipv46 for lb ", lbName, ": ", val)
}
val, ok = rsrc["lbmethod"]
if val != "ROUNDROBIN" {
t.Error("Wrong lbmethod for lb ", lbName, ": ", val)
}
val, ok = rsrc["servicetype"]
if val != "HTTP" {
t.Error("Wrong servicetype for lb ", err, lbName, ": ", val)
}
}
if !ok {
t.Error("Non existent property in retrieved lb ", lbName)
}
svcName := randomString(5)
rndIP2 := randomIP()
service1 := config.Service{
Name: svcName,
Ip: rndIP2,
Port: 80,
Servicetype: "HTTP",
}
client.AddResource(Service.Type(), svcName, &service1)
_, err = client.FindResource(Service.Type(), svcName)
if err != nil {
t.Error("Did not find resource of type ", err, Service.Type(), ":", svcName)
}
}
func TestApply(t *testing.T) {
aclName := "test_acl_" + randomString(5)
acl1 := config.Nsacl{
Aclname: aclName,
Aclaction: "ALLOW",
Srcip: true,
Srcipval: "192.168.11.10",
Destip: true,
Destipval: "192.183.83.11",
Priority: 1100,
}
_, err := client.AddResource(Nsacl.Type(), aclName, &acl1)
if err != nil {
t.Error("Could not add resource Nsacl", err)
t.Log("Cannot continue")
return
}
acls := config.Nsacls{}
client.ApplyResource(Nsacls.Type(), &acls)
readAcls, err := client.FindResourceArray(Nsacl.Type(), aclName)
if err != nil {
t.Error("Did not find resource of type ", Nsacl.Type(), err, ":", aclName)
}
if err == nil {
acl2 := readAcls[0]
t.Log("Found acl, kernelstate= ", acl2["kernelstate"])
if acl2["kernelstate"].(string) != "APPLIED" {
t.Error("ACL created but not APPLIED ", Nsacl.Type(), ":", aclName)
}
}
}
func TestUpdate(t *testing.T) {
rndIP := randomIP()
lbName := "test_lb_" + randomString(5)
lb1 := config.Lbvserver{
Name: lbName,
Ipv46: rndIP,
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
_, err := client.AddResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
t.Error("Could not create LB", err)
t.Log("Cannot continue")
return
}
lb1 = config.Lbvserver{
Name: lbName,
Lbmethod: "LEASTCONNECTION",
}
_, err = client.UpdateResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
t.Error("Could not update LB")
t.Log("Cannot continue")
return
}
rsrc, err := client.FindResource(Lbvserver.Type(), lbName)
if err != nil {
t.Error("Did not find resource of type ", Lbvserver.Type(), ":", lbName, err)
t.Log("Cannot continue")
return
}
val, ok := rsrc["lbmethod"]
if ok {
if val != "LEASTCONNECTION" {
t.Error("Did not update lb method to LEASTCONNECTION")
}
}
if !ok {
t.Error("Failed to retrieve lb vserver object")
}
}
func TestBindUnBind(t *testing.T) {
rndIP := randomIP()
lbName := "test_lb_" + randomString(5)
rndIP2 := randomIP()
svcName := "test_svc_" + randomString(5)
lb1 := config.Lbvserver{
Name: lbName,
Ipv46: rndIP,
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
_, err := client.AddResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
t.Error("Could not create LB", err)
t.Log("Cannot continue")
return
}
service1 := config.Service{
Name: svcName,
Ip: rndIP2,
Port: 80,
Servicetype: "HTTP",
}
_, err = client.AddResource(Service.Type(), svcName, &service1)
if err != nil {
t.Error("Could not create service", err)
t.Log("Cannot continue")
return
}
binding := config.Lbvserverservicebinding{
Name: lbName,
Servicename: svcName,
}
err = client.BindResource(Lbvserver.Type(), lbName, Service.Type(), svcName, &binding)
if err != nil {
t.Error("Could not bind LB to svc", err)
t.Log("Cannot continue")
return
}
exists := client.ResourceBindingExists(Lbvserver.Type(), lbName, Service.Type(), "servicename", svcName)
if !exists {
t.Error("Failed to bind service to lb vserver")
t.Log("Cannot continue")
return
}
err = client.UnbindResource(Lbvserver.Type(), lbName, Service.Type(), svcName, "servicename")
if err != nil {
t.Error("Could not unbind LB to svc", err)
t.Log("Cannot continue")
return
}
exists = client.ResourceBindingExists(Lbvserver.Type(), lbName, Service.Type(), "servicename", svcName)
if exists {
t.Error("Failed to unbind service to lb vserver")
}
}
func TestFindBoundResource(t *testing.T) {
lbName := "test_lb_" + randomString(5)
lb1 := config.Lbvserver{
Name: lbName,
Ipv46: randomIP(),
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
_, err := client.AddResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
t.Error("Failed to add resource of type ", Lbvserver.Type(), ":", "sample_lb_1", err)
t.Log("Cannot continue")
return
}
svcName := "test_svc_" + randomString(5)
service1 := config.Service{
Name: svcName,
Ip: randomIP(),
Port: 80,
Servicetype: "HTTP",
}
_, err = client.AddResource(Service.Type(), svcName, &service1)
if err != nil {
t.Error("Failed to add resource of type ", Service.Type(), ":", svcName, err)
t.Log("Cannot continue")
return
}
binding := config.Lbvserverservicebinding{
Name: lbName,
Servicename: svcName,
}
err = client.BindResource(Lbvserver.Type(), lbName, Service.Type(), svcName, &binding)
if err != nil {
t.Error("Failed to bind resource of type ", Service.Type(), ":", svcName)
t.Log("Cannot continue")
return
}
result, err := client.FindBoundResource(Lbvserver.Type(), lbName, Service.Type(), "servicename", svcName)
if err != nil {
t.Error("Failed to find bound resource of type ", Service.Type(), ":", svcName)
t.Log("Cannot continue")
return
}
//t.Log("Found bound resource ", result)
if result["servicename"] != svcName {
t.Error("Failed to find bound resource of type ", Service.Type(), ":", svcName)
}
}
func TestDelete(t *testing.T) {
rndIP := randomIP()
lbName := "test_lb_" + randomString(5)
lb1 := config.Lbvserver{
Name: lbName,
Ipv46: rndIP,
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
_, err := client.AddResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
t.Error("Could not create LB", err)
t.Log("Cannot continue")
return
}
err = client.DeleteResource(Lbvserver.Type(), lbName)
if err != nil {
t.Error("Could not delete LB", lbName, err)
t.Log("Cannot continue")
return
}
if client.ResourceExists(Lbvserver.Type(), lbName) {
t.Error("Failed to delete ", lbName)
}
}
func TestDeleteWithArgs(t *testing.T) {
monitorName := "test_lb_monitor_" + randomString(5)
lbmonitor := config.Lbmonitor{
Monitorname: monitorName,
Type: "http",
Retries: 20,
Failureretries: 10,
Downtime: 60,
}
_, err := client.AddResource(Lbmonitor.Type(), monitorName, &lbmonitor)
if err != nil {
t.Error("Could not create monitor", err)
t.Log("Cannot continue")
return
}
args := map[string]string{"type": "http"}
err = client.DeleteResourceWithArgsMap(Lbmonitor.Type(), monitorName, args)
if err != nil {
t.Error("Could not delete monitor", monitorName, err)
t.Log("Cannot continue")
return
}
}
func TestEnableFeatures(t *testing.T) {
features := []string{"SSL", "CS"}
err := client.EnableFeatures(features)
if err != nil {
t.Error("Failed to enable features", err)
t.Log("Cannot continue")
return
}
result, err := client.ListEnabledFeatures()
if err != nil {
t.Error("Failed to retrieve features", err)
t.Log("Cannot continue")
return
}
found := 0
for _, f := range features {
for _, r := range result {
if f == r {
found = found + 1
}
}
}
if found != len(features) {
t.Error("Requested features do not match enabled features=", features, "result=", result)
}
}
func TestEnableModes(t *testing.T) {
modes := []string{"ULFD", "MBF"}
err := client.EnableModes(modes)
if err != nil {
t.Error("Failed to enable modes", err)
t.Log("Cannot continue")
return
}
result, err := client.ListEnabledModes()
if err != nil {
t.Error("Failed to retrieve modes", err)
t.Log("Cannot continue")
return
}
found := 0
for _, m := range modes {
for _, r := range result {
if m == r {
found = found + 1
}
}
}
if found != len(modes) {
t.Error("Requested modes do not match enabled modes=", modes, "result=", result)
}
}
func TestSaveConfig(t *testing.T) {
err := client.SaveConfig()
if err != nil {
t.Error("Failed to save config", err)
}
}
func TestFindAllResources(t *testing.T) {
lbName1 := "test_lb_" + randomString(5)
lbName2 := "test_lb_" + randomString(5)
lb1 := config.Lbvserver{
Name: lbName1,
Ipv46: randomIP(),
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
lb2 := config.Lbvserver{
Name: lbName2,
Ipv46: randomIP(),
Lbmethod: "LEASTCONNECTION",
Servicetype: "HTTP",
Port: 8000,
}
_, err := client.AddResource(Lbvserver.Type(), lbName1, &lb1)
if err != nil {
t.Error("Failed to add resource of type ", Lbvserver.Type(), ":", lbName1)
t.Log("Cannot continue")
return
}
_, err = client.AddResource(Lbvserver.Type(), lbName2, &lb2)
if err != nil {
t.Error("Failed to add resource of type ", Lbvserver.Type(), ":", lbName2)
t.Log("Cannot continue")
return
}
rsrcs, err := client.FindAllResources(Lbvserver.Type())
if err != nil {
t.Error("Did not find resources of type ", Lbvserver.Type(), err)
}
if len(rsrcs) < 2 {
t.Error("Found only ", len(rsrcs), " resources of type ", Lbvserver.Type(), " expected at least 2")
}
found := 0
for _, v := range rsrcs {
name := v["name"].(string)
if name == lbName1 || name == lbName2 {
found = found + 1
}
}
if found != 2 {
t.Error("Did not find all configured lbvservers")
}
}
func TestFindAllBoundResources(t *testing.T) {
lbName1 := "test_lb_" + randomString(5)
svcName1 := "test_svc_" + randomString(5)
svcName2 := "test_svc_" + randomString(5)
lb1 := config.Lbvserver{
Name: lbName1,
Ipv46: randomIP(),
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
_, err := client.AddResource(Lbvserver.Type(), lbName1, &lb1)
if err != nil {
t.Error("Could not create LB")
}
service1 := config.Service{
Name: svcName1,
Ip: randomIP(),
Port: 80,
Servicetype: "HTTP",
}
service2 := config.Service{
Name: svcName2,
Ip: randomIP(),
Port: 80,
Servicetype: "HTTP",
}
_, err = client.AddResource(Service.Type(), svcName1, &service1)
if err != nil {
t.Error("Could not create service service1", err)
t.Log("Cannot continue")
return
}
_, err = client.AddResource(Service.Type(), svcName2, &service2)
if err != nil {
t.Error("Could not create service service2", err)
t.Log("Cannot continue")
return
}
binding1 := config.Lbvserverservicebinding{
Name: lbName1,
Servicename: svcName1,
}
binding2 := config.Lbvserverservicebinding{
Name: lbName1,
Servicename: svcName2,
}
err = client.BindResource(Lbvserver.Type(), lbName1, Service.Type(), svcName1, &binding1)
if err != nil {
t.Error("Could not bind service service1")
t.Log("Cannot continue")
return
}
err = client.BindResource(Lbvserver.Type(), lbName1, Service.Type(), svcName2, &binding2)
if err != nil {
t.Error("Could not bind service service2")
t.Log("Cannot continue")
return
}
rsrcs, err := client.FindAllBoundResources(Lbvserver.Type(), lbName1, Service.Type())
if err != nil {
t.Error("Did not find bound resources of type ", Service.Type())
}
if len(rsrcs) < 2 {
t.Error("Found only ", len(rsrcs), " resources of type ", Service.Type(), " expected at least 2")
t.Log("Cannot continue")
return
}
found := 0
for _, v := range rsrcs {
name := v["servicename"].(string)
if name == svcName1 || name == svcName2 {
found = found + 1
}
}
if found != 2 {
t.Error("Did not find all bound services")
}
}
func TestAction(t *testing.T) {
svcGrpName := "test_sg_" + randomString(5)
sg1 := config.Servicegroup{
Servicegroupname: svcGrpName,
Servicetype: "http",
}
_, err := client.AddResource(Servicegroup.Type(), svcGrpName, &sg1)
if err != nil {
t.Error("Could not add resource service group", err)
t.Log("Cannot continue")
return
}
createServer := config.Server{
Ipaddress: "192.168.1.101",
Name: "test-srvr",
}
_, err = client.AddResource(Server.Type(), "test-server", &createServer)
if err != nil {
t.Error("Could not add resource server", err)
t.Log("Cannot continue")
return
}
bindSvcGrpToServer := config.Servicegroupservicegroupmemberbinding{
Servicegroupname: svcGrpName,
Servername: "test-srvr",
Port: 22,
}
_, err = client.AddResource(Servicegroup_servicegroupmember_binding.Type(), "test-svcgroup", &bindSvcGrpToServer)
if err != nil {
t.Error("Could not bind resource server", err)
t.Log("Cannot continue")
return
}
bindSvcGrpToServer2 := config.Servicegroupservicegroupmemberbinding{
Servicegroupname: svcGrpName,
Ip: "192.168.1.102",
Port: 22,
}
_, err = client.AddResource(Servicegroup_servicegroupmember_binding.Type(), "test-svcgroup", &bindSvcGrpToServer2)
if err != nil {
t.Error("Could not bind resource server", err)
t.Log("Cannot continue")
return
}
sg2 := config.Servicegroup{
Servicegroupname: svcGrpName,
Servername: "test-srvr",
Port: 22,
Delay: 100,
Graceful: "YES",
}
err = client.ActOnResource(Servicegroup.Type(), &sg2, "disable")
if err != nil {
t.Error("Could not disable server", err)
t.Log("Cannot continue")
return
}
sg3 := config.Servicegroup{
Servicegroupname: svcGrpName,
Servername: "test-srvr",
Port: 22,
}
err = client.ActOnResource(Servicegroup.Type(), &sg3, "enable")
if err != nil {
t.Error("Could not enable server", err)
t.Log("Cannot continue")
return
}
sg4 := config.Servicegroup{
Servicegroupname: svcGrpName,
Newname: svcGrpName + "-NEW",
}
err = client.ActOnResource(Servicegroup.Type(), &sg4, "rename")
if err != nil {
t.Error("Could not rename servicegroup", err)
t.Log("Cannot continue")
return
}
}
func TestUpdateUnnamedResource(t *testing.T) {
if os.Getenv("ADC_PLATFORM") == "CPX" {
t.Skip("Skipping test not supported by CPX")
}
rnat := config.Rnat{
Natip: "172.17.0.2",
Netmask: "255.255.240.0",
Network: "192.168.16.0",
}
err := client.UpdateUnnamedResource(Rnat.Type(), &rnat)
if err != nil {
t.Error("Could not add Rnat", err)
//t.Log("Cannot continue")
return
}
}
func TestFindFilteredResource(t *testing.T) {
if os.Getenv("ADC_PLATFORM") == "CPX" {
t.Skip("Skipping test not supported by CPX")
}
rnat := config.Rnat{
Natip: "172.17.0.2",
Netmask: "255.255.240.0",
Network: "192.168.16.0",
}
err := client.UpdateUnnamedResource(Rnat.Type(), &rnat)
if err != nil {
t.Error("Could not add Rnat", err)
t.Log("Cannot continue")
return
}
d, err := client.FindFilteredResourceArray(Rnat.Type(), map[string]string{"network": "192.168.16.0", "netmask": "255.255.240.0", "natip": "172.17.0.2"})
if err != nil {
t.Error("Could not find Rnat", err)
t.Log("Cannot continue")
return
}
if len(d) != 1 {
t.Error("Error finding Rnat", fmt.Errorf("Wrong number of RNAT discovered: %d", len(d)))
return
}
rnat2 := d[0]
if rnat2["natip"].(string) == "172.17.0.2" && rnat2["netmask"].(string) == "255.255.240.0" && rnat2["network"].(string) == "192.168.16.0" {
return
} else {
t.Error("Error finding Rnat", fmt.Errorf("Discovered RNAT does not match"))
}
}
// TestDesiredStateServicegroupAPI tests the servicegroup_servicegroupmemberlist_binding API
// which is used to bind multiple IP-only members to a servicegroup in a single Nitro call
func TestDesiredStateServicegroupAPI(t *testing.T) {
svcGrpName := "test_sg_" + randomString(5)
sg1 := config.Servicegroup{
Servicegroupname: svcGrpName,
Servicetype: "http",
Autoscale: "API",
}
_, err := client.AddResource(Servicegroup.Type(), svcGrpName, &sg1)
if err != nil {
t.Error("Could not add resource autoscale service group", err)
t.Log("Cannot continue")
return
}
ipmembers := []config.Members{
{
Ip: "1.1.1.1",
Port: 80,
},
{
Ip: "2.2.2.2",
Port: 80,
},
{
Ip: "3.3.3.3",
Port: 80,
},
}
bindSvcGrpToServer := config.Servicegroupservicegroupmemberlistbinding{
Servicegroupname: svcGrpName,
Members: ipmembers,
}
_, err = client.AddResource(Servicegroup_servicegroupmemberlist_binding.Type(), "test-svcgroup", &bindSvcGrpToServer)
if err != nil {
t.Error("Could not bind resource server", err)
t.Log("Cannot continue")
return
}
}
func TestNullAction(t *testing.T) {
if os.Getenv("ADC_PLATFORM") == "CPX" {
t.Skip("Skipping test not supported by CPX")
}
reboot := config.Reboot{
Warm: true,
}
err := client.ActOnResource("reboot", &reboot, "")
if err != nil {
t.Error("Could not make null action reboot", err)
t.Log("Cannot continue")
return
}
// Add a timeout to wait for instance to be back online
time.Sleep(120 * time.Second)
}
// TestTokenBasedAuth tests token-based authentication and verifies that the session-id is cleared in case of session-expiry
func TestTokenBasedAuth(t *testing.T) {
var err error
err = client.Login()
if err != nil {
t.Error("Login Failed")
return
}
rndIP := randomIP()
lbName := "test_lb_" + randomString(5)
lb1 := config.Lbvserver{
Name: lbName,
Ipv46: rndIP,
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
_, err = client.AddResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
t.Error("Could not add Lbvserver: ", err)
t.Log("Not continuing test")
return
}
rsrc, err := client.FindResource(Lbvserver.Type(), lbName)
if err != nil {
t.Error("Did not find resource of type ", err, Lbvserver.Type(), ":", lbName)
} else {
t.Log("LB-METHOD: ", rsrc["lbmethod"])
}
err = client.DeleteResource(Lbvserver.Type(), lbName)
if err != nil {
t.Error("Could not delete LB", lbName, err)
t.Log("Cannot continue")
return
}
err = client.Logout()
if err != nil {
t.Error("Logout Failed")
return
}
// Test if session-id is cleared in case of session-expiry
client.timeout = 10
client.Login()
time.Sleep(15 * time.Second)
_, err = client.AddResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
if client.IsLoggedIn() {
t.Error("Sessionid not cleared")
return
}
t.Log("sessionid cleared because of session-expiry")
} else {
t.Error("Adding lbvserver should have failed because of session-expiry")
}
}
func TestConstructQueryString(t *testing.T) {
var buf bytes.Buffer
log.SetOutput(&buf)
generateTestCase := func(findParams FindParams, expected string) func(t *testing.T) {
return func(t *testing.T) {
output := constructQueryString(&findParams)
if output != expected {
t.Log(buf.String())
t.Logf("Expected output \"%s\"", expected)
t.Logf("Actual output \"%s\"", output)
t.Fail()
}
}
}
var argsMap, attrsMap, filterMap map[string]string
var findParams FindParams
argsMap = make(map[string]string)
argsMap["hello"] = "bye"
findParams = FindParams{
ArgsMap: argsMap,
}
t.Run("CASE=1", generateTestCase(findParams, "?args=hello:bye"))
argsMap["bye"] = "hello"
findParams = FindParams{
ArgsMap: argsMap,
}
t.Run("CASE=2", generateTestCase(findParams, "?args=bye:hello,hello:bye"))
attrsMap = make(map[string]string)
attrsMap["bye"] = "hello"
findParams = FindParams{
AttrsMap: attrsMap,
}
t.Run("CASE=3", generateTestCase(findParams, "?attrs=bye:hello"))
attrsMap["hello"] = "bye"
t.Run("CASE=4", generateTestCase(findParams, "?attrs=bye:hello,hello:bye"))
filterMap = make(map[string]string)
filterMap["bye"] = "hello"
findParams = FindParams{
FilterMap: filterMap,
}
t.Run("CASE=5", generateTestCase(findParams, "?filter=bye:hello"))
filterMap["hello"] = "bye"
t.Run("CASE=6", generateTestCase(findParams, "?filter=bye:hello,hello:bye"))
filterMap = make(map[string]string)
attrsMap = make(map[string]string)
argsMap = make(map[string]string)
filterMap["bye"] = "hello"
attrsMap["bye"] = "hello"
argsMap["bye"] = "hello"
findParams = FindParams{
FilterMap: filterMap,
ArgsMap: argsMap,
AttrsMap: attrsMap,
}
t.Run("CASE=7", generateTestCase(findParams, "?args=bye:hello&filter=bye:hello&attrs=bye:hello"))
filterMap["hello"] = "bye"
attrsMap["hello"] = "bye"
argsMap["hello"] = "bye"
expected := "?args=bye:hello,hello:bye&filter=bye:hello,hello:bye&attrs=bye:hello,hello:bye"
t.Run("CASE=8", generateTestCase(findParams, expected))
}
func TestConstructUrlPathString(t *testing.T) {
var buf bytes.Buffer
log.SetOutput(&buf)
generateTestCase := func(findParams FindParams, expected string) func(t *testing.T) {
return func(t *testing.T) {
output := constructUrlPathString(&findParams)
if output != expected {
t.Log(buf.String())
t.Logf("Expected output \"%s\"", expected)
t.Logf("Actual output \"%s\"", output)
t.Fail()
}
}
}
var findParams FindParams
findParams = FindParams{
ResourceType: "resourcetype",
}
t.Run("CASE=1", generateTestCase(findParams, "resourcetype"))
findParams = FindParams{
ResourceName: "resourcename",
}
t.Run("CASE=2", generateTestCase(findParams, "resourcename"))
findParams = FindParams{
ResourceType: "resourcetype",
ResourceName: "resourcename",
}
t.Run("CASE=3", generateTestCase(findParams, "resourcetype/resourcename"))
}
func TestFindResourceArrayWithParams(t *testing.T) {
var buf bytes.Buffer
log.SetOutput(&buf)
testCase1 := func(t *testing.T) {
findParams := FindParams{
ResourceType: "lbvserver",
ResourceName: "definitelynotexists",
ResourceMissingErrorCode: 258,
}
resource, err := client.FindResourceArrayWithParams(findParams)
hasErrors := false
if err != nil {
hasErrors = true
t.Logf("Error from NITRO request: %s", err.Error())
}
if len(resource) > 0 {
hasErrors = true
t.Logf("Resource array not empty")
}
if hasErrors {
t.Log(buf.String())
t.Fail()
}
}
t.Run("CASE=1", testCase1)
testCase2 := func(t *testing.T) {
argsMap := make(map[string]string)
argsMap["filename"] = "ns.conf"
argsMap["filelocation"] = "%2Fnsconfig"
findParams := FindParams{
ResourceType: "systemfile",
ArgsMap: argsMap,
}
resource, err := client.FindResourceArrayWithParams(findParams)
hasErrors := false
if err != nil {
hasErrors = true
t.Logf("Error from NITRO request: %s", err.Error())
}
if len(resource) != 1 {
hasErrors = true
t.Logf("Resource array not exactly 1")
}
if hasErrors {
t.Log(buf.String())
t.Fail()
}
}
t.Run("CASE=2", testCase2)
testCase3 := func(t *testing.T) {
argsMap := make(map[string]string)
//argsMap["filename"] = "ns.conf"
argsMap["filelocation"] = "%2Fnsconfig"
findParams := FindParams{
ResourceType: "systemfile",
ArgsMap: argsMap,
}
resource, err := client.FindResourceArrayWithParams(findParams)
hasErrors := false
if err != nil {
hasErrors = true
t.Logf("Error from NITRO request: %s", err.Error())
}
if len(resource) <= 1 {
hasErrors = true
t.Logf("Resource array len not > 1")
}
if hasErrors {
t.Log(buf.String())
t.Fail()
}
}
t.Run("CASE=3", testCase3)
testCase4 := func(t *testing.T) {
t.Skipf("TODO: find a resource for which NITRO returns a map instead of an array")
}
t.Run("CASE=4", testCase4)
}
|
[
"\"ADC_PLATFORM\"",
"\"ADC_PLATFORM\"",
"\"ADC_PLATFORM\""
] |
[] |
[
"ADC_PLATFORM"
] |
[]
|
["ADC_PLATFORM"]
|
go
| 1 | 0 | |
polarmine/collectors/twitter_collector.py
|
import os
import time
import math
import treelib
import tweepy
import itertools
import urllib
import validators
from typing import Optional, Iterator
from polarmine.collectors.collector import Collector
from polarmine.thread import Thread
from polarmine.comment import Comment
from polarmine.tweepy import APIv2
QUOTE_MIN_REPLIES = 1
TWEET_MIN_REPLIES = 1
def safe(func):
def inner(*args, **kwargs):
passed = False
while not passed:
try:
ret = func(*args, **kwargs)
passed = True
except Exception:
time.sleep(1)
return ret
return inner
@safe
def safe_list(iterable):
return list(iterable)
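# NOTE: `safe` retries the wrapped call indefinitely, sleeping one second
# between attempts, so a persistent (non-transient) failure will keep the
# caller spinning; `safe_list` wraps list() so that materialising a tweepy
# cursor survives sporadic API errors.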
class TwitterCollector(Collector):
"""TwitterCollector."""
def __init__(self, **kwargs):
super(TwitterCollector, self).__init__(**kwargs)
(
consumer_key,
consumer_secret,
access_key,
access_secret,
) = self.__get_keys__()
# authorize twitter, initialize tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
self.twitter = APIv2(auth, wait_on_rate_limit=True, retry_delay=10)
def __get_keys__(self):
"""Retrieve twitter keys from environment"""
consumer_key = os.getenv("TWITTER_CONSUMER_KEY")
consumer_secret = os.getenv("TWITTER_CONSUMER_SECRET")
access_key = os.getenv("TWITTER_ACCESS_KEY")
access_secret = os.getenv("TWITTER_ACCESS_SECRET")
if (
consumer_secret is None
or consumer_key is None
or access_key is None
or access_secret is None
):
raise Exception(
"You didn't properly setup twitter \
environment variable, follow the README"
)
return consumer_key, consumer_secret, access_key, access_secret
def __find_statuses__(
self,
ncontents: int,
keyword: Optional[str],
page: Optional[str],
) -> list[tweepy.Status]:
"""Find n statuses containing `keyword` or from a certain user `page`.
Either `keyword` or `page` must not be None
Args:
ncontents (int): the number of statuses to find
keyword (Optional[str]): the keyword used to filter tweets
page (Optional[str]): the user from which tweets are retrieved
Returns:
list[tweepy.Status]: a list of statuses
"""
nstatuses_found = 0
statuses_found = []
max_id = None
while nstatuses_found < ncontents:
nstatuses_remaining = ncontents - nstatuses_found
if keyword is not None:
cursor = tweepy.Cursor(
self.twitter.search,
q=f"{keyword} min_replies:{TWEET_MIN_REPLIES} -filter:replies",
tweet_mode="extended",
max_id=max_id,
)
elif page is not None:
cursor = tweepy.Cursor(
self.twitter.user_timeline,
screen_name=page,
tweet_mode="extended",
exclude_replies=True,
max_id=max_id,
)
else:
raise NotImplementedError
# get the remaining number of statuses
statuses_last = list(cursor.items(nstatuses_remaining))
statuses_found.extend(statuses_last)
if len(statuses_last) > 0:
# use the id of the last (least recent) status retrieved
# it is also the last in the list as the statuses are ordered
# from the most recent to the oldest
status_last = statuses_last[-1]
# need to subtract 1 since in the API it is specified that the
# tweet id equal to max_id is considered
max_id = status_last.id - 1
nstatuses_found += len(statuses_last)
return statuses_found
def __status_to_shares__(self, status: tweepy.Status) -> Iterator:
"""Find statuses which sharing the same external url of the given status
Args:
status (tweepy.Status): status from which external url is extracted, if present
Returns:
Iterator: an iterator over statuses sharing the same url
"""
# check if url is present in the provided status (it is supposed to be at the end)
url = status.full_text.split()[-1]
# check if it is a valid url
if validators.url(url):
try:
url_redirected = urllib.request.urlopen(url).url
except urllib.error.HTTPError:
# generic error, happens when tweet has some images?
return iter([])
except (UnicodeDecodeError, UnicodeEncodeError):
# unicode error, there are some invalid ASCII characters in the request
return iter([])
except urllib.error.URLError:
# certificate errors, may happen when the url is quite strange
return iter([])
except ConnectionResetError:
return iter([])
url_parsed = urllib.parse.urlparse(url_redirected)
# remove query parameters from the url
url_cleaned = urllib.parse.urljoin(url_redirected, url_parsed.path)
query = f"{url_cleaned} min_replies:{TWEET_MIN_REPLIES}"
cursor = tweepy.Cursor(
self.twitter.search, q=query, tweet_mode="extended"
)
return iter(safe_list(cursor.items()))
else:
return iter([])
def __status_to_discussion_tree__(
self,
status: tweepy.Status,
root_data: object = None,
limit: int = 10000,
) -> treelib.Tree:
"""Retrieve discussion tree associated to a certain status
Args:
status (tweepy.Status): the status for which replies are looked for
root_data (dict): if not None, it is used as `data` for the root
node of the resulting thread, otherwise a Comment object is
attached
limit (int): maximum number of tweets to check when looking
for replies
Returns:
treelib.Tree: the Tree of comment replies
"""
status_author_name = status.author.screen_name
status_id = status.id
# tree object storing comment thread
discussion_tree = treelib.Tree()
if root_data is not None:
# use provided data
discussion_tree.create_node(
tag=hash(status_author_name),
identifier=status_id,
data=root_data,
)
else:
# create comment object, associated to root node of this tree
# the tag of the node is the author of the tweet
# this branch is typically taken by quote replies
comment_text = status.full_text
comment_author = hash(status.author.screen_name)
comment_time = status.created_at.timestamp()
comment = Comment(comment_text, comment_author, comment_time)
discussion_tree.create_node(
tag=comment.author, identifier=status_id, data=comment
)
# get dictionary of replies
# where each key is the str(id) of a tweet and the value is the
# list of id of the replies to it
replies_dict = self.twitter.get_replies_id(
status_id, status_author_name
)
# initially the queue will contain only the root node
queue = [status_id]
i = 0
while len(queue) > 0 and i < limit:
reply = queue.pop(0)
# replies to the current reply
reply_replies = replies_dict.get(reply, [])
# request statuses in batches of 100 (the lookup API limit); use a separate
# loop variable so the outer `i` counter towards `limit` is not clobbered
for batch in range(math.ceil(len(reply_replies) / 100)):
# cycle to handle "Connection reset by peer"
fetched = False
while not fetched:
try:
# probably needs int instead of string
statuses_batch = self.twitter.statuses_lookup(
reply_replies[batch * 100 : (batch + 1) * 100],
tweet_mode="extended",
)
fetched = True
except tweepy.error.TweepError:
print("Connection problems")
for s in statuses_batch:
# create comment object, associated to node
# the tag of the node is the author of the tweet
comment_id = s.id
comment_text = s.full_text
comment_author = hash(s.author.screen_name)
comment_time = s.created_at.timestamp()
comment = Comment(
comment_text, comment_author, comment_time
)
discussion_tree.create_node(
tag=comment_author,
identifier=comment_id,
data=comment,
parent=reply,
)
queue.extend(reply_replies)
i += 1
return discussion_tree
def __status_to_content__(self, status: tweepy.Status) -> str:
"""Find content associated with the status, which will correspond to
the url it shared if present, otherwise to the status url
Args:
status (tweepy.Status): status from which content is extracted
Returns:
str: the url of the content
"""
# url of the status, possibly used as content url if no valid url
# is found at the end of the tweet itself
status_url = f"https://twitter.com/user/status/{status.id}"
# check if url is present in the provided status (it is supposed to be at the end)
status_text_url = status.full_text.split()[-1]
# check if it is a valid url
if validators.url(status_text_url):
try:
url_redirected = urllib.request.urlopen(status_text_url).url
except urllib.error.HTTPError:
# generic error, happens when tweet has some images?
return status_url
except (UnicodeDecodeError, UnicodeEncodeError):
# unicode error, there are some invalid ASCII characters in the request
return status_url
except urllib.error.URLError:
# certificate errors, may happen when the url is quite strange
return status_url
url_parsed = urllib.parse.urlparse(url_redirected)
# remove query parameters from the url
url_cleaned = urllib.parse.urljoin(url_redirected, url_parsed.path)
return url_cleaned
else:
return status_url
def __status_to_discussion_trees__(
self,
status: tweepy.Status,
keyword: str,
limit: int,
cross: bool,
exclude_share: set,
) -> list[treelib.Tree]:
"""Find threads of comments associated to a certain status
Args:
status (tweepy.Status): the status for which threads are looked for
keyword (str): the keyword used to filter status
limit (int): maximum number of tweets to check when looking
for replies
exclude_share (set(str)): a set of contents for which cross is
not performed even if `cross` is True. If None it is ignored, otherwise
it will be updated with the content if not present in the set
Returns:
list[treelib.Tree]: the discussions trees associated with the
status. The root node,
corresponding to a root status, is associated with a
`Thread` object in the node `data` while the other nodes have
a `Comment` object
"""
status_author_name = status.author.screen_name
status_id = status.id
# retrieve content url, associated to threads
content = self.__status_to_content__(status)
thread_url = f"https://twitter.com/user/status/{status_id}"
thread_text = status.full_text
thread_time = status.created_at.timestamp()
thread_author = hash(status_author_name)
thread = Thread(
thread_url,
thread_text,
thread_time,
thread_author,
content,
keyword,
)
# thread/tree including only replies to original status
thread = self.__status_to_discussion_tree__(status, thread, limit)
if cross:
# add quote tweets of the obtained tweets
# for each tweet search the twitter url, requiring at least
# QUOTE_MIN_REPLIES reply
query = f"https://twitter.com/{status_author_name}/status/{status_id} min_replies:{QUOTE_MIN_REPLIES}"
# cursor over quotes of the status
cursor_quote = tweepy.Cursor(
self.twitter.search, q=query, tweet_mode="extended"
)
try:
for quote_reply in cursor_quote.items():
# exclude quote replies which also reply to some tweet to
# prevent having duplicates (which would be detected among the
# normal replies of the root tweet). This is a rare case, yet
# some fancy guys like doing it. This is an extreme solution,
# in fact it would suffice to check that the current tweet
# replies to another tweet which has not already been fetched
# nor will be
if quote_reply.in_reply_to_status_id is None:
# quote replies can be handled as normal status since their
# text is the reply (without including the quote)
discussion_subtree = (
self.__status_to_discussion_tree__(
quote_reply, limit=limit
)
)
# add subthread as children of the root
thread.paste(status_id, discussion_subtree)
except tweepy.TweepError:
pass
if exclude_share is None or content not in exclude_share:
if exclude_share is not None:
exclude_share.add(content)
# tweets which share the same url (usually pointing to an
# external site)
for status_share in self.__status_to_shares__(status):
# skip the original author's tweet
if status_share.id == status_id:
continue
# create content object, associated to root node
thread_share_url = (
f"https://twitter.com/user/status/{status_share.id}"
)
thread_share_text = status_share.full_text
thread_share_time = status_share.created_at.timestamp()
thread_share_author = hash(status_share.author.screen_name)
thread_share = Thread(
thread_share_url,
thread_share_text,
thread_share_time,
thread_share_author,
content,
keyword,
)
discussion_tree_share = self.__status_to_discussion_tree__(
status_share, limit=limit, root_data=thread_share
)
yield discussion_tree_share
yield thread
def collect(
self,
ncontents: int,
keyword: str = None,
page: str = None,
limit: int = 10000,
cross: bool = True,
) -> list[treelib.Tree]:
"""collect content and their relative comment threads
Args:
ncontents: number of contents to find
keyword (Optional[str]): keyword used for filtering content.
If page is not None then it is ignored
page (Optional[str]): the starting page from which content is
found.
limit (int): maximum number of tweets to check when looking
for replies
cross (bool): if True, also include quote tweets of the found statuses
and tweets sharing the same content url in the result
Returns:
list[Tree]: a list of trees, each associated to a thread.
The root node is associated to the discussion root and its `data`
is a Thread object, while for the other nodes it is a `Comment`
"""
statuses = self.__find_statuses__(ncontents, keyword, page)
discussion_trees = iter([])
# set used to track mined contents; this is necessary because some
# twitter accounts like @nytimes tweet the same link many times, so
# contents already in this set are not mined again through tweets
# sharing the same content/url. Also, since @nytimes and other
# similar accounts (like @foxnews) add query parameters to the
# link, their tweets will not be mined twice. Notice that if a user
# tweets the very same url twice without query parameters then it
# will be mined more than once.
# Because generators are used, this set is only populated once the
# discussion_trees are actually consumed, so you probably don't want
# to use it elsewhere
contents = set()
for status in statuses:
content_discussion_trees = self.__status_to_discussion_trees__(
status, keyword, limit, cross, contents
)
discussion_trees = itertools.chain(
discussion_trees, content_discussion_trees
)
return discussion_trees
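# Usage sketch (hypothetical keyword/limit values; requires the TWITTER_*
# variables read in __get_keys__ to be exported, and assumes the base
# Collector needs no extra constructor arguments):
#
#     collector = TwitterCollector()
#     for tree in collector.collect(5, keyword="climate", limit=1000, cross=False):
#         print(tree.size())  # number of tweets in the discussion tree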
|
[] |
[] |
[
"TWITTER_CONSUMER_SECRET",
"TWITTER_ACCESS_SECRET",
"TWITTER_CONSUMER_KEY",
"TWITTER_ACCESS_KEY"
] |
[]
|
["TWITTER_CONSUMER_SECRET", "TWITTER_ACCESS_SECRET", "TWITTER_CONSUMER_KEY", "TWITTER_ACCESS_KEY"]
|
python
| 4 | 0 | |
test/helpers/oc/runner.go
|
package oc
import (
"bytes"
"fmt"
"os"
osexec "os/exec"
"strings"
"time"
"github.com/ViaQ/logerr/log"
)
// Runner is for executing the command. It provides implementation for
// the methods in oc.Command interface.
// Other commands like oc.Exec, oc.Get, oc.Literal collect their arguments
// and use Runner to run the command with arguments.
// It provides different modes of executing the commands, Run/RunFor/Output/OutputFor
//
// As far as possible, it is to be kept independent of oc command syntax.
// TODO(vimalk78)
// Move KUBECONFIG out from here
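// Typical flow (sketch; the exact builder, e.g. oc.Literal or oc.Exec, is
// whatever sets collectArgsFunc): compose the command, then call Run() to get
// the captured output, RunFor(d) to kill the process after the timeout d, or
// Output()/OutputFor(d) to stream straight to stdout/stderr instead.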
// CMD is the command to be run by the runner
const CMD string = "oc"
// runner encapsulates os/exec/Cmd, collects args, and runs CMD
type runner struct {
*osexec.Cmd
args []string
configPath string
// This must be set by oc.Commands to collect arguments before calling Run
collectArgsFunc func() []string
tostdout bool
err error
}
func (r *runner) Run() (string, error) {
if r.err != nil {
return "composed command failed", r.err
}
r.setArgs(r.collectArgsFunc())
// never write to this channel
return r.runCmd(make(chan time.Time, 1))
}
func (r *runner) runCmd(timeoutCh <-chan time.Time) (string, error) {
// #nosec G204
r.Cmd = osexec.Command(CMD, r.args...)
var outbuf bytes.Buffer
var errbuf bytes.Buffer
if r.tostdout {
r.Cmd.Stdout = os.Stdout
r.Cmd.Stderr = os.Stderr
} else {
r.Cmd.Stdout = &outbuf
r.Cmd.Stderr = &errbuf
}
r.Cmd.Env = []string{fmt.Sprintf("%s=%s", "KUBECONFIG", os.Getenv("KUBECONFIG"))}
cmdargs := strings.Join(r.args, " ")
err := r.Cmd.Start()
if err != nil {
log.Error(err, "could not start oc command", "arguments", cmdargs)
return "", err
}
// Wait for the process to finish or kill it after a timeout (whichever happens first):
done := make(chan error, 1)
go func() {
done <- r.Cmd.Wait()
}()
select {
case <-timeoutCh:
if err = r.Cmd.Process.Kill(); err != nil {
log.Error(err, "failed to kill process: ")
}
case err = <-done:
if err != nil {
log.Error(err, "oc finished with error = %v")
}
}
if err != nil {
if r.tostdout {
return "", err
}
errout := strings.TrimSpace(errbuf.String())
log.Info("command result", "arguments", cmdargs, "output", errout, "error", err)
return errout, err
}
if r.tostdout {
return "", nil
}
out := strings.TrimSpace(outbuf.String())
if len(out) > 500 {
log.Info("output(truncated 500/length)", "arguments", cmdargs, "length", len(out), "result", truncateString(out, 500))
} else {
log.Info("command output", "arguments", cmdargs, "output", out)
}
return out, nil
}
func (r *runner) RunFor(d time.Duration) (string, error) {
if r.err != nil {
return "composed command failed", r.err
}
r.setArgs(r.collectArgsFunc())
return r.runCmd(time.After(d))
}
func (r *runner) Kill() error {
if r.Process != nil {
return r.Process.Kill()
}
return nil
}
func (r *runner) Output() error {
r.tostdout = true
_, err := r.Run()
return err
}
func (r *runner) OutputFor(d time.Duration) error {
r.tostdout = true
_, err := r.RunFor(d)
return err
}
func (r *runner) String() string {
if r.configPath != "" {
return fmt.Sprintf("%s --config %s", CMD, r.configPath)
}
return CMD
}
func sanitizeArgStr(argstr string) string {
return strings.Join(sanitizeArgs(argstr), " ")
}
// sanitize the args, removes any unwanted spaces
func sanitizeArgs(argstr string) []string {
outargs := []string{}
args := strings.Split(argstr, " ")
for _, arg := range args {
arg = strings.TrimSpace(arg)
if arg != "" {
outargs = append(outargs, arg)
}
}
return outargs
}
func (r *runner) setArgs(args []string) {
r.args = args
}
func truncateString(str string, num int) string {
trunc := str
if len(str) > num {
if num > 4 {
num -= 4
}
trunc = str[0:num] + " ..."
}
return trunc
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
tools/make_distrib.py
|
# Copyright (c) 2011 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from __future__ import absolute_import
from __future__ import print_function
from cef_version import VersionFormatter
from date_util import *
from exec_util import exec_cmd
from file_util import *
import git_util as git
from io import open
from make_cmake import process_cmake_template
from optparse import OptionParser
import os
import re
import shlex
import subprocess
import sys
import tarfile
import zipfile
def create_zip_archive(input_dir):
""" Creates a zip archive of the specified input directory. """
zip_file = input_dir + '.zip'
zf = zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED, True)
def addDir(dir):
for f in os.listdir(dir):
full_path = os.path.join(dir, f)
if os.path.isdir(full_path):
addDir(full_path)
else:
zf.write(full_path, os.path.relpath(full_path, \
os.path.join(input_dir, os.pardir)))
addDir(input_dir)
zf.close()
def create_tar_archive(input_dir, format):
""" Creates a tar archive of the specified input directory. """
# Supported formats include "gz" and "bz2".
tar_file = input_dir + '.tar.' + format
tf = tarfile.open(tar_file, "w:" + format)
# The default tar format changed from GNU_FORMAT to PAX_FORMAT in Python 3.8.
# However, PAX_FORMAT generates additional @PaxHeader entries and truncates file
# names on Windows, so we'll stick with the previous default.
tf.format = tarfile.GNU_FORMAT
tf.add(input_dir, arcname=os.path.basename(input_dir))
tf.close()
def create_7z_archive(input_dir, format):
""" Creates a 7z archive of the specified input directory. """
# CEF_COMMAND_7ZIP might be "c:\Program Files (x86)\7Zip\7z.exe" or /usr/bin/7za
# or simply 7z if the user knows that it's in the PATH var. Supported formats
# depend on the 7za version -- check the 7-zip documentation for details.
command = os.environ['CEF_COMMAND_7ZIP']
working_dir = os.path.abspath(os.path.join(input_dir, os.pardir))
tar_file = None
if format in ('xz', 'gzip', 'bzip2'):
# These formats only support one file per archive. Create a tar file first.
tar_file = input_dir + '.tar'
run('"%s" a -ttar -y %s %s' % (command, tar_file, input_dir), working_dir)
zip_file = tar_file + '.' + format
zip_input = tar_file
else:
zip_file = input_dir + '.' + format
zip_input = input_dir
# Create the compressed archive.
run('"%s" a -t%s -y %s %s' % (command, format, zip_file, zip_input),
working_dir)
if not tar_file is None:
remove_file(tar_file)
def create_output_dir(name, parent_dir):
""" Creates an output directory and adds the path to the archive list. """
output_dir = os.path.abspath(os.path.join(parent_dir, name))
remove_dir(output_dir, options.quiet)
make_dir(output_dir, options.quiet)
archive_dirs.append(output_dir)
return output_dir
def get_readme_component(name):
""" Loads a README file component. """
paths = []
# platform directory
if platform == 'windows':
platform_cmp = 'win'
elif platform == 'mac':
platform_cmp = 'mac'
elif platform == 'linux':
platform_cmp = 'linux'
paths.append(os.path.join(script_dir, 'distrib', platform_cmp))
# shared directory
paths.append(os.path.join(script_dir, 'distrib'))
# load the file if it exists
for path in paths:
file = os.path.join(path, 'README.' + name + '.txt')
if path_exists(file):
return read_file(file)
raise Exception('Readme component not found: ' + name)
def create_readme():
""" Creates the README.TXT file. """
# gather the components
header_data = get_readme_component('header')
mode_data = get_readme_component(mode)
redistrib_data = get_readme_component('redistrib')
footer_data = get_readme_component('footer')
# format the file
data = header_data + '\n\n' + mode_data
if mode != 'sandbox':
data += '\n\n' + redistrib_data
data += '\n\n' + footer_data
data = data.replace('$CEF_URL$', cef_url)
data = data.replace('$CEF_REV$', cef_rev)
data = data.replace('$CEF_VER$', cef_ver)
data = data.replace('$CHROMIUM_URL$', chromium_url)
data = data.replace('$CHROMIUM_REV$', chromium_rev)
data = data.replace('$CHROMIUM_VER$', chromium_ver)
data = data.replace('$DATE$', date)
if platform == 'windows':
platform_str = 'Windows'
elif platform == 'mac':
platform_str = 'MacOS'
elif platform == 'linux':
platform_str = 'Linux'
data = data.replace('$PLATFORM$', platform_str)
if mode == 'standard':
distrib_type = 'Standard'
distrib_desc = 'This distribution contains all components necessary to build and distribute an\n' \
'application using CEF on the ' + platform_str + ' platform. Please see the LICENSING\n' \
'section of this document for licensing terms and conditions.'
elif mode == 'minimal':
distrib_type = 'Minimal'
distrib_desc = 'This distribution contains the minimal components necessary to build and\n' \
'distribute an application using CEF on the ' + platform_str + ' platform. Please see\n' \
'the LICENSING section of this document for licensing terms and conditions.'
elif mode == 'client':
distrib_type = 'Client'
if platform == 'linux':
client_app = 'cefsimple'
else:
client_app = 'cefclient'
distrib_desc = 'This distribution contains a release build of the ' + client_app + ' sample application\n' \
'for the ' + platform_str + ' platform. Please see the LICENSING section of this document for\n' \
'licensing terms and conditions.'
elif mode == 'sandbox':
distrib_type = 'Sandbox'
distrib_desc = 'This distribution contains only the cef_sandbox static library. Please see\n' \
'the LICENSING section of this document for licensing terms and conditions.'
data = data.replace('$DISTRIB_TYPE$', distrib_type)
data = data.replace('$DISTRIB_DESC$', distrib_desc)
write_file(os.path.join(output_dir, 'README.txt'), data)
if not options.quiet:
sys.stdout.write('Creating README.TXT file.\n')
def copy_gtest(tests_dir):
""" Copy GTest files to the expected directory structure. """
if not options.quiet:
sys.stdout.write('Building gtest directory structure.\n')
src_gtest_dir = os.path.join(cef_dir, 'tools', 'distrib', 'gtest')
target_gtest_dir = os.path.join(tests_dir, 'gtest')
# gtest header file at tests/gtest/include/gtest/gtest.h
target_gtest_header_dir = os.path.join(target_gtest_dir, 'include', 'gtest')
make_dir(target_gtest_header_dir, options.quiet)
copy_file(
os.path.join(src_gtest_dir, 'gtest.h'), target_gtest_header_dir,
options.quiet)
# gtest source file at tests/gtest/src/gtest-all.cc
target_gtest_cpp_dir = os.path.join(target_gtest_dir, 'src')
make_dir(target_gtest_cpp_dir, options.quiet)
copy_file(
os.path.join(src_gtest_dir, 'gtest-all.cc'), target_gtest_cpp_dir,
options.quiet)
# gtest LICENSE file at tests/gtest/LICENSE
copy_file(
os.path.join(src_gtest_dir, 'LICENSE'), target_gtest_dir, options.quiet)
# CEF README file at tests/gtest/README.cef
copy_file(
os.path.join(src_gtest_dir, 'README.cef'),
os.path.join(target_gtest_dir, 'README.cef'), options.quiet)
# Copy tests/gtest/teamcity files
copy_dir(
os.path.join(cef_dir, 'tests', 'gtest', 'teamcity'),
os.path.join(target_gtest_dir, 'teamcity'), options.quiet)
def transfer_gypi_files(src_dir, gypi_paths, gypi_path_prefix, dst_dir, quiet):
""" Transfer files from one location to another. """
for path in gypi_paths:
src = os.path.join(src_dir, path)
dst = os.path.join(dst_dir, path.replace(gypi_path_prefix, ''))
dst_path = os.path.dirname(dst)
make_dir(dst_path, quiet)
copy_file(src, dst, quiet)
def normalize_headers(file, new_path=''):
""" Normalize headers post-processing. Remove the path component from any
project include directives. """
data = read_file(file)
data = re.sub(r'''#include \"(?!include\/)[a-zA-Z0-9_\/]+\/+([a-zA-Z0-9_\.]+)\"''', \
"// Include path modified for CEF Binary Distribution.\n#include \""+new_path+"\\1\"", data)
write_file(file, data)
def eval_transfer_file(cef_dir, script_dir, transfer_cfg, output_dir, quiet):
""" Transfer files based on the specified configuration. """
if not path_exists(transfer_cfg):
return
configs = eval_file(transfer_cfg)
for cfg in configs:
dst = os.path.join(output_dir, cfg['target'])
# perform a copy if source is specified
if not cfg['source'] is None:
src = os.path.join(cef_dir, cfg['source'])
dst_path = os.path.dirname(dst)
make_dir(dst_path, quiet)
copy_file(src, dst, quiet)
# place a readme file in the destination directory
readme = os.path.join(dst_path, 'README-TRANSFER.txt')
if not path_exists(readme):
copy_file(
os.path.join(script_dir, 'distrib/README-TRANSFER.txt'), readme)
str = cfg['source'] + "\n"
with open(readme, 'a', encoding='utf-8') as fp:
if sys.version_info.major == 2:
fp.write(str.decode('utf-8'))
else:
fp.write(str)
# perform any required post-processing
if 'post-process' in cfg:
post = cfg['post-process']
if post == 'normalize_headers':
new_path = ''
if 'new_header_path' in cfg:
new_path = cfg['new_header_path']
normalize_headers(dst, new_path)
def transfer_files(cef_dir, script_dir, transfer_cfg_dir, mode, output_dir,
quiet):
# Non-mode-specific transfers.
transfer_cfg = os.path.join(transfer_cfg_dir, 'transfer.cfg')
eval_transfer_file(cef_dir, script_dir, transfer_cfg, output_dir, quiet)
# Mode-specific transfers.
transfer_cfg = os.path.join(transfer_cfg_dir, 'transfer_%s.cfg' % mode)
eval_transfer_file(cef_dir, script_dir, transfer_cfg, output_dir, quiet)
# |paths| is a list of dictionary values with the following keys:
# path [required] Input file or directory path relative to |build_dir|.
# By default this will also be the output path relative
# to |dst_dir|.
# out_path [optional] Override the output path relative to |dst_dir|.
# conditional [optional] Set to True if the path is conditional on build
# settings. Missing conditional paths will not be
# treated as an error.
# delete [optional] Glob pattern of files to delete after the copy.
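# For example, the entry {'path': 'locales', 'delete': '*.info'} in the Windows
# resources list below copies the locales directory and then removes any *.info
# files from the copied directory.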
def copy_files_list(build_dir, dst_dir, paths):
''' Copy the files listed in |paths| from |build_dir| to |dst_dir|. '''
for entry in paths:
source_path = os.path.join(build_dir, entry['path'])
if os.path.exists(source_path):
target_path = os.path.join(dst_dir, entry['out_path']
if 'out_path' in entry else entry['path'])
make_dir(os.path.dirname(target_path), options.quiet)
if os.path.isdir(source_path):
copy_dir(source_path, target_path, options.quiet)
if 'delete' in entry:
for delete_path in get_files(
os.path.join(target_path, entry['delete'])):
if not os.path.isdir(delete_path):
remove_file(delete_path, options.quiet)
else:
raise Exception('Refusing to delete directory: %s' % delete_path)
else:
copy_file(source_path, target_path, options.quiet)
else:
if 'conditional' in entry and entry['conditional']:
sys.stdout.write('Missing conditional path: %s.\n' % source_path)
else:
raise Exception('Missing required path: %s' % source_path)
def get_exported_symbols(file):
""" Returns the global symbols exported by |file|. """
symbols = []
# Each symbol line has a value like:
# 0000000000000000 T _cef_sandbox_initialize
cmdline = 'nm -g -U %s' % file
result = exec_cmd(cmdline, os.path.join(cef_dir, 'tools'))
if len(result['err']) > 0:
raise Exception('ERROR: nm failed: %s' % result['err'])
for line in result['out'].split('\n'):
if line.find(' T ') < 0:
continue
symbol = line[line.rfind(' ') + 1:]
symbols.append(symbol)
return symbols
def get_undefined_symbols(file):
""" Returns the undefined symbols imported by |file|. """
symbols = []
# Each symbol line has a value like:
# cef_sandbox.a:cef_sandbox.o: _memcpy
cmdline = 'nm -u -A %s' % file
result = exec_cmd(cmdline, os.path.join(cef_dir, 'tools'))
if len(result['err']) > 0:
raise Exception('ERROR: nm failed: %s' % result['err'])
for line in result['out'].split('\n'):
if line.find(': ') < 0:
continue
symbol = line[line.rfind(': ') + 2:]
symbols.append(symbol)
return symbols
def combine_libs(platform, build_dir, libs, dest_lib):
""" Combine multiple static libraries into a single static library. """
intermediate_obj = None
if platform == 'windows':
cmdline = 'msvs_env.bat win%s "%s" combine_libs.py -o "%s"' % (
platform_arch, sys.executable, dest_lib)
elif platform == 'mac':
# Find CEF_EXPORT symbols from libcef_sandbox.a (include/cef_sandbox_mac.h)
# Export only symbols that include these strings.
symbol_match = [
'_cef_', # C symbols
'Cef', # C++ symbols
]
print('Finding exported symbols...')
assert 'libcef_sandbox.a' in libs[0], libs[0]
symbols = []
for symbol in get_exported_symbols(os.path.join(build_dir, libs[0])):
for match in symbol_match:
if symbol.find(match) >= 0:
symbols.append(symbol)
break
assert len(symbols) > 0
# Create an intermediate object file that combines all other object files.
# Symbols not identified above will be made private (local).
intermediate_obj = os.path.splitext(dest_lib)[0] + '.o'
arch = 'arm64' if options.arm64build else 'x86_64'
cmdline = 'ld -arch %s -r -o "%s"' % (arch, intermediate_obj)
for symbol in symbols:
cmdline += ' -exported_symbol %s' % symbol
for lib in libs:
lib_path = os.path.join(build_dir, lib)
for path in get_files(lib_path): # Expand wildcards in |lib_path|.
if not path_exists(path):
raise Exception('File not found: ' + path)
cmdline += ' "%s"' % path
run(cmdline, os.path.join(cef_dir, 'tools'))
if not intermediate_obj is None:
# Create an archive file containing the new object file.
cmdline = 'libtool -static -o "%s" "%s"' % (dest_lib, intermediate_obj)
run(cmdline, os.path.join(cef_dir, 'tools'))
remove_file(intermediate_obj)
# Verify that only the expected symbols are exported from the archive file.
print('Verifying exported symbols...')
result_symbols = get_exported_symbols(dest_lib)
if set(symbols) != set(result_symbols):
print('Expected', symbols)
print('Got', result_symbols)
raise Exception('Failure verifying exported symbols')
# Verify that no C++ symbols are imported by the archive file. If the
# archive imports C++ symbols and the client app links an incompatible C++
# library, the result will be undefined behavior.
# For example, to avoid importing libc++ symbols the cef_sandbox target
# should have a dependency on libc++abi. This dependency can be verified
# with the following command:
# gn path out/[config] //cef:cef_sandbox //buildtools/third_party/libc++abi
print('Verifying imported (undefined) symbols...')
undefined_symbols = get_undefined_symbols(dest_lib)
cpp_symbols = list(
filter(lambda symbol: symbol.startswith('__Z'), undefined_symbols))
if cpp_symbols:
print('Found C++ symbols:', cpp_symbols)
raise Exception('Failure verifying imported (undefined) symbols')
def run(command_line, working_dir):
""" Run a command. """
sys.stdout.write('-------- Running "'+command_line+'" in "'+\
working_dir+'"...'+"\n")
args = shlex.split(command_line.replace('\\', '\\\\'))
return subprocess.check_call(
args, cwd=working_dir, env=os.environ, shell=(sys.platform == 'win32'))
def print_error(msg):
print('Error: %s\nSee --help for usage.' % msg)
# cannot be loaded as a module
if __name__ != "__main__":
sys.stderr.write('This file cannot be loaded as a module!')
sys.exit()
# parse command-line options
disc = """
This utility builds the CEF Binary Distribution.
"""
parser = OptionParser(description=disc)
parser.add_option(
'--output-dir',
dest='outputdir',
metavar='DIR',
help='output directory [required]')
parser.add_option(
'--distrib-subdir',
dest='distribsubdir',
help='name of the subdirectory for the distribution',
default='')
parser.add_option(
'--distrib-subdir-suffix',
dest='distribsubdirsuffix',
help='suffix added to name of the subdirectory for the distribution',
default='')
parser.add_option(
'--allow-partial',
action='store_true',
dest='allowpartial',
default=False,
help='allow creation of partial distributions')
parser.add_option(
'--no-symbols',
action='store_true',
dest='nosymbols',
default=False,
help='don\'t create symbol files')
parser.add_option(
'--no-docs',
action='store_true',
dest='nodocs',
default=False,
help='don\'t create documentation')
parser.add_option(
'--no-archive',
action='store_true',
dest='noarchive',
default=False,
help='don\'t create archives for output directories')
parser.add_option(
'--ninja-build',
action='store_true',
dest='ninjabuild',
default=False,
help='build was created using ninja')
parser.add_option(
'--x64-build',
action='store_true',
dest='x64build',
default=False,
help='create a 64-bit binary distribution')
parser.add_option(
'--arm-build',
action='store_true',
dest='armbuild',
default=False,
help='create an ARM binary distribution (Linux only)')
parser.add_option(
'--arm64-build',
action='store_true',
dest='arm64build',
default=False,
help='create an ARM64 binary distribution (Linux only)')
parser.add_option(
'--minimal',
action='store_true',
dest='minimal',
default=False,
help='include only release build binary files')
parser.add_option(
'--client',
action='store_true',
dest='client',
default=False,
help='include only the sample application')
parser.add_option(
'--sandbox',
action='store_true',
dest='sandbox',
default=False,
help='include only the cef_sandbox static library (macOS and Windows only)')
parser.add_option(
'--ozone',
action='store_true',
dest='ozone',
default=False,
help='include ozone build related files (Linux only)')
parser.add_option(
'-q',
'--quiet',
action='store_true',
dest='quiet',
default=False,
help='do not output detailed status information')
(options, args) = parser.parse_args()
# Test the operating system.
platform = ''
if sys.platform == 'win32':
platform = 'windows'
elif sys.platform == 'darwin':
platform = 'mac'
elif sys.platform.startswith('linux'):
platform = 'linux'
# the outputdir option is required
if options.outputdir is None:
print_error('--output-dir is required.')
sys.exit()
if options.minimal and options.client:
print_error('Cannot specify both --minimal and --client.')
sys.exit()
if options.x64build + options.armbuild + options.arm64build > 1:
print_error('Invalid combination of build options.')
sys.exit()
if options.armbuild and platform != 'linux':
print_error('--arm-build is only supported on Linux.')
sys.exit()
if options.sandbox and not platform in ('mac', 'windows'):
print_error('--sandbox is only supported on macOS and Windows.')
sys.exit()
if not options.ninjabuild:
print_error('--ninja-build is required.')
sys.exit()
if options.ozone and platform != 'linux':
print_error('--ozone is only supported on Linux.')
sys.exit()
# script directory
script_dir = os.path.dirname(__file__)
# CEF root directory
cef_dir = os.path.abspath(os.path.join(script_dir, os.pardir))
# src directory
src_dir = os.path.abspath(os.path.join(cef_dir, os.pardir))
if not git.is_checkout(cef_dir):
raise Exception('Not a valid checkout: %s' % (cef_dir))
# retrieve information for CEF
cef_url = git.get_url(cef_dir)
cef_rev = git.get_hash(cef_dir)
cef_commit_number = git.get_commit_number(cef_dir)
if not git.is_checkout(src_dir):
raise Exception('Not a valid checkout: %s' % (src_dir))
# retrieve information for Chromium
chromium_url = git.get_url(src_dir)
chromium_rev = git.get_hash(src_dir)
date = get_date()
# format version strings
formatter = VersionFormatter()
cef_ver = formatter.get_version_string()
chromium_ver = formatter.get_chromium_version_string()
# list of output directories to be archived
archive_dirs = []
if options.x64build:
platform_arch = '64'
binary_arch = 'x64'
elif options.armbuild:
platform_arch = 'arm'
binary_arch = 'arm'
elif options.arm64build:
platform_arch = 'arm64'
binary_arch = 'arm64'
else:
platform_arch = '32'
binary_arch = 'x86'
# output directory
output_dir_base = 'cef_binary_' + cef_ver
if options.distribsubdir == '':
if platform == 'mac':
# For backwards compatibility keep the old default directory name on mac.
platform_name = 'macos' + ('x' if platform_arch == '64' else '')
else:
platform_name = platform
output_dir_name = output_dir_base + '_' + platform_name + platform_arch
if options.distribsubdirsuffix != '':
output_dir_name += '_' + options.distribsubdirsuffix
else:
output_dir_name = options.distribsubdir
if options.minimal:
mode = 'minimal'
output_dir_name = output_dir_name + '_minimal'
elif options.client:
mode = 'client'
output_dir_name = output_dir_name + '_client'
elif options.sandbox:
mode = 'sandbox'
output_dir_name = output_dir_name + '_sandbox'
else:
mode = 'standard'
if options.ozone:
output_dir_name = output_dir_name + '_ozone'
output_dir = create_output_dir(output_dir_name, options.outputdir)
# create the README.TXT file
create_readme()
# transfer the LICENSE.txt file
copy_file(os.path.join(cef_dir, 'LICENSE.txt'), output_dir, options.quiet)
# read the variables list from the autogenerated cef_paths.gypi file
cef_paths = eval_file(os.path.join(cef_dir, 'cef_paths.gypi'))
cef_paths = cef_paths['variables']
# read the variables list from the manually edited cef_paths2.gypi file
cef_paths2 = eval_file(os.path.join(cef_dir, 'cef_paths2.gypi'))
cef_paths2 = cef_paths2['variables']
# Determine the build directory suffix. CEF uses a consistent directory naming
# scheme for GN via GetAllPlatformConfigs in gn_args.py.
if options.x64build:
build_dir_suffix = '_GN_x64'
elif options.armbuild:
build_dir_suffix = '_GN_arm'
elif options.arm64build:
build_dir_suffix = '_GN_arm64'
else:
build_dir_suffix = '_GN_x86'
# Determine the build directory paths.
out_dir = os.path.join(src_dir, 'out')
build_dir_debug = os.path.join(out_dir, 'Debug' + build_dir_suffix)
build_dir_release = os.path.join(out_dir, 'Release' + build_dir_suffix)
if mode == 'standard' or mode == 'minimal':
# create the include directory
include_dir = os.path.join(output_dir, 'include')
make_dir(include_dir, options.quiet)
# create the cmake directory
cmake_dir = os.path.join(output_dir, 'cmake')
make_dir(cmake_dir, options.quiet)
# create the libcef_dll_wrapper directory
libcef_dll_dir = os.path.join(output_dir, 'libcef_dll')
make_dir(libcef_dll_dir, options.quiet)
# transfer common include files
transfer_gypi_files(cef_dir, cef_paths2['includes_common'], \
'include/', include_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['includes_common_capi'], \
'include/', include_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['includes_capi'], \
'include/', include_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['includes_wrapper'], \
'include/', include_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths['autogen_cpp_includes'], \
'include/', include_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths['autogen_capi_includes'], \
'include/', include_dir, options.quiet)
# Transfer generated include files.
generated_includes = [
'cef_pack_resources.h',
'cef_pack_strings.h',
]
for include in generated_includes:
# Debug and Release build should be the same so grab whichever exists.
src_path = os.path.join(build_dir_release, 'includes', 'include', include)
if not os.path.exists(src_path):
src_path = os.path.join(build_dir_debug, 'includes', 'include', include)
if not os.path.exists(src_path):
raise Exception('Missing generated header file: %s' % include)
copy_file(src_path, os.path.join(include_dir, include), options.quiet)
# transfer common libcef_dll_wrapper files
transfer_gypi_files(cef_dir, cef_paths2['libcef_dll_wrapper_sources_base'], \
'libcef_dll/', libcef_dll_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['libcef_dll_wrapper_sources_common'], \
'libcef_dll/', libcef_dll_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths['autogen_client_side'], \
'libcef_dll/', libcef_dll_dir, options.quiet)
if mode == 'standard' or mode == 'minimal':
# transfer additional files
transfer_files(cef_dir, script_dir, os.path.join(script_dir, 'distrib'), \
mode, output_dir, options.quiet)
# process cmake templates
variables = cef_paths.copy()
variables.update(cef_paths2)
process_cmake_template(os.path.join(cef_dir, 'CMakeLists.txt.in'), \
os.path.join(output_dir, 'CMakeLists.txt'), \
variables, options.quiet)
process_cmake_template(os.path.join(cef_dir, 'cmake', 'cef_macros.cmake.in'), \
os.path.join(cmake_dir, 'cef_macros.cmake'), \
variables, options.quiet)
process_cmake_template(os.path.join(cef_dir, 'cmake', 'cef_variables.cmake.in'), \
os.path.join(cmake_dir, 'cef_variables.cmake'), \
variables, options.quiet)
process_cmake_template(os.path.join(cef_dir, 'cmake', 'FindCEF.cmake.in'), \
os.path.join(cmake_dir, 'FindCEF.cmake'), \
variables, options.quiet)
process_cmake_template(os.path.join(cef_dir, 'libcef_dll', 'CMakeLists.txt.in'), \
os.path.join(libcef_dll_dir, 'CMakeLists.txt'), \
variables, options.quiet)
if mode == 'standard':
# create the tests directory
tests_dir = os.path.join(output_dir, 'tests')
make_dir(tests_dir, options.quiet)
# create the tests/shared directory
shared_dir = os.path.join(tests_dir, 'shared')
make_dir(shared_dir, options.quiet)
if not options.ozone:
# create the tests/cefclient directory
cefclient_dir = os.path.join(tests_dir, 'cefclient')
make_dir(cefclient_dir, options.quiet)
# create the tests/cefsimple directory
cefsimple_dir = os.path.join(tests_dir, 'cefsimple')
make_dir(cefsimple_dir, options.quiet)
# create the tests/ceftests directory
ceftests_dir = os.path.join(tests_dir, 'ceftests')
make_dir(ceftests_dir, options.quiet)
# transfer common shared files
transfer_gypi_files(cef_dir, cef_paths2['shared_sources_browser'], \
'tests/shared/', shared_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['shared_sources_common'], \
'tests/shared/', shared_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['shared_sources_renderer'], \
'tests/shared/', shared_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['shared_sources_resources'], \
'tests/shared/', shared_dir, options.quiet)
if not options.ozone:
# transfer common cefclient files
transfer_gypi_files(cef_dir, cef_paths2['cefclient_sources_browser'], \
'tests/cefclient/', cefclient_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['cefclient_sources_common'], \
'tests/cefclient/', cefclient_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['cefclient_sources_renderer'], \
'tests/cefclient/', cefclient_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['cefclient_sources_resources'], \
'tests/cefclient/', cefclient_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['cefclient_sources_resources_extensions_set_page_color'], \
'tests/cefclient/', cefclient_dir, options.quiet)
# transfer common cefsimple files
transfer_gypi_files(cef_dir, cef_paths2['cefsimple_sources_common'], \
'tests/cefsimple/', cefsimple_dir, options.quiet)
# transfer common ceftests files
transfer_gypi_files(cef_dir, cef_paths2['ceftests_sources_common'], \
'tests/ceftests/', ceftests_dir, options.quiet)
# copy GTest files
copy_gtest(tests_dir)
# process cmake templates
if not options.ozone:
process_cmake_template(os.path.join(cef_dir, 'tests', 'cefclient', 'CMakeLists.txt.in'), \
os.path.join(cefclient_dir, 'CMakeLists.txt'), \
variables, options.quiet)
process_cmake_template(os.path.join(cef_dir, 'tests', 'cefsimple', 'CMakeLists.txt.in'), \
os.path.join(cefsimple_dir, 'CMakeLists.txt'), \
variables, options.quiet)
process_cmake_template(os.path.join(cef_dir, 'tests', 'gtest', 'CMakeLists.txt.in'), \
os.path.join(tests_dir, 'gtest', 'CMakeLists.txt'), \
variables, options.quiet)
process_cmake_template(os.path.join(cef_dir, 'tests', 'ceftests', 'CMakeLists.txt.in'), \
os.path.join(ceftests_dir, 'CMakeLists.txt'), \
variables, options.quiet)
# transfer gypi files
copy_file(os.path.join(cef_dir, 'cef_paths.gypi'), \
os.path.join(output_dir, 'cef_paths.gypi'), options.quiet)
copy_file(os.path.join(cef_dir, 'cef_paths2.gypi'), \
os.path.join(output_dir, 'cef_paths2.gypi'), options.quiet)
if platform == 'windows':
libcef_dll = 'libcef.dll'
libcef_dll_lib = '%s.lib' % libcef_dll
libcef_dll_pdb = '%s.pdb' % libcef_dll
# yapf: disable
binaries = [
{'path': 'chrome_elf.dll'},
{'path': libcef_dll},
{'path': 'libEGL.dll'},
{'path': 'libGLESv2.dll'},
{'path': 'snapshot_blob.bin', 'conditional': True},
{'path': 'v8_context_snapshot.bin', 'conditional': True},
{'path': 'swiftshader\\libEGL.dll'},
{'path': 'swiftshader\\libGLESv2.dll'},
]
# yapf: enable
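  # Entries flagged 'conditional' are optional build outputs (the V8 snapshot
  # files only exist for certain GN configurations) and 'out_path' renames a
  # file in the distribution; both keys are assumed to be interpreted by
  # copy_files_list() below.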
if mode == 'client':
binaries.append({
'path': 'cefsimple.exe' if platform_arch == 'arm64' else 'cefclient.exe'
})
else:
binaries.append({'path': libcef_dll_lib, 'out_path': 'libcef.lib'})
# yapf: disable
resources = [
{'path': 'chrome_100_percent.pak'},
{'path': 'chrome_200_percent.pak'},
{'path': 'resources.pak'},
{'path': 'icudtl.dat'},
{'path': 'locales', 'delete': '*.info'},
]
# yapf: enable
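  # The 'delete' pattern presumably prunes matching files (*.info) from the
  # copied locales directory so that only the .pak files ship.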
cef_sandbox_lib = 'obj\\cef\\cef_sandbox.lib'
sandbox_libs = [
'obj\\base\\base.lib',
'obj\\base\\base_static.lib',
'obj\\base\\third_party\\double_conversion\\double_conversion.lib',
'obj\\base\\third_party\\dynamic_annotations\\dynamic_annotations.lib',
'obj\\base\\win\\pe_image.lib',
cef_sandbox_lib,
'obj\\sandbox\\win\\sandbox.lib',
]
# Generate the cef_sandbox.lib merged library. A separate *_sandbox build
# should exist when GN is_official_build=true.
if mode in ('standard', 'minimal', 'sandbox'):
dirs = {
'Debug': (build_dir_debug + '_sandbox', build_dir_debug),
'Release': (build_dir_release + '_sandbox', build_dir_release)
}
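    # Prefer the dedicated *_sandbox build directory when it contains
    # cef_sandbox.lib; otherwise fall back to the regular build directory.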
for dir_name in dirs.keys():
for src_dir in dirs[dir_name]:
if path_exists(os.path.join(src_dir, cef_sandbox_lib)):
dst_dir = os.path.join(output_dir, dir_name)
make_dir(dst_dir, options.quiet)
combine_libs(platform, src_dir, sandbox_libs,
os.path.join(dst_dir, 'cef_sandbox.lib'))
break
valid_build_dir = None
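  # Remember which build directory actually produced binaries so that the
  # shared resource files can be copied from it further below.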
if mode == 'standard':
# transfer Debug files
build_dir = build_dir_debug
if not options.allowpartial or path_exists(
os.path.join(build_dir, libcef_dll)):
valid_build_dir = build_dir
dst_dir = os.path.join(output_dir, 'Debug')
copy_files_list(build_dir, dst_dir, binaries)
copy_files(
os.path.join(script_dir, 'distrib/win/%s/*.dll' % binary_arch),
dst_dir, options.quiet)
if not options.nosymbols:
# create the symbol output directory
symbol_output_dir = create_output_dir(
output_dir_name + '_debug_symbols', options.outputdir)
# transfer contents
copy_file(
os.path.join(build_dir, libcef_dll_pdb), symbol_output_dir,
options.quiet)
else:
sys.stdout.write("No Debug build files.\n")
if mode != 'sandbox':
# transfer Release files
build_dir = build_dir_release
if not options.allowpartial or path_exists(
os.path.join(build_dir, libcef_dll)):
valid_build_dir = build_dir
dst_dir = os.path.join(output_dir, 'Release')
copy_files_list(build_dir, dst_dir, binaries)
copy_files(
os.path.join(script_dir, 'distrib/win/%s/*.dll' % binary_arch),
dst_dir, options.quiet)
if not options.nosymbols:
# create the symbol output directory
symbol_output_dir = create_output_dir(
output_dir_name + '_release_symbols', options.outputdir)
# transfer contents
copy_file(
os.path.join(build_dir, libcef_dll_pdb), symbol_output_dir,
options.quiet)
else:
sys.stdout.write("No Release build files.\n")
  if valid_build_dir is not None:
# transfer resource files
build_dir = valid_build_dir
if mode == 'client':
dst_dir = os.path.join(output_dir, 'Release')
else:
dst_dir = os.path.join(output_dir, 'Resources')
copy_files_list(build_dir, dst_dir, resources)
if mode == 'standard' or mode == 'minimal':
# transfer include files
transfer_gypi_files(cef_dir, cef_paths2['includes_win'], \
'include/', include_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['includes_win_capi'], \
'include/', include_dir, options.quiet)
# transfer additional files, if any
transfer_files(cef_dir, script_dir, os.path.join(script_dir, 'distrib', 'win'), \
mode, output_dir, options.quiet)
if mode == 'standard':
# transfer shared files
transfer_gypi_files(cef_dir, cef_paths2['shared_sources_win'], \
'tests/shared/', shared_dir, options.quiet)
# transfer cefclient files
transfer_gypi_files(cef_dir, cef_paths2['cefclient_sources_win'], \
'tests/cefclient/', cefclient_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['cefclient_sources_resources_win'], \
'tests/cefclient/', cefclient_dir, options.quiet)
# transfer cefsimple files
transfer_gypi_files(cef_dir, cef_paths2['cefsimple_sources_win'], \
'tests/cefsimple/', cefsimple_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['cefsimple_sources_resources_win'], \
'tests/cefsimple/', cefsimple_dir, options.quiet)
# transfer ceftests files
transfer_gypi_files(cef_dir, cef_paths2['ceftests_sources_win'], \
'tests/ceftests/', ceftests_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['ceftests_sources_resources_win'], \
'tests/ceftests/', ceftests_dir, options.quiet)
if not options.nodocs:
# generate doc files
os.popen('make_cppdocs.bat ' + cef_rev)
src_dir = os.path.join(cef_dir, 'docs')
if path_exists(src_dir):
# create the docs output directory
docs_output_dir = create_output_dir(output_dir_base + '_docs',
options.outputdir)
# transfer contents
copy_dir(src_dir, docs_output_dir, options.quiet)
elif platform == 'mac':
framework_name = 'Chromium Embedded Framework'
framework_dsym = '%s.dSYM' % framework_name
cefclient_app = 'cefclient.app'
cef_sandbox_lib = 'obj/cef/libcef_sandbox.a'
sandbox_libs = [
cef_sandbox_lib,
'obj/sandbox/mac/libseatbelt.a',
'obj/sandbox/mac/libseatbelt_proto.a',
'obj/third_party/protobuf/libprotobuf_lite.a',
'obj/buildtools/third_party/libc++/libc++/*.o',
'obj/buildtools/third_party/libc++abi/libc++abi/*.o',
]
# Generate the cef_sandbox.a merged library. A separate *_sandbox build
# should exist when GN is_official_build=true.
if mode in ('standard', 'minimal', 'sandbox'):
dirs = {
'Debug': (build_dir_debug + '_sandbox', build_dir_debug),
'Release': (build_dir_release + '_sandbox', build_dir_release)
}
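    # Prefer the dedicated *_sandbox build directory when it contains
    # libcef_sandbox.a; otherwise fall back to the regular build directory.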
for dir_name in dirs.keys():
for src_dir in dirs[dir_name]:
if path_exists(os.path.join(src_dir, cef_sandbox_lib)):
dst_dir = os.path.join(output_dir, dir_name)
make_dir(dst_dir, options.quiet)
combine_libs(platform, src_dir, sandbox_libs,
os.path.join(dst_dir, 'cef_sandbox.a'))
break
valid_build_dir = None
if mode == 'standard':
# transfer Debug files
build_dir = build_dir_debug
if not options.allowpartial or path_exists(
os.path.join(build_dir, cefclient_app)):
valid_build_dir = build_dir
dst_dir = os.path.join(output_dir, 'Debug')
make_dir(dst_dir, options.quiet)
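      # The framework is built inside the sample app bundle; copy the
      # versioned directory (Versions/A) out as a standalone framework.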
framework_src_dir = os.path.join(
build_dir, '%s/Contents/Frameworks/%s.framework/Versions/A' %
(cefclient_app, framework_name))
framework_dst_dir = os.path.join(dst_dir, '%s.framework' % framework_name)
copy_dir(framework_src_dir, framework_dst_dir, options.quiet)
if not options.nosymbols:
# create the symbol output directory
symbol_output_dir = create_output_dir(
output_dir_name + '_debug_symbols', options.outputdir)
# The real dSYM already exists, just copy it to the output directory.
# dSYMs are only generated when is_official_build=true or enable_dsyms=true.
# See //build/config/mac/symbols.gni.
copy_dir(
os.path.join(build_dir, framework_dsym),
os.path.join(symbol_output_dir, framework_dsym), options.quiet)
else:
sys.stdout.write("No Debug build files.\n")
if mode != 'sandbox':
# transfer Release files
build_dir = build_dir_release
if not options.allowpartial or path_exists(
os.path.join(build_dir, cefclient_app)):
valid_build_dir = build_dir
dst_dir = os.path.join(output_dir, 'Release')
make_dir(dst_dir, options.quiet)
framework_src_dir = os.path.join(
build_dir, '%s/Contents/Frameworks/%s.framework/Versions/A' %
(cefclient_app, framework_name))
if mode != 'client':
framework_dst_dir = os.path.join(dst_dir,
'%s.framework' % framework_name)
else:
copy_dir(
os.path.join(build_dir, cefclient_app),
os.path.join(dst_dir, cefclient_app), options.quiet)
# Replace the versioned framework with an unversioned framework in the sample app.
framework_dst_dir = os.path.join(
dst_dir, '%s/Contents/Frameworks/%s.framework' % (cefclient_app,
framework_name))
remove_dir(framework_dst_dir, options.quiet)
copy_dir(framework_src_dir, framework_dst_dir, options.quiet)
if not options.nosymbols:
# create the symbol output directory
symbol_output_dir = create_output_dir(
output_dir_name + '_release_symbols', options.outputdir)
# The real dSYM already exists, just copy it to the output directory.
# dSYMs are only generated when is_official_build=true or enable_dsyms=true.
# See //build/config/mac/symbols.gni.
copy_dir(
os.path.join(build_dir, framework_dsym),
os.path.join(symbol_output_dir, framework_dsym), options.quiet)
else:
sys.stdout.write("No Release build files.\n")
if mode == 'standard' or mode == 'minimal':
# transfer include files
transfer_gypi_files(cef_dir, cef_paths2['includes_mac'], \
'include/', include_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['includes_mac_capi'], \
'include/', include_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['includes_wrapper_mac'], \
'include/', include_dir, options.quiet)
# transfer libcef_dll_wrapper files
transfer_gypi_files(cef_dir, cef_paths2['libcef_dll_wrapper_sources_mac'], \
'libcef_dll/', libcef_dll_dir, options.quiet)
# transfer additional files, if any
transfer_files(cef_dir, script_dir, os.path.join(script_dir, 'distrib', 'mac'), \
mode, output_dir, options.quiet)
if mode == 'standard':
# transfer shared files
transfer_gypi_files(cef_dir, cef_paths2['shared_sources_mac'], \
'tests/shared/', shared_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['shared_sources_mac_helper'], \
'tests/shared/', shared_dir, options.quiet)
# transfer cefclient files
transfer_gypi_files(cef_dir, cef_paths2['cefclient_sources_mac'], \
'tests/cefclient/', cefclient_dir, options.quiet)
# transfer cefclient/resources/mac files
copy_dir(os.path.join(cef_dir, 'tests/cefclient/resources/mac'), \
os.path.join(cefclient_dir, 'resources/mac'), \
options.quiet)
# transfer cefsimple files
transfer_gypi_files(cef_dir, cef_paths2['cefsimple_sources_mac'], \
'tests/cefsimple/', cefsimple_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['cefsimple_sources_mac_helper'], \
'tests/cefsimple/', cefsimple_dir, options.quiet)
# transfer cefsimple/mac files
copy_dir(os.path.join(cef_dir, 'tests/cefsimple/mac'), \
os.path.join(cefsimple_dir, 'mac'), \
options.quiet)
# transfer ceftests files
transfer_gypi_files(cef_dir, cef_paths2['ceftests_sources_mac'], \
'tests/ceftests/', ceftests_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['ceftests_sources_mac_helper'], \
'tests/ceftests/', ceftests_dir, options.quiet)
# transfer ceftests/resources/mac files
copy_dir(os.path.join(cef_dir, 'tests/ceftests/resources/mac'), \
os.path.join(ceftests_dir, 'resources/mac'), \
options.quiet)
elif platform == 'linux':
libcef_so = 'libcef.so'
# yapf: disable
binaries = [
{'path': 'chrome_sandbox', 'out_path': 'chrome-sandbox'},
{'path': libcef_so},
{'path': 'libEGL.so'},
{'path': 'libGLESv2.so'},
{'path': 'snapshot_blob.bin', 'conditional': True},
{'path': 'v8_context_snapshot.bin', 'conditional': True},
{'path': 'swiftshader/libEGL.so'},
{'path': 'swiftshader/libGLESv2.so'},
]
# yapf: enable
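  # chrome_sandbox is renamed to chrome-sandbox, the file name Chromium's
  # setuid sandbox helper expects at runtime.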
if options.ozone:
binaries.append({'path': 'libminigbm.so', 'conditional': True})
if mode == 'client':
binaries.append({'path': 'cefsimple'})
# yapf: disable
resources = [
{'path': 'chrome_100_percent.pak'},
{'path': 'chrome_200_percent.pak'},
{'path': 'resources.pak'},
{'path': 'icudtl.dat'},
{'path': 'locales', 'delete': '*.info'},
]
# yapf: enable
valid_build_dir = None
if mode == 'standard':
# transfer Debug files
build_dir = build_dir_debug
libcef_path = os.path.join(build_dir, libcef_so)
if not options.allowpartial or path_exists(libcef_path):
valid_build_dir = build_dir
dst_dir = os.path.join(output_dir, 'Debug')
copy_files_list(build_dir, dst_dir, binaries)
else:
sys.stdout.write("No Debug build files.\n")
# transfer Release files
build_dir = build_dir_release
libcef_path = os.path.join(build_dir, libcef_so)
if not options.allowpartial or path_exists(libcef_path):
valid_build_dir = build_dir
dst_dir = os.path.join(output_dir, 'Release')
copy_files_list(build_dir, dst_dir, binaries)
else:
sys.stdout.write("No Release build files.\n")
  if valid_build_dir is not None:
# transfer resource files
build_dir = valid_build_dir
if mode == 'client':
dst_dir = os.path.join(output_dir, 'Release')
else:
dst_dir = os.path.join(output_dir, 'Resources')
copy_files_list(build_dir, dst_dir, resources)
if mode == 'standard' or mode == 'minimal':
# transfer include files
transfer_gypi_files(cef_dir, cef_paths2['includes_linux'], \
'include/', include_dir, options.quiet)
transfer_gypi_files(cef_dir, cef_paths2['includes_linux_capi'], \
'include/', include_dir, options.quiet)
# transfer additional files, if any
transfer_files(cef_dir, script_dir, os.path.join(script_dir, 'distrib', 'linux'), \
mode, output_dir, options.quiet)
if mode == 'standard':
# transfer shared files
transfer_gypi_files(cef_dir, cef_paths2['shared_sources_linux'], \
'tests/shared/', shared_dir, options.quiet)
if not options.ozone:
# transfer cefclient files
transfer_gypi_files(cef_dir, cef_paths2['cefclient_sources_linux'], \
'tests/cefclient/', cefclient_dir, options.quiet)
# transfer cefsimple files
transfer_gypi_files(cef_dir, cef_paths2['cefsimple_sources_linux'], \
'tests/cefsimple/', cefsimple_dir, options.quiet)
# transfer ceftests files
transfer_gypi_files(cef_dir, cef_paths2['ceftests_sources_linux'], \
'tests/ceftests/', ceftests_dir, options.quiet)
if not options.noarchive:
# create an archive for each output directory
archive_format = os.getenv('CEF_ARCHIVE_FORMAT', 'zip')
if archive_format not in ('zip', 'tar.gz', 'tar.bz2'):
raise Exception('Unsupported archive format: %s' % archive_format)
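  # CEF_COMMAND_7ZIP, if set, switches archiving to an external 7zip tool and
  # CEF_COMMAND_7ZIP_FORMAT (default '7z') then selects the archive format.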
if os.getenv('CEF_COMMAND_7ZIP', '') != '':
archive_format = os.getenv('CEF_COMMAND_7ZIP_FORMAT', '7z')
  for archive_dir in archive_dirs:
    if not options.quiet:
      sys.stdout.write("Creating %s archive for %s...\n" %
                       (archive_format, os.path.basename(archive_dir)))
    if archive_format == 'zip':
      create_zip_archive(archive_dir)
    elif archive_format == 'tar.gz':
      create_tar_archive(archive_dir, 'gz')
    elif archive_format == 'tar.bz2':
      create_tar_archive(archive_dir, 'bz2')
    else:
      create_7z_archive(archive_dir, archive_format)
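
# Illustrative sketch: a minimal example of what a tar helper such as
# create_tar_archive() might look like, assuming it packs <dir> into a
# sibling <dir>.tar.<fmt> archive. The helper name and exact behavior in
# this script are assumptions.
def _example_create_tar_archive(input_dir, fmt):
  """Pack input_dir into input_dir.tar.<fmt>, where fmt is 'gz' or 'bz2'."""
  import tarfile
  archive_path = '%s.tar.%s' % (input_dir, fmt)
  with tarfile.open(archive_path, 'w:' + fmt) as tf:
    # Store the directory under its own name so it unpacks cleanly.
    tf.add(input_dir, arcname=os.path.basename(input_dir))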
| [] | [] | ["CEF_COMMAND_7ZIP", "CEF_ARCHIVE_FORMAT", "CEF_COMMAND_7ZIP_FORMAT"] | [] | ["CEF_COMMAND_7ZIP", "CEF_ARCHIVE_FORMAT", "CEF_COMMAND_7ZIP_FORMAT"] | python | 3 | 0 |